1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use lsp::LanguageServerId;
30use parking_lot::Mutex;
31use postage::{
32 barrier,
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 pin::Pin,
48 sync::{
49 atomic::{AtomicUsize, Ordering::SeqCst},
50 Arc,
51 },
52 time::{Duration, SystemTime},
53};
54use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
55use util::{paths::HOME, ResultExt, TryFutureExt};
56
/// Identifies a worktree within a project. Wraps a `usize` that is derived
/// from the id of the `Worktree` model (see `WorktreeId::from_usize` usage
/// in `Worktree::local`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
59
/// A tree of files and directories tracked by a project: either a directory
/// on the local filesystem, or a replica of a collaborator's worktree that
/// is kept in sync over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task that watches for file-system events.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Sends batches of absolute paths to the background scanner to be
    /// rescanned; the paired `barrier::Sender` is dropped once the scanner
    /// has processed the batch, waking anyone waiting on the barrier.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    /// Watch channel reflecting whether a background scan is in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    /// Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    /// Full diagnostics per path, grouped by the language server that
    /// produced them (each inner Vec is kept sorted by server id).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Error/warning counts per path and language server, mirrored to
    /// collaborators when shared.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    /// Whether this worktree is shown in the project panel.
    visible: bool,
}
83
/// A replica of a worktree that lives on another machine, updated by
/// `proto::UpdateWorktree` messages received from the host.
pub struct RemoteWorktree {
    /// The snapshot as of the last applied remote update, read on the
    /// foreground thread.
    snapshot: Snapshot,
    /// The snapshot that remote updates are applied to on a background task;
    /// copied into `snapshot` when it changes.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Sender feeding the background update-applying task; `None` once
    /// disconnected from the host.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Waiters to notify when a given scan id has been observed, kept
    /// sorted by scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
96
/// An immutable view of a worktree's contents at a point in time: its
/// entries (indexed both by path and by id), the git repositories it
/// contains, and bookkeeping about scan progress.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    /// The file name of the worktree root, used for fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    /// Git repositories found in this worktree, keyed by their working
    /// directory (relative to the worktree root).
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
119
/// Metadata about a git repository discovered inside a worktree.
#[derive(Clone, Debug)]
pub struct RepositoryEntry {
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
    // Entry id of the .git directory within the worktree.
    pub(crate) git_dir_entry_id: ProjectEntryId,
    // The repository's working directory (the folder containing .git),
    // relative to the worktree root.
    pub(crate) work_directory: RepositoryWorkDirectory,
    // Scan id at which this repository entry was last updated.
    pub(crate) scan_id: usize,
    // TODO: pub(crate) head_ref: Arc<str>,
}
130
131impl RepositoryEntry {
132 // Note that this path should be relative to the worktree root.
133 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
134 path.starts_with(self.git_dir_path.as_ref())
135 }
136}
137
/// This path corresponds to the 'content path' (the folder that contains the .git).
/// The wrapped path is relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
141
142impl RepositoryWorkDirectory {
143 // Note that these paths should be relative to the worktree root.
144 pub(crate) fn contains(&self, path: &Path) -> bool {
145 path.starts_with(self.0.as_ref())
146 }
147
148 pub(crate) fn relativize(&self, path: &Path) -> Option<RepoPath> {
149 path.strip_prefix(self.0.as_ref())
150 .ok()
151 .map(move |path| RepoPath(path.to_owned()))
152 }
153}
154
155impl Deref for RepositoryWorkDirectory {
156 type Target = Path;
157
158 fn deref(&self) -> &Self::Target {
159 self.0.as_ref()
160 }
161}
162
/// A path expressed relative to a git repository's working directory
/// (see `RepositoryWorkDirectory::relativize`).
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepoPath(PathBuf);
165
166impl AsRef<Path> for RepoPath {
167 fn as_ref(&self) -> &Path {
168 self.0.as_ref()
169 }
170}
171
172impl Deref for RepoPath {
173 type Target = PathBuf;
174
175 fn deref(&self) -> &Self::Target {
176 &self.0
177 }
178}
179
180impl AsRef<Path> for RepositoryWorkDirectory {
181 fn as_ref(&self) -> &Path {
182 self.0.as_ref()
183 }
184}
185
186impl Default for RepositoryWorkDirectory {
187 fn default() -> Self {
188 RepositoryWorkDirectory(Arc::from(Path::new("")))
189 }
190}
191
/// A `Snapshot` plus the local-only state needed to maintain it: gitignore
/// files, open git repositories, and entry-id bookkeeping.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    /// Parsed `.gitignore` files keyed by the absolute path of the directory
    /// containing them, along with the scan id at which each was loaded.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    git_repositories: TreeMap<ProjectEntryId, Arc<Mutex<dyn GitRepository>>>,
    /// Entry ids of removed entries, retained so that a re-created path can
    /// be assigned its previous id.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
201
202impl Deref for LocalSnapshot {
203 type Target = Snapshot;
204
205 fn deref(&self) -> &Self::Target {
206 &self.snapshot
207 }
208}
209
210impl DerefMut for LocalSnapshot {
211 fn deref_mut(&mut self) -> &mut Self::Target {
212 &mut self.snapshot
213 }
214}
215
/// Messages sent by the background scanner to the `LocalWorktree` model.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// The paths that changed since the previous snapshot.
        changes: HashMap<Arc<Path>, PathChange>,
        /// When present, dropped after the update is applied so that the
        /// party that requested the rescan can observe its completion.
        barrier: Option<barrier::Sender>,
        /// Whether the scanner is still working after this update.
        scanning: bool,
    },
}
225
/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    /// Feeds new snapshots to the task that streams updates to the server.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    /// Signalled to resume sending updates after a send failure.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
232
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// Entries changed on disk or via a remote update; maps each changed
    /// path to the kind of change.
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// Git repositories within the worktree were added, removed, or updated.
    UpdatedGitRepositories(Vec<RepositoryEntry>),
}
237
// Registers `Worktree` as a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
241
impl Worktree {
    /// Creates a worktree for a local file or directory rooted at `path`.
    ///
    /// Stats the root up front so the snapshot's root name can be populated,
    /// then spawns two tasks:
    /// - a foreground task that applies `ScanState` messages from the
    ///   background scanner to this model (updating `is_scanning`, swapping
    ///   in new snapshots, and emitting `Event::UpdatedEntries`);
    /// - a background task that runs a `BackgroundScanner` over filesystem
    ///   events for the root path.
    ///
    /// Returns an error if the worktree path cannot be stat'ed.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry, if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner into this model
            // for as long as the model is alive.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier wakes whoever requested
                                // this batch of path changes.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a remote worktree that mirrors a worktree on another machine.
    ///
    /// Spawns a background task that applies incoming
    /// `proto::UpdateWorktree` messages to `background_snapshot`, and a
    /// foreground task that copies that snapshot into the model, emits
    /// events, and wakes any `snapshot_subscriptions` whose scan id has been
    /// observed.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply remote updates off the main thread, notifying the
            // foreground task below after each one.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Wake waiters whose scan id has now been observed;
                            // the queue is sorted, so stop at the first miss.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// Returns the local variant of this worktree, if it is one.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the remote variant of this worktree, if it is one.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of [`Worktree::as_local`].
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of [`Worktree::as_remote`].
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Whether this worktree lives on the local filesystem.
    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    /// Whether this worktree is a replica of a worktree on another machine.
    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// Returns a copy of the worktree's current `Snapshot`.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The id of the most recent scan that has fully completed.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    /// Whether this worktree is shown in the project panel.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The replica id of this worktree; local worktrees are always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over all diagnostic summaries in this worktree, flattened to
    /// `(path, language server id, summary)` triples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
536
537impl LocalWorktree {
538 pub fn contains_abs_path(&self, path: &Path) -> bool {
539 path.starts_with(&self.abs_path)
540 }
541
542 fn absolutize(&self, path: &Path) -> PathBuf {
543 if path.file_name().is_some() {
544 self.abs_path.join(path)
545 } else {
546 self.abs_path.to_path_buf()
547 }
548 }
549
550 pub(crate) fn load_buffer(
551 &mut self,
552 id: u64,
553 path: &Path,
554 cx: &mut ModelContext<Worktree>,
555 ) -> Task<Result<ModelHandle<Buffer>>> {
556 let path = Arc::from(path);
557 cx.spawn(move |this, mut cx| async move {
558 let (file, contents, diff_base) = this
559 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
560 .await?;
561 let text_buffer = cx
562 .background()
563 .spawn(async move { text::Buffer::new(0, id, contents) })
564 .await;
565 Ok(cx.add_model(|cx| {
566 let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
567 buffer.git_diff_recalc(cx);
568 buffer
569 }))
570 })
571 }
572
573 pub fn diagnostics_for_path(
574 &self,
575 path: &Path,
576 ) -> Vec<(
577 LanguageServerId,
578 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
579 )> {
580 self.diagnostics.get(path).cloned().unwrap_or_default()
581 }
582
583 pub fn update_diagnostics(
584 &mut self,
585 server_id: LanguageServerId,
586 worktree_path: Arc<Path>,
587 diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
588 _: &mut ModelContext<Worktree>,
589 ) -> Result<bool> {
590 let summaries_by_server_id = self
591 .diagnostic_summaries
592 .entry(worktree_path.clone())
593 .or_default();
594
595 let old_summary = summaries_by_server_id
596 .remove(&server_id)
597 .unwrap_or_default();
598
599 let new_summary = DiagnosticSummary::new(&diagnostics);
600 if new_summary.is_empty() {
601 if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
602 if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
603 diagnostics_by_server_id.remove(ix);
604 }
605 if diagnostics_by_server_id.is_empty() {
606 self.diagnostics.remove(&worktree_path);
607 }
608 }
609 } else {
610 summaries_by_server_id.insert(server_id, new_summary);
611 let diagnostics_by_server_id =
612 self.diagnostics.entry(worktree_path.clone()).or_default();
613 match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
614 Ok(ix) => {
615 diagnostics_by_server_id[ix] = (server_id, diagnostics);
616 }
617 Err(ix) => {
618 diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
619 }
620 }
621 }
622
623 if !old_summary.is_empty() || !new_summary.is_empty() {
624 if let Some(share) = self.share.as_ref() {
625 self.client
626 .send(proto::UpdateDiagnosticSummary {
627 project_id: share.project_id,
628 worktree_id: self.id().to_proto(),
629 summary: Some(proto::DiagnosticSummary {
630 path: worktree_path.to_string_lossy().to_string(),
631 language_server_id: server_id.0 as u64,
632 error_count: new_summary.error_count as u32,
633 warning_count: new_summary.warning_count as u32,
634 }),
635 })
636 .log_err();
637 }
638 }
639
640 Ok(!old_summary.is_empty() || !new_summary.is_empty())
641 }
642
643 fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
644 let updated_repos = Self::changed_repos(
645 &self.snapshot.repository_entries,
646 &new_snapshot.repository_entries,
647 );
648 self.snapshot = new_snapshot;
649
650 if let Some(share) = self.share.as_mut() {
651 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
652 }
653
654 if !updated_repos.is_empty() {
655 cx.emit(Event::UpdatedGitRepositories(updated_repos));
656 }
657 }
658
659 fn changed_repos(
660 old_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
661 new_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
662 ) -> Vec<RepositoryEntry> {
663 fn diff<'a>(
664 a: impl Iterator<Item = &'a RepositoryEntry>,
665 mut b: impl Iterator<Item = &'a RepositoryEntry>,
666 updated: &mut HashMap<&'a Path, RepositoryEntry>,
667 ) {
668 for a_repo in a {
669 let matched = b.find(|b_repo| {
670 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
671 });
672
673 if matched.is_none() {
674 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
675 }
676 }
677 }
678
679 let mut updated = HashMap::<&Path, RepositoryEntry>::default();
680
681 diff(old_repos.values(), new_repos.values(), &mut updated);
682 diff(new_repos.values(), old_repos.values(), &mut updated);
683
684 updated.into_values().collect()
685 }
686
687 pub fn scan_complete(&self) -> impl Future<Output = ()> {
688 let mut is_scanning_rx = self.is_scanning.1.clone();
689 async move {
690 let mut is_scanning = is_scanning_rx.borrow().clone();
691 while is_scanning {
692 if let Some(value) = is_scanning_rx.recv().await {
693 is_scanning = value;
694 } else {
695 break;
696 }
697 }
698 }
699 }
700
701 pub fn snapshot(&self) -> LocalSnapshot {
702 self.snapshot.clone()
703 }
704
705 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
706 proto::WorktreeMetadata {
707 id: self.id().to_proto(),
708 root_name: self.root_name().to_string(),
709 visible: self.visible,
710 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
711 }
712 }
713
714 fn load(
715 &self,
716 path: &Path,
717 cx: &mut ModelContext<Worktree>,
718 ) -> Task<Result<(File, String, Option<String>)>> {
719 let handle = cx.handle();
720 let path = Arc::from(path);
721 let abs_path = self.absolutize(&path);
722 let fs = self.fs.clone();
723 let snapshot = self.snapshot();
724
725 let mut index_task = None;
726
727 if let Some(repo) = snapshot.repo_for(&path) {
728 let repo_path = repo.work_directory.relativize(&path).unwrap();
729 if let Some(repo) = self.git_repositories.get(&repo.git_dir_entry_id) {
730 let repo = repo.to_owned();
731 index_task = Some(
732 cx.background()
733 .spawn(async move { repo.lock().load_index_text(&repo_path) }),
734 );
735 }
736 }
737
738 cx.spawn(|this, mut cx| async move {
739 let text = fs.load(&abs_path).await?;
740
741 let diff_base = if let Some(index_task) = index_task {
742 index_task.await
743 } else {
744 None
745 };
746
747 // Eagerly populate the snapshot with an updated entry for the loaded file
748 let entry = this
749 .update(&mut cx, |this, cx| {
750 this.as_local().unwrap().refresh_entry(path, None, cx)
751 })
752 .await?;
753
754 Ok((
755 File {
756 entry_id: entry.id,
757 worktree: handle,
758 path: entry.path,
759 mtime: entry.mtime,
760 is_local: true,
761 is_deleted: false,
762 },
763 text,
764 diff_base,
765 ))
766 })
767 }
768
769 pub fn save_buffer(
770 &self,
771 buffer_handle: ModelHandle<Buffer>,
772 path: Arc<Path>,
773 has_changed_file: bool,
774 cx: &mut ModelContext<Worktree>,
775 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
776 let handle = cx.handle();
777 let buffer = buffer_handle.read(cx);
778
779 let rpc = self.client.clone();
780 let buffer_id = buffer.remote_id();
781 let project_id = self.share.as_ref().map(|share| share.project_id);
782
783 let text = buffer.as_rope().clone();
784 let fingerprint = text.fingerprint();
785 let version = buffer.version();
786 let save = self.write_file(path, text, buffer.line_ending(), cx);
787
788 cx.as_mut().spawn(|mut cx| async move {
789 let entry = save.await?;
790
791 if has_changed_file {
792 let new_file = Arc::new(File {
793 entry_id: entry.id,
794 worktree: handle,
795 path: entry.path,
796 mtime: entry.mtime,
797 is_local: true,
798 is_deleted: false,
799 });
800
801 if let Some(project_id) = project_id {
802 rpc.send(proto::UpdateBufferFile {
803 project_id,
804 buffer_id,
805 file: Some(new_file.to_proto()),
806 })
807 .log_err();
808 }
809
810 buffer_handle.update(&mut cx, |buffer, cx| {
811 if has_changed_file {
812 buffer.file_updated(new_file, cx).detach();
813 }
814 });
815 }
816
817 if let Some(project_id) = project_id {
818 rpc.send(proto::BufferSaved {
819 project_id,
820 buffer_id,
821 version: serialize_version(&version),
822 mtime: Some(entry.mtime.into()),
823 fingerprint: serialize_fingerprint(fingerprint),
824 })?;
825 }
826
827 buffer_handle.update(&mut cx, |buffer, cx| {
828 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
829 });
830
831 Ok((version, fingerprint, entry.mtime))
832 })
833 }
834
835 pub fn create_entry(
836 &self,
837 path: impl Into<Arc<Path>>,
838 is_dir: bool,
839 cx: &mut ModelContext<Worktree>,
840 ) -> Task<Result<Entry>> {
841 let path = path.into();
842 let abs_path = self.absolutize(&path);
843 let fs = self.fs.clone();
844 let write = cx.background().spawn(async move {
845 if is_dir {
846 fs.create_dir(&abs_path).await
847 } else {
848 fs.save(&abs_path, &Default::default(), Default::default())
849 .await
850 }
851 });
852
853 cx.spawn(|this, mut cx| async move {
854 write.await?;
855 this.update(&mut cx, |this, cx| {
856 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
857 })
858 .await
859 })
860 }
861
862 pub fn write_file(
863 &self,
864 path: impl Into<Arc<Path>>,
865 text: Rope,
866 line_ending: LineEnding,
867 cx: &mut ModelContext<Worktree>,
868 ) -> Task<Result<Entry>> {
869 let path = path.into();
870 let abs_path = self.absolutize(&path);
871 let fs = self.fs.clone();
872 let write = cx
873 .background()
874 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
875
876 cx.spawn(|this, mut cx| async move {
877 write.await?;
878 this.update(&mut cx, |this, cx| {
879 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
880 })
881 .await
882 })
883 }
884
885 pub fn delete_entry(
886 &self,
887 entry_id: ProjectEntryId,
888 cx: &mut ModelContext<Worktree>,
889 ) -> Option<Task<Result<()>>> {
890 let entry = self.entry_for_id(entry_id)?.clone();
891 let abs_path = self.abs_path.clone();
892 let fs = self.fs.clone();
893
894 let delete = cx.background().spawn(async move {
895 let mut abs_path = fs.canonicalize(&abs_path).await?;
896 if entry.path.file_name().is_some() {
897 abs_path = abs_path.join(&entry.path);
898 }
899 if entry.is_file() {
900 fs.remove_file(&abs_path, Default::default()).await?;
901 } else {
902 fs.remove_dir(
903 &abs_path,
904 RemoveOptions {
905 recursive: true,
906 ignore_if_not_exists: false,
907 },
908 )
909 .await?;
910 }
911 anyhow::Ok(abs_path)
912 });
913
914 Some(cx.spawn(|this, mut cx| async move {
915 let abs_path = delete.await?;
916 let (tx, mut rx) = barrier::channel();
917 this.update(&mut cx, |this, _| {
918 this.as_local_mut()
919 .unwrap()
920 .path_changes_tx
921 .try_send((vec![abs_path], tx))
922 })?;
923 rx.recv().await;
924 Ok(())
925 }))
926 }
927
928 pub fn rename_entry(
929 &self,
930 entry_id: ProjectEntryId,
931 new_path: impl Into<Arc<Path>>,
932 cx: &mut ModelContext<Worktree>,
933 ) -> Option<Task<Result<Entry>>> {
934 let old_path = self.entry_for_id(entry_id)?.path.clone();
935 let new_path = new_path.into();
936 let abs_old_path = self.absolutize(&old_path);
937 let abs_new_path = self.absolutize(&new_path);
938 let fs = self.fs.clone();
939 let rename = cx.background().spawn(async move {
940 fs.rename(&abs_old_path, &abs_new_path, Default::default())
941 .await
942 });
943
944 Some(cx.spawn(|this, mut cx| async move {
945 rename.await?;
946 this.update(&mut cx, |this, cx| {
947 this.as_local_mut()
948 .unwrap()
949 .refresh_entry(new_path.clone(), Some(old_path), cx)
950 })
951 .await
952 }))
953 }
954
955 pub fn copy_entry(
956 &self,
957 entry_id: ProjectEntryId,
958 new_path: impl Into<Arc<Path>>,
959 cx: &mut ModelContext<Worktree>,
960 ) -> Option<Task<Result<Entry>>> {
961 let old_path = self.entry_for_id(entry_id)?.path.clone();
962 let new_path = new_path.into();
963 let abs_old_path = self.absolutize(&old_path);
964 let abs_new_path = self.absolutize(&new_path);
965 let fs = self.fs.clone();
966 let copy = cx.background().spawn(async move {
967 copy_recursive(
968 fs.as_ref(),
969 &abs_old_path,
970 &abs_new_path,
971 Default::default(),
972 )
973 .await
974 });
975
976 Some(cx.spawn(|this, mut cx| async move {
977 copy.await?;
978 this.update(&mut cx, |this, cx| {
979 this.as_local_mut()
980 .unwrap()
981 .refresh_entry(new_path.clone(), None, cx)
982 })
983 .await
984 }))
985 }
986
987 fn refresh_entry(
988 &self,
989 path: Arc<Path>,
990 old_path: Option<Arc<Path>>,
991 cx: &mut ModelContext<Worktree>,
992 ) -> Task<Result<Entry>> {
993 let fs = self.fs.clone();
994 let abs_root_path = self.abs_path.clone();
995 let path_changes_tx = self.path_changes_tx.clone();
996 cx.spawn_weak(move |this, mut cx| async move {
997 let abs_path = fs.canonicalize(&abs_root_path).await?;
998 let mut paths = Vec::with_capacity(2);
999 paths.push(if path.file_name().is_some() {
1000 abs_path.join(&path)
1001 } else {
1002 abs_path.clone()
1003 });
1004 if let Some(old_path) = old_path {
1005 paths.push(if old_path.file_name().is_some() {
1006 abs_path.join(&old_path)
1007 } else {
1008 abs_path.clone()
1009 });
1010 }
1011
1012 let (tx, mut rx) = barrier::channel();
1013 path_changes_tx.try_send((paths, tx))?;
1014 rx.recv().await;
1015 this.upgrade(&cx)
1016 .ok_or_else(|| anyhow!("worktree was dropped"))?
1017 .update(&mut cx, |this, _| {
1018 this.entry_for_path(path)
1019 .cloned()
1020 .ok_or_else(|| anyhow!("failed to read path after update"))
1021 })
1022 })
1023 }
1024
1025 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1026 let (share_tx, share_rx) = oneshot::channel();
1027
1028 if let Some(share) = self.share.as_mut() {
1029 let _ = share_tx.send(());
1030 *share.resume_updates.borrow_mut() = ();
1031 } else {
1032 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
1033 let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
1034 let worktree_id = cx.model_id() as u64;
1035
1036 for (path, summaries) in &self.diagnostic_summaries {
1037 for (&server_id, summary) in summaries {
1038 if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1039 project_id,
1040 worktree_id,
1041 summary: Some(summary.to_proto(server_id, &path)),
1042 }) {
1043 return Task::ready(Err(e));
1044 }
1045 }
1046 }
1047
1048 let _maintain_remote_snapshot = cx.background().spawn({
1049 let client = self.client.clone();
1050 async move {
1051 let mut share_tx = Some(share_tx);
1052 let mut prev_snapshot = LocalSnapshot {
1053 ignores_by_parent_abs_path: Default::default(),
1054 removed_entry_ids: Default::default(),
1055 next_entry_id: Default::default(),
1056 git_repositories: Default::default(),
1057 snapshot: Snapshot {
1058 id: WorktreeId(worktree_id as usize),
1059 abs_path: Path::new("").into(),
1060 root_name: Default::default(),
1061 root_char_bag: Default::default(),
1062 entries_by_path: Default::default(),
1063 entries_by_id: Default::default(),
1064 repository_entries: Default::default(),
1065 scan_id: 0,
1066 completed_scan_id: 0,
1067 },
1068 };
1069 while let Some(snapshot) = snapshots_rx.recv().await {
1070 #[cfg(any(test, feature = "test-support"))]
1071 const MAX_CHUNK_SIZE: usize = 2;
1072 #[cfg(not(any(test, feature = "test-support")))]
1073 const MAX_CHUNK_SIZE: usize = 256;
1074
1075 let update =
1076 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1077 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1078 let _ = resume_updates_rx.try_recv();
1079 while let Err(error) = client.request(update.clone()).await {
1080 log::error!("failed to send worktree update: {}", error);
1081 log::info!("waiting to resume updates");
1082 if resume_updates_rx.next().await.is_none() {
1083 return Ok(());
1084 }
1085 }
1086 }
1087
1088 if let Some(share_tx) = share_tx.take() {
1089 let _ = share_tx.send(());
1090 }
1091
1092 prev_snapshot = snapshot;
1093 }
1094
1095 Ok::<_, anyhow::Error>(())
1096 }
1097 .log_err()
1098 });
1099
1100 self.share = Some(ShareState {
1101 project_id,
1102 snapshots_tx,
1103 resume_updates: resume_updates_tx,
1104 _maintain_remote_snapshot,
1105 });
1106 }
1107
1108 cx.foreground()
1109 .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1110 }
1111
1112 pub fn unshare(&mut self) {
1113 self.share.take();
1114 }
1115
1116 pub fn is_shared(&self) -> bool {
1117 self.share.is_some()
1118 }
1119
1120 pub fn load_index_text(
1121 &self,
1122 repo: RepositoryEntry,
1123 repo_path: RepoPath,
1124 cx: &mut ModelContext<Worktree>,
1125 ) -> Task<Option<String>> {
1126 let Some(git_ptr) = self.git_repositories.get(&repo.git_dir_entry_id).map(|git_ptr| git_ptr.to_owned()) else {
1127 return Task::Ready(Some(None))
1128 };
1129 cx.background()
1130 .spawn(async move { git_ptr.lock().load_index_text(&repo_path) })
1131 }
1132}
1133
1134impl RemoteWorktree {
    /// Returns a copy of the worktree's current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1138
1139 pub fn disconnected_from_host(&mut self) {
1140 self.updates_tx.take();
1141 self.snapshot_subscriptions.clear();
1142 self.disconnected = true;
1143 }
1144
    /// Asks the host to save the given buffer over RPC, then applies the
    /// returned version, fingerprint, and mtime to the local buffer.
    ///
    /// Fails if the request fails, or if the response is missing its mtime.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1177
1178 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1179 if let Some(updates_tx) = &self.updates_tx {
1180 updates_tx
1181 .unbounded_send(update)
1182 .expect("consumer runs to completion");
1183 }
1184 }
1185
1186 fn observed_snapshot(&self, scan_id: usize) -> bool {
1187 self.completed_scan_id >= scan_id
1188 }
1189
1190 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1191 let (tx, rx) = oneshot::channel();
1192 if self.observed_snapshot(scan_id) {
1193 let _ = tx.send(());
1194 } else if self.disconnected {
1195 drop(tx);
1196 } else {
1197 match self
1198 .snapshot_subscriptions
1199 .binary_search_by_key(&scan_id, |probe| probe.0)
1200 {
1201 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1202 }
1203 }
1204
1205 async move {
1206 rx.await?;
1207 Ok(())
1208 }
1209 }
1210
1211 pub fn update_diagnostic_summary(
1212 &mut self,
1213 path: Arc<Path>,
1214 summary: &proto::DiagnosticSummary,
1215 ) {
1216 let server_id = LanguageServerId(summary.language_server_id as usize);
1217 let summary = DiagnosticSummary {
1218 error_count: summary.error_count as usize,
1219 warning_count: summary.warning_count as usize,
1220 };
1221
1222 if summary.is_empty() {
1223 if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1224 summaries.remove(&server_id);
1225 if summaries.is_empty() {
1226 self.diagnostic_summaries.remove(&path);
1227 }
1228 }
1229 } else {
1230 self.diagnostic_summaries
1231 .entry(path)
1232 .or_default()
1233 .insert(server_id, summary);
1234 }
1235 }
1236
1237 pub fn insert_entry(
1238 &mut self,
1239 entry: proto::Entry,
1240 scan_id: usize,
1241 cx: &mut ModelContext<Worktree>,
1242 ) -> Task<Result<Entry>> {
1243 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1244 cx.spawn(|this, mut cx| async move {
1245 wait_for_snapshot.await?;
1246 this.update(&mut cx, |worktree, _| {
1247 let worktree = worktree.as_remote_mut().unwrap();
1248 let mut snapshot = worktree.background_snapshot.lock();
1249 let entry = snapshot.insert_entry(entry);
1250 worktree.snapshot = snapshot.clone();
1251 entry
1252 })
1253 })
1254 }
1255
1256 pub(crate) fn delete_entry(
1257 &mut self,
1258 id: ProjectEntryId,
1259 scan_id: usize,
1260 cx: &mut ModelContext<Worktree>,
1261 ) -> Task<Result<()>> {
1262 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1263 cx.spawn(|this, mut cx| async move {
1264 wait_for_snapshot.await?;
1265 this.update(&mut cx, |worktree, _| {
1266 let worktree = worktree.as_remote_mut().unwrap();
1267 let mut snapshot = worktree.background_snapshot.lock();
1268 snapshot.delete_entry(id);
1269 worktree.snapshot = snapshot.clone();
1270 });
1271 Ok(())
1272 })
1273 }
1274}
1275
impl Snapshot {
    /// The id of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Absolute path of the worktree root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Insert (or replace) an entry deserialized from the wire, keeping the
    /// by-id and by-path trees consistent. Fails if the proto entry cannot
    /// be converted.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id previously mapped to a different path, drop the stale
        // by-path record before inserting the new one.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Remove the entry with `entry_id` together with everything beneath its
    /// path, returning the removed entry's path (`None` if the id is unknown).
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and all of its descendants, removing
            // their by-id records as we go...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the remaining suffix unchanged.
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

    /// Apply an incremental update received from the host, editing both
    /// trees in bulk and advancing the scan ids.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // An updated entry may have moved: remove the old path record
            // associated with its id before inserting the new one.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of file entries that are not ignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Begin a traversal positioned at the `start_offset`-th entry, counting
    /// only the entry kinds selected by the two flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Begin a traversal positioned at `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterate over file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterate over all entries, directories included.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterate over the paths of all entries, excluding the root entry
    /// (whose worktree-relative path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterate over the immediate children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root, if one has been recorded yet.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Id of the most recent scan reflected in this snapshot.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Look up the entry at exactly `path`, if any.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The seek lands on the nearest position; only return the
                // entry if it is an exact path match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Look up an entry by id, resolving through its recorded path.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1472
impl LocalSnapshot {
    /// Find the repository whose work directory contains `path`, preferring
    /// the longest (deepest) matching work directory.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
        let mut max_len = 0;
        let mut current_candidate = None;
        for (work_directory, repo) in (&self.repository_entries).iter() {
            if work_directory.contains(path) {
                if work_directory.0.as_os_str().len() > max_len {
                    current_candidate = Some(repo);
                    max_len = work_directory.0.as_os_str().len();
                } else {
                    // NOTE(review): containing work directories form an
                    // ancestor chain, so in sorted order their lengths only
                    // grow; this early exit looks unreachable — confirm.
                    break;
                }
            }
        }

        current_candidate.map(|entry| entry.to_owned())
    }

    /// Find the repository whose `.git` directory contains `path`, returning
    /// its work directory key and the open repository handle.
    pub(crate) fn repo_for_metadata(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, Arc<Mutex<dyn GitRepository>>)> {
        self.repository_entries
            .iter()
            .find(|(_, repo)| repo.in_dot_git(path))
            .map(|(work_directory, entry)| {
                (
                    work_directory.to_owned(),
                    self.git_repositories
                        .get(&entry.git_dir_entry_id)
                        .expect("These two data structures should be in sync")
                        .to_owned(),
                )
            })
    }

    /// Test helper: build an update message describing the entire snapshot
    /// as if it were being shared for the first time.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Diff this snapshot against an older one (`other`) by merging their
    /// id-ordered entry lists: entries only in `self` (or whose scan id
    /// changed) are reported as updated, entries only in `other` as removed.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present only in the new snapshot: added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: report only if rescanned since.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in the old snapshot: removed.
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Insert `entry` into the snapshot. If the entry is a `.gitignore`
    /// file, (re)load it and record it under its parent directory. Reuses
    /// existing entry ids where possible so ids stay stable.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote an already-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If the path was previously occupied by a different id, drop the
        // stale by-id record.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Record the scanned children of `parent_path`, marking the parent as a
    /// fully scanned directory, registering its `.gitignore` (if any), and
    /// opening a git repository when the parent is a `.git` directory.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // Not a directory: nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            // The repository's work directory is the parent of `.git`.
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();

            let key = RepositoryWorkDirectory(content_path.clone());
            if self.repository_entries.get(&key).is_none() {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.repository_entries.insert(
                        key.clone(),
                        RepositoryEntry {
                            git_dir_path: parent_path.clone(),
                            git_dir_entry_id: parent_entry.id,
                            work_directory: key,
                            scan_id: 0,
                        },
                    );

                    self.git_repositories.insert(parent_entry.id, repo)
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Give `entry` a stable id: reuse the id of a previously removed entry
    /// with the same inode, or of an existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Remove `path` and everything beneath it, remembering removed ids by
    /// inode (so a re-appearing file can keep its id), and bump the scan id
    /// of the affected `.gitignore` / repository records.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            // Keep everything before `path`, collect `path` and its
            // descendants, then re-append the remaining suffix.
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the greatest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the ignore file as changed in this scan.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            // Mark the repository whose `.git` directory was removed.
            let repo_entry_key = RepositoryWorkDirectory(path.parent().unwrap().into());
            self.snapshot
                .repository_entries
                .update(&repo_entry_key, |repo| repo.scan_id = self.snapshot.scan_id);
        }
    }

    /// Inodes of all known ancestors of `path` (used to detect symlink
    /// cycles during scanning — TODO confirm against callers).
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Build the stack of `.gitignore` matchers that applies to `abs_path`,
    /// walking from the outermost ancestor inward. Collapses to "ignore
    /// everything" as soon as any ancestor (or the path itself) is ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        // Iterate outermost-first (ancestors() yields innermost-first).
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1799
1800async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1801 let contents = fs.load(abs_path).await?;
1802 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1803 let mut builder = GitignoreBuilder::new(parent);
1804 for line in contents.lines() {
1805 builder.add_line(Some(abs_path.into()), line)?;
1806 }
1807 Ok(builder.build()?)
1808}
1809
1810impl WorktreeId {
1811 pub fn from_usize(handle_id: usize) -> Self {
1812 Self(handle_id)
1813 }
1814
1815 pub(crate) fn from_proto(id: u64) -> Self {
1816 Self(id as usize)
1817 }
1818
1819 pub fn to_proto(&self) -> u64 {
1820 self.0 as u64
1821 }
1822
1823 pub fn to_usize(&self) -> usize {
1824 self.0
1825 }
1826}
1827
1828impl fmt::Display for WorktreeId {
1829 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1830 self.0.fmt(f)
1831 }
1832}
1833
1834impl Deref for Worktree {
1835 type Target = Snapshot;
1836
1837 fn deref(&self) -> &Self::Target {
1838 match self {
1839 Worktree::Local(worktree) => &worktree.snapshot,
1840 Worktree::Remote(worktree) => &worktree.snapshot,
1841 }
1842 }
1843}
1844
1845impl Deref for LocalWorktree {
1846 type Target = LocalSnapshot;
1847
1848 fn deref(&self) -> &Self::Target {
1849 &self.snapshot
1850 }
1851}
1852
1853impl Deref for RemoteWorktree {
1854 type Target = Snapshot;
1855
1856 fn deref(&self) -> &Self::Target {
1857 &self.snapshot
1858 }
1859}
1860
impl fmt::Debug for LocalWorktree {
    // Delegates to the snapshot's Debug output (NOTE(review): this call may
    // resolve through deref coercion to `Snapshot`'s impl — confirm).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1866
1867impl fmt::Debug for Snapshot {
1868 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1869 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1870 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1871
1872 impl<'a> fmt::Debug for EntriesByPath<'a> {
1873 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1874 f.debug_map()
1875 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1876 .finish()
1877 }
1878 }
1879
1880 impl<'a> fmt::Debug for EntriesById<'a> {
1881 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1882 f.debug_list().entries(self.0.iter()).finish()
1883 }
1884 }
1885
1886 f.debug_struct("Snapshot")
1887 .field("id", &self.id)
1888 .field("root_name", &self.root_name)
1889 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1890 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1891 .finish()
1892 }
1893}
1894
/// A file belonging to a worktree, as seen by the language layer.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree that contains this file.
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time of the file.
    pub mtime: SystemTime,
    /// Id of the worktree entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether the containing worktree is local (vs. remote).
    pub(crate) is_local: bool,
    /// Whether the file has been deleted from the worktree.
    pub(crate) is_deleted: bool,
}
1904
1905impl language::File for File {
1906 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1907 if self.is_local {
1908 Some(self)
1909 } else {
1910 None
1911 }
1912 }
1913
1914 fn mtime(&self) -> SystemTime {
1915 self.mtime
1916 }
1917
1918 fn path(&self) -> &Arc<Path> {
1919 &self.path
1920 }
1921
1922 fn full_path(&self, cx: &AppContext) -> PathBuf {
1923 let mut full_path = PathBuf::new();
1924 let worktree = self.worktree.read(cx);
1925
1926 if worktree.is_visible() {
1927 full_path.push(worktree.root_name());
1928 } else {
1929 let path = worktree.abs_path();
1930
1931 if worktree.is_local() && path.starts_with(HOME.as_path()) {
1932 full_path.push("~");
1933 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
1934 } else {
1935 full_path.push(path)
1936 }
1937 }
1938
1939 if self.path.components().next().is_some() {
1940 full_path.push(&self.path);
1941 }
1942
1943 full_path
1944 }
1945
1946 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1947 /// of its worktree, then this method will return the name of the worktree itself.
1948 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1949 self.path
1950 .file_name()
1951 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1952 }
1953
1954 fn is_deleted(&self) -> bool {
1955 self.is_deleted
1956 }
1957
1958 fn as_any(&self) -> &dyn Any {
1959 self
1960 }
1961
1962 fn to_proto(&self) -> rpc::proto::File {
1963 rpc::proto::File {
1964 worktree_id: self.worktree.id() as u64,
1965 entry_id: self.entry_id.to_proto(),
1966 path: self.path.to_string_lossy().into(),
1967 mtime: Some(self.mtime.into()),
1968 is_deleted: self.is_deleted,
1969 }
1970 }
1971}
1972
1973impl language::LocalFile for File {
1974 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1975 self.worktree
1976 .read(cx)
1977 .as_local()
1978 .unwrap()
1979 .abs_path
1980 .join(&self.path)
1981 }
1982
1983 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1984 let worktree = self.worktree.read(cx).as_local().unwrap();
1985 let abs_path = worktree.absolutize(&self.path);
1986 let fs = worktree.fs.clone();
1987 cx.background()
1988 .spawn(async move { fs.load(&abs_path).await })
1989 }
1990
1991 fn buffer_reloaded(
1992 &self,
1993 buffer_id: u64,
1994 version: &clock::Global,
1995 fingerprint: RopeFingerprint,
1996 line_ending: LineEnding,
1997 mtime: SystemTime,
1998 cx: &mut AppContext,
1999 ) {
2000 let worktree = self.worktree.read(cx).as_local().unwrap();
2001 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2002 worktree
2003 .client
2004 .send(proto::BufferReloaded {
2005 project_id,
2006 buffer_id,
2007 version: serialize_version(version),
2008 mtime: Some(mtime.into()),
2009 fingerprint: serialize_fingerprint(fingerprint),
2010 line_ending: serialize_line_ending(line_ending) as i32,
2011 })
2012 .log_err();
2013 }
2014 }
2015}
2016
2017impl File {
2018 pub fn from_proto(
2019 proto: rpc::proto::File,
2020 worktree: ModelHandle<Worktree>,
2021 cx: &AppContext,
2022 ) -> Result<Self> {
2023 let worktree_id = worktree
2024 .read(cx)
2025 .as_remote()
2026 .ok_or_else(|| anyhow!("not remote"))?
2027 .id();
2028
2029 if worktree_id.to_proto() != proto.worktree_id {
2030 return Err(anyhow!("worktree id does not match file"));
2031 }
2032
2033 Ok(Self {
2034 worktree,
2035 path: Path::new(&proto.path).into(),
2036 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2037 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2038 is_local: false,
2039 is_deleted: proto.is_deleted,
2040 })
2041 }
2042
2043 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2044 file.and_then(|f| f.as_any().downcast_ref())
2045 }
2046
2047 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2048 self.worktree.read(cx).id()
2049 }
2050
2051 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2052 if self.is_deleted {
2053 None
2054 } else {
2055 Some(self.entry_id)
2056 }
2057 }
2058}
2059
/// A single file or directory tracked by a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable id used to refer to this entry across snapshots.
    pub id: ProjectEntryId,
    /// Whether this is a file or a (possibly not-yet-scanned) directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode; used to keep ids stable when paths reappear.
    pub inode: u64,
    /// Modification time reported by the filesystem.
    pub mtime: SystemTime,
    /// Whether the entry is a symbolic link.
    pub is_symlink: bool,
    /// Whether the entry is excluded by a gitignore rule.
    pub is_ignored: bool,
}
2070
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose children have not been scanned yet.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A regular file, with a precomputed character bag for fuzzy matching.
    File(CharBag),
}
2077
/// How a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// The path was created.
    Added,
    /// The path was deleted.
    Removed,
    /// The path was modified in place.
    Updated,
    /// The path was created or modified; used when it is not known whether
    /// the path previously existed.
    AddedOrUpdated,
}
2085
2086impl Entry {
2087 fn new(
2088 path: Arc<Path>,
2089 metadata: &fs::Metadata,
2090 next_entry_id: &AtomicUsize,
2091 root_char_bag: CharBag,
2092 ) -> Self {
2093 Self {
2094 id: ProjectEntryId::new(next_entry_id),
2095 kind: if metadata.is_dir {
2096 EntryKind::PendingDir
2097 } else {
2098 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2099 },
2100 path,
2101 inode: metadata.inode,
2102 mtime: metadata.mtime,
2103 is_symlink: metadata.is_symlink,
2104 is_ignored: false,
2105 }
2106 }
2107
2108 pub fn is_dir(&self) -> bool {
2109 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2110 }
2111
2112 pub fn is_file(&self) -> bool {
2113 matches!(self.kind, EntryKind::File(_))
2114 }
2115}
2116
2117impl sum_tree::Item for Entry {
2118 type Summary = EntrySummary;
2119
2120 fn summary(&self) -> Self::Summary {
2121 let visible_count = if self.is_ignored { 0 } else { 1 };
2122 let file_count;
2123 let visible_file_count;
2124 if self.is_file() {
2125 file_count = 1;
2126 visible_file_count = visible_count;
2127 } else {
2128 file_count = 0;
2129 visible_file_count = 0;
2130 }
2131
2132 EntrySummary {
2133 max_path: self.path.clone(),
2134 count: 1,
2135 visible_count,
2136 file_count,
2137 visible_file_count,
2138 }
2139 }
2140}
2141
2142impl sum_tree::KeyedItem for Entry {
2143 type Key = PathKey;
2144
2145 fn key(&self) -> Self::Key {
2146 PathKey(self.path.clone())
2147 }
2148}
2149
/// Aggregate data maintained for each node of the by-path entry tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest path in the summarized subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Number of entries that are not ignored.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of file entries that are not ignored.
    visible_file_count: usize,
}
2158
2159impl Default for EntrySummary {
2160 fn default() -> Self {
2161 Self {
2162 max_path: Arc::from(Path::new("")),
2163 count: 0,
2164 visible_count: 0,
2165 file_count: 0,
2166 visible_file_count: 0,
2167 }
2168 }
2169}
2170
2171impl sum_tree::Summary for EntrySummary {
2172 type Context = ();
2173
2174 fn add_summary(&mut self, rhs: &Self, _: &()) {
2175 self.max_path = rhs.max_path.clone();
2176 self.count += rhs.count;
2177 self.visible_count += rhs.visible_count;
2178 self.file_count += rhs.file_count;
2179 self.visible_file_count += rhs.visible_file_count;
2180 }
2181}
2182
/// Compact record of an entry, stored in the by-id tree.
#[derive(Clone, Debug)]
struct PathEntry {
    /// The entry's stable id (also the tree key).
    id: ProjectEntryId,
    /// Path relative to the worktree root.
    path: Arc<Path>,
    /// Whether the entry is ignored.
    is_ignored: bool,
    /// Id of the scan that last touched this record.
    scan_id: usize,
}
2190
2191impl sum_tree::Item for PathEntry {
2192 type Summary = PathEntrySummary;
2193
2194 fn summary(&self) -> Self::Summary {
2195 PathEntrySummary { max_id: self.id }
2196 }
2197}
2198
2199impl sum_tree::KeyedItem for PathEntry {
2200 type Key = ProjectEntryId;
2201
2202 fn key(&self) -> Self::Key {
2203 self.id
2204 }
2205}
2206
/// Summary for the by-id tree: the greatest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2211
2212impl sum_tree::Summary for PathEntrySummary {
2213 type Context = ();
2214
2215 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2216 self.max_id = summary.max_id;
2217 }
2218}
2219
2220impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2221 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2222 *self = summary.max_id;
2223 }
2224}
2225
/// Ordering key for the by-path entry tree: the entry's relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2228
2229impl Default for PathKey {
2230 fn default() -> Self {
2231 Self(Path::new("").into())
2232 }
2233}
2234
2235impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2236 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2237 self.0 = summary.max_path.clone();
2238 }
2239}
2240
/// Scans a local worktree in the background, keeping `snapshot` up to date
/// with the filesystem and reporting progress over `status_updates_tx`.
struct BackgroundScanner {
    /// The snapshot being built and maintained, shared behind a mutex.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel used to report scan state to the foreground.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the worktree to rescan specific paths.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The previously reported snapshot plus accumulated changed paths.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    /// Set once the initial recursive scan has completed.
    finished_initial_scan: bool,
}
2250
2251impl BackgroundScanner {
2252 fn new(
2253 snapshot: LocalSnapshot,
2254 fs: Arc<dyn Fs>,
2255 status_updates_tx: UnboundedSender<ScanState>,
2256 executor: Arc<executor::Background>,
2257 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2258 ) -> Self {
2259 Self {
2260 fs,
2261 status_updates_tx,
2262 executor,
2263 refresh_requests_rx,
2264 prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2265 snapshot: Mutex::new(snapshot),
2266 finished_initial_scan: false,
2267 }
2268 }
2269
    /// Main loop of the scanner: perform the initial recursive scan, then
    /// keep processing filesystem events and explicit refresh requests until
    /// both channels close (i.e. the worktree is dropped).
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore excludes the root itself, mark the
            // root entry ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Drop our copy of the sender so the job queue closes once all
        // queued jobs (which hold their own clones) have completed.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial
        // scan. For these events, update events cannot be as precise, because
        // we didn't have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Coalesce every event batch that is already ready.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Coalesce all ready event batches into a single rescan.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2361
    /// Reloads the given paths and then emits a status update carrying
    /// `barrier`, so the requester can await completion. Returns `false` when
    /// the status channel is closed, signalling the scanner should stop.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2366
    /// Handles a batch of FS event paths: reloads affected entries, rescans any
    /// changed directories, refreshes ignore statuses, prunes git-repository
    /// state for removed entries, and emits a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Record which paths changed, so the next status update can build a
            // precise change set.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop state for git repositories whose entries no longer exist.
        // `mem::take` lets us call `contains_entry(&snapshot)` while mutating
        // the maps, then put them back.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|project_entry_id, _| snapshot.contains_entry(*project_entry_id));
        snapshot.git_repositories = git_repositories;

        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| snapshot.contains_entry(entry.git_dir_entry_id));
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }
2398
    /// Drains `scan_jobs_rx` using one worker task per CPU, scanning
    /// directories recursively. While scanning, periodically sends progress
    /// updates (when `enable_progress_updates` is set) and services refresh
    /// requests so user-initiated operations stay responsive.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the worktree has stopped listening for status updates, there is
        // no point in scanning.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race; it sends the update.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent one; catch up to its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Errors reading the root itself are deliberately not logged here.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2471
    /// Sends a `ScanState::Updated` message containing the current snapshot and
    /// the set of changes since the previously reported snapshot. Returns
    /// `false` when the receiving side has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        // Note the lock order: prev_state is acquired before snapshot.
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        let mut old_snapshot = snapshot.snapshot.clone();
        // Swap the current snapshot into prev_state, leaving the prior one in
        // `old_snapshot` for diffing; take the accumulated changed paths.
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2488
    /// Loads the contents of a single directory: builds entries for each child,
    /// computes their ignore status, and enqueues scan jobs for subdirectories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory entry, in order; `None` marks a directory that
        // will not be scanned (symlink cycle).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The file disappeared between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // sorts near the start of the listing, so there should rarely be many such entries.
                // Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // Directory entries and jobs were pushed in lockstep, so
                        // each dir entry has a corresponding job slot.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue the child directories after the snapshot has been updated.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2614
    /// Re-stats the given absolute paths and updates their entries in the
    /// snapshot, removing entries for deleted paths. When `scan_queue_tx` is
    /// provided, changed directories are enqueued for a recursive rescan.
    /// Returns the affected worktree-relative paths, or `None` if the root
    /// could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Drop paths that are inside another path in the batch; the sorted
        // order guarantees a parent directly precedes its descendants.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A non-recursive refresh completes synchronously within this scan id.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    let scan_id = snapshot.scan_id;

                    // A change inside a `.git` directory means the repository's
                    // index may have changed; reload it and bump its scan id.
                    let repo_with_path_in_dotgit = snapshot.repo_for_metadata(&path);
                    if let Some((key, repo)) = repo_with_path_in_dotgit {
                        repo.lock().reload_index();

                        snapshot
                            .repository_entries
                            .update(&key, |entry| entry.scan_id = scan_id);
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Recurse into directories, guarding against symlink cycles.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2713
    /// Recomputes ignore status for entries under any `.gitignore` files that
    /// changed during the current scan, and drops state for `.gitignore` files
    /// that were deleted. The recursive recomputation is fanned out across one
    /// worker task per CPU.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        // Work against a clone of the snapshot so workers can read it without
        // holding the lock.
        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A scan id newer than the last completed scan means the ignore
                // file changed and its subtree must be revisited.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of a path already queued — the recursive job for
            // the ancestor will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2791
    /// Recomputes the ignore status of the direct children of `job.abs_path`,
    /// enqueueing a follow-up job for each child directory, and writes any
    /// changed entries back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own .gitignore, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    // Everything beneath an ignored directory is ignored.
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose status actually flipped.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2834
    /// Diffs the old and new snapshots, but only at the given `event_paths`
    /// (and their subtrees), producing a map of path → change kind. Walks two
    /// cursors in lockstep through the sorted entry trees.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event path
                        // and out of its subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2901
    /// Waits one progress-update interval. When `running` is false this never
    /// resolves, which disables progress updates in the enclosing select loop.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        // Under a fake filesystem (tests), use a random short delay so the
        // deterministic executor can explore different interleavings.
        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
2914}
2915
2916fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2917 let mut result = root_char_bag;
2918 result.extend(
2919 path.to_string_lossy()
2920 .chars()
2921 .map(|c| c.to_ascii_lowercase()),
2922 );
2923 result
2924}
2925
/// A unit of work for the recursive directory scan: one directory to load,
/// plus the state needed to enqueue its subdirectories.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    // Channel on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
2933
/// A unit of work for recomputing ignore statuses: one directory whose
/// children must be re-evaluated against an updated gitignore stack.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose children are re-evaluated.
    abs_path: Arc<Path>,
    // Gitignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Channel on which follow-up jobs for subdirectories are enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2939
/// Test-only extension methods for worktree handles.
pub trait WorktreeHandle {
    /// Mutates the worktree's directory and waits for the resulting FS events
    /// to be observed, ensuring stale events from before the worktree was
    /// constructed have all been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2947
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it,
            // then delete it and wait again; afterwards any earlier FS events
            // must have been drained.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2988
/// Running totals accumulated while seeking through the entry tree. Serves as
/// the traversal's seek dimension, translating between entry offsets and tree
/// positions under different dir/ignored filters.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // The greatest path visited so far.
    max_path: &'a Path,
    // Total entries visited.
    count: usize,
    // Entries visited that are not ignored.
    visible_count: usize,
    // Files (non-directories) visited.
    file_count: usize,
    // Non-ignored files visited.
    visible_file_count: usize,
}
2997
2998impl<'a> TraversalProgress<'a> {
2999 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3000 match (include_ignored, include_dirs) {
3001 (true, true) => self.count,
3002 (true, false) => self.file_count,
3003 (false, true) => self.visible_count,
3004 (false, false) => self.visible_file_count,
3005 }
3006 }
3007}
3008
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Accumulates a subtree summary into the running totals as the cursor
    // seeks past it.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3018
impl<'a> Default for TraversalProgress<'a> {
    /// The zero position: before any entry, with all counts at zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
3030
/// An iterator-like cursor over worktree entries in path order, optionally
/// filtering out directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
3036
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching the traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward so that `offset` filtered entries precede the cursor.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips over the current entry's entire subtree, stopping at the next
    /// entry (matching the filters) that is not a descendant of it.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            // If the landing entry is filtered out, loop and skip its subtree too.
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of filtered entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3082
3083impl<'a> Iterator for Traversal<'a> {
3084 type Item = &'a Entry;
3085
3086 fn next(&mut self) -> Option<Self::Item> {
3087 if let Some(item) = self.entry() {
3088 self.advance();
3089 Some(item)
3090 } else {
3091 None
3092 }
3093 }
3094}
3095
/// A seek target for positioning a `Traversal` cursor.
#[derive(Debug)]
enum TraversalTarget {
    // Seek to this exact path.
    Path(&'a Path),
    // Seek to the first entry after the subtree rooted at this path.
    PathSuccessor(&'a Path),
    // Seek so that `count` entries (under the given filters) precede the cursor.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3106
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    // Orders a seek target relative to the cursor's accumulated progress.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // Treat the first position outside `path`'s subtree as a match,
                // so the seek stops right after the subtree.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3129
/// Iterates over the direct children of a directory by skipping each child's
/// subtree after yielding it.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3134
3135impl<'a> Iterator for ChildEntriesIter<'a> {
3136 type Item = &'a Entry;
3137
3138 fn next(&mut self) -> Option<Self::Item> {
3139 if let Some(item) = self.traversal.entry() {
3140 if item.path.starts_with(&self.parent_path) {
3141 self.traversal.advance_to_sibling();
3142 return Some(item);
3143 }
3144 }
3145 None
3146 }
3147}
3148
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes a local entry into its protobuf representation for sharing
    /// with remote collaborators.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths are transmitted as (possibly lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3162
3163impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3164 type Error = anyhow::Error;
3165
3166 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3167 if let Some(mtime) = entry.mtime {
3168 let kind = if entry.is_dir {
3169 EntryKind::Dir
3170 } else {
3171 let mut char_bag = *root_char_bag;
3172 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3173 EntryKind::File(char_bag)
3174 };
3175 let path: Arc<Path> = PathBuf::from(entry.path).into();
3176 Ok(Entry {
3177 id: ProjectEntryId::from_proto(entry.id),
3178 kind,
3179 path,
3180 inode: entry.inode,
3181 mtime: mtime.into(),
3182 is_symlink: entry.is_symlink,
3183 is_ignored: entry.is_ignored,
3184 })
3185 } else {
3186 Err(anyhow!(
3187 "missing mtime in remote worktree entry {:?}",
3188 entry.path
3189 ))
3190 }
3191 }
3192}
3193
3194#[cfg(test)]
3195mod tests {
3196 use super::*;
3197 use fs::{FakeFs, RealFs};
3198 use gpui::{executor::Deterministic, TestAppContext};
3199 use pretty_assertions::assert_eq;
3200 use rand::prelude::*;
3201 use serde_json::json;
3202 use std::{env, fmt::Write};
3203 use util::{http::FakeHttpClient, test::temp_tree};
3204
    /// Verifies that traversing a worktree skips gitignored entries when
    /// `entries(false)` is used and includes them with `entries(true)`.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        // "a/b" is gitignored; "a/c" is not.
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan to finish before asserting on entries.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3262
    /// Verifies that the scanner terminates on circular symlinks: the symlinked
    /// directories appear as entries but are not recursed into, and a rename of
    /// one of the symlinks is picked up without rescanning the cycle.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back at its grandparent, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlink entries are present, but nothing beneath them.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one symlink should be reflected after events settle.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3342
    /// Verifies (against the real filesystem) that ignore statuses are applied
    /// correctly on the initial scan and maintained on rescan after files are
    /// created, including rules from a .gitignore above the worktree root.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        // The parent directory holds a .gitignore that applies to the worktree
        // root from above.
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Newly created files must pick up the same ignore statuses via rescan.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3421
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        // Nested repositories: `dir1` is a repo, and `dir1/deps/dep1` is a
        // separate repo inside it. `c.txt` sits outside any repository.
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Wait for the initial scan and drain any trailing fs events so the
        // snapshot inspected below is stable.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // A file outside of any repository maps to no repo.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            // A file in the outer repo resolves to `dir1`'s repository...
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(entry.work_directory.0.as_ref(), Path::new("dir1"));
            assert_eq!(entry.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // ...while a file inside the nested repo resolves to the innermost one.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(entry.work_directory.deref(), Path::new("dir1/deps/dep1"));
            assert_eq!(
                entry.git_dir_path.as_ref(),
                Path::new("dir1/deps/dep1/.git"),
            );
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            entry.scan_id
        });

        // Touching anything inside `.git` should bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = {
                let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
                entry.scan_id
            };
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting the `.git` directory removes the repository entirely.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3506
3507 #[test]
3508 fn test_changed_repos() {
3509 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> RepositoryEntry {
3510 RepositoryEntry {
3511 scan_id,
3512 git_dir_path: git_dir_path.as_ref().into(),
3513 git_dir_entry_id: ProjectEntryId(0),
3514 work_directory: RepositoryWorkDirectory(
3515 Path::new(&format!("don't-care-{}", scan_id)).into(),
3516 ),
3517 }
3518 }
3519
3520 let mut prev_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3521 prev_repos.insert(
3522 RepositoryWorkDirectory(Path::new("don't-care-1").into()),
3523 fake_entry("/.git", 0),
3524 );
3525 prev_repos.insert(
3526 RepositoryWorkDirectory(Path::new("don't-care-2").into()),
3527 fake_entry("/a/.git", 0),
3528 );
3529 prev_repos.insert(
3530 RepositoryWorkDirectory(Path::new("don't-care-3").into()),
3531 fake_entry("/a/b/.git", 0),
3532 );
3533
3534 let mut new_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3535 new_repos.insert(
3536 RepositoryWorkDirectory(Path::new("don't-care-4").into()),
3537 fake_entry("/a/.git", 1),
3538 );
3539 new_repos.insert(
3540 RepositoryWorkDirectory(Path::new("don't-care-5").into()),
3541 fake_entry("/a/b/.git", 0),
3542 );
3543 new_repos.insert(
3544 RepositoryWorkDirectory(Path::new("don't-care-6").into()),
3545 fake_entry("/a/c/.git", 0),
3546 );
3547
3548 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3549
3550 // Deletion retained
3551 assert!(res
3552 .iter()
3553 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3554 .is_some());
3555
3556 // Update retained
3557 assert!(res
3558 .iter()
3559 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3560 .is_some());
3561
3562 // Addition retained
3563 assert!(res
3564 .iter()
3565 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3566 .is_some());
3567
3568 // Nochange, not retained
3569 assert!(res
3570 .iter()
3571 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3572 .is_none());
3573 }
3574
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        // One tracked and one gitignored directory; both start empty.
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        // Write a file into each directory through the worktree API.
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        // Both writes must produce entries — even inside an ignored dir — and
        // each entry's ignore status must reflect its containing directory.
        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
3628
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Snapshot taken while the background scan may still be in progress;
        // used below as the base for an incremental update.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Create a directory whose parent `a` does not exist yet, racing the
        // initial scan on purpose.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Applying the diff between the two snapshots to the older one must
        // reproduce the newer snapshot exactly.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
3677
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Both knobs are overridable from the environment for soak testing.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Base snapshot captured while the initial scan may still be running;
        // kept in sync below via incremental updates.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            // Mutations may legitimately fail while racing the scanner, so
            // errors are only logged, never fatal.
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally verify that an incremental update from the tracked
            // base snapshot reproduces the current state exactly.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Let the initial scan finish, then make one final consistency pass.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3754
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Both knobs are overridable from the environment for soak testing.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            // Mirror every change event into the sorted `paths` vec and check
            // it always matches the worktree's own path list.
            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Insertion point (or current position) of `path` in
                        // the sorted mirror.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Interleave fs mutations with partial event flushes so the scanner
        // observes changes in random batches; occasionally store a snapshot
        // for the update-diff checks at the end.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Flush all remaining events and let the scanner settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A fresh scan of the same filesystem must agree with the
        // incrementally maintained snapshot.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Each stored snapshot must be able to catch up to the final state
        // via a single built update, with or without ignored entries.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the old snapshot first, since an
                // update built without them will never mention those entries.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
3909
    // Applies one random operation (delete, rename, or create/overwrite) to a
    // random entry of the worktree, returning the task that performs it.
    // NOTE(review): the rng draw order here is significant for seeded-test
    // reproducibility — do not reorder the rng calls.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3 of the time: delete the chosen entry (never the root,
            // whose path is empty).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3 of the time: rename the chosen entry into another
            // randomly chosen directory.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into itself; fall back to a fresh
                // top-level name instead.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remainder: create a child under a directory entry, or overwrite
            // a file entry with empty contents.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
3969
    // Performs one random mutation on the fake filesystem under `root_path`:
    // with probability `insertion_probability` creates a file or directory,
    // occasionally (5%) writes a random `.gitignore`, and otherwise renames
    // or deletes an existing path.
    // NOTE(review): the rng draw order is significant for reproducibility.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition existing paths under the root into files and directories;
        // the root itself always lands in `dirs`.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Force an insertion when the tree is empty apart from the root, so
        // the rename/delete branch below always has something to act on.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Write a `.gitignore` into a random directory, listing a random
            // subset of the files and dirs beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            // `subdirs` always contains `ignore_dir_path` itself, so this
            // exclusive range is never empty.
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Gitignore entries are paths relative to the ignore file's
            // directory, one per line.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Pick an existing file or non-root directory to rename or delete.
            let old_path = {
                let file_path = files.choose(rng);
                // Skip dirs[0] (the root): it must never be moved or removed.
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Choose a destination directory that isn't inside the path
                // being moved.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace the destination directory wholesale
                // instead of creating a fresh child name under it.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4128
4129 fn gen_name(rng: &mut impl Rng) -> String {
4130 (0..6)
4131 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4132 .map(char::from)
4133 .collect()
4134 }
4135
    impl LocalSnapshot {
        // Test-only consistency checks for a snapshot: the two entry indices
        // must agree, the file iterators must visit exactly the file entries,
        // and the different traversal mechanisms must produce the same order.
        fn check_invariants(&self) {
            // `entries_by_path` and `entries_by_id` must contain the same
            // (path, id) pairs; the BTreeSet sorts the id-keyed side by path
            // so the two sides compare directly.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // `files(true, ..)` must yield every file entry in order, and
            // `files(false, ..)` the non-ignored subset, with nothing extra.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree through `child_entries` with an explicit stack
            // (children spliced in at the previous stack top to preserve
            // order) and check the visit order matches the flat cursor order.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The public `entries` traversal must agree with the raw cursor.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every directory tracked in `ignores_by_parent_abs_path` must
            // still exist in the tree, along with its .gitignore file.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        // Flattens the snapshot into sorted (path, inode, is_ignored) tuples
        // for equality comparisons in tests; optionally omits ignored entries.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4210}