1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use lsp::LanguageServerId;
30use parking_lot::Mutex;
31use postage::{
32 barrier,
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 os::unix::prelude::OsStrExt,
47 path::{Path, PathBuf},
48 pin::Pin,
49 sync::{
50 atomic::{AtomicUsize, Ordering::SeqCst},
51 Arc,
52 },
53 time::{Duration, SystemTime},
54};
55use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
56use util::{paths::HOME, ResultExt, TryFutureExt};
57
/// Identifier for a worktree, derived from the id of the model handle that
/// owns it (see `WorktreeId::from_usize` usage in `Worktree::local`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);

/// A directory tree being tracked: either scanned from the local filesystem,
/// or mirrored from a remote collaborator over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
65
/// A worktree backed by the local filesystem, kept up to date by a background
/// scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Requests for the scanner to rescan specific paths; the paired barrier
    // sender is dropped once those paths have been processed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch channel holding `true` while the background scanner is working.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present while this worktree is shared with remote collaborators.
    share: Option<ShareState>,
    // Full diagnostics per path, as a vec sorted by language server id.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    // Error/warning counts per path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}

/// A replica of a worktree hosted by a remote collaborator, updated by
/// applying `proto::UpdateWorktree` messages as they arrive.
pub struct RemoteWorktree {
    snapshot: Snapshot,
    // Mutated on a background task as remote updates arrive, then copied
    // into `snapshot` on the foreground.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once the host has disconnected; further updates are ignored.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters resolved once `completed_scan_id` reaches the requested scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
97
/// The state of a worktree's file entries, shared between local and remote
/// worktrees.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root on the host's filesystem.
    abs_path: Arc<Path>,
    // File name of the root, used for display and fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    // Entries indexed by path and by id, for lookups in either direction.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Git repositories discovered in the tree, keyed by their work directory.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
120
/// Metadata about a git repository discovered inside the worktree.
#[derive(Clone, Debug)]
pub struct RepositoryEntry {
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
    // Project entry id of the .git directory itself.
    pub(crate) git_dir_entry_id: ProjectEntryId,
    // The 'content path': the directory whose contents this repository tracks.
    pub(crate) work_directory: RepositoryWorkDirectory,
    // Scan id at which this entry was last updated.
    pub(crate) scan_id: usize,
    // Current branch name, if one could be determined.
    pub(crate) branch: Option<Arc<str>>,
}
131
132impl RepositoryEntry {
133 // Note that this path should be relative to the worktree root.
134 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
135 path.starts_with(self.git_dir_path.as_ref())
136 }
137
138 pub fn branch(&self) -> Option<Arc<str>> {
139 self.branch.clone()
140 }
141
142 pub fn work_directory(&self) -> Arc<Path> {
143 self.work_directory.0.clone()
144 }
145}
146
/// Serializes a repository entry for transmission to collaborators.
impl From<&RepositoryEntry> for proto::RepositoryEntry {
    fn from(value: &RepositoryEntry) -> Self {
        proto::RepositoryEntry {
            git_dir_entry_id: value.git_dir_entry_id.to_proto(),
            scan_id: value.scan_id as u64,
            // Paths cross the wire as raw bytes (unix-only via `OsStrExt`).
            git_dir_path: value.git_dir_path.as_os_str().as_bytes().to_vec(),
            work_directory: value.work_directory.0.as_os_str().as_bytes().to_vec(),
            branch: value.branch.as_ref().map(|str| str.to_string()),
        }
    }
}
158
/// This path corresponds to the 'content path' (the folder that contains the .git)
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);

impl RepositoryWorkDirectory {
    // Note that these paths should be relative to the worktree root.

    /// Whether `path` is inside this work directory.
    pub(crate) fn contains(&self, path: &Path) -> bool {
        path.starts_with(&self.0)
    }

    /// Strips the work-directory prefix from `path`, yielding a path relative
    /// to the repository root, or `None` when `path` lies outside it.
    pub(crate) fn relativize(&self, path: &Path) -> Option<RepoPath> {
        let relative = path.strip_prefix(&self.0).ok()?;
        Some(RepoPath(relative.to_path_buf()))
    }
}

impl Deref for RepositoryWorkDirectory {
    type Target = Path;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<'a> From<&'a str> for RepositoryWorkDirectory {
    fn from(value: &'a str) -> Self {
        Self(Arc::from(Path::new(value)))
    }
}

/// A path relative to a repository's work directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepoPath(PathBuf);

impl AsRef<Path> for RepoPath {
    fn as_ref(&self) -> &Path {
        self.0.as_path()
    }
}

impl Deref for RepoPath {
    type Target = PathBuf;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl AsRef<Path> for RepositoryWorkDirectory {
    fn as_ref(&self) -> &Path {
        &self.0
    }
}

impl Default for RepositoryWorkDirectory {
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
218
/// Snapshot state that exists only for local worktrees: gitignore data, git
/// repositories, and bookkeeping used by the background scanner.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    // Parsed .gitignore files keyed by the absolute path of the directory
    // containing them; the usize is presumably the scan id at which they were
    // read — TODO confirm where it is written.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    git_repositories: TreeMap<ProjectEntryId, Arc<Mutex<dyn GitRepository>>>,
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}

// Deref to the shared `Snapshot` so its entry-query methods are available
// directly on `LocalSnapshot`.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
242
/// Messages sent from the background scanner to the foreground worktree.
enum ScanState {
    Started,
    Updated {
        snapshot: LocalSnapshot,
        changes: HashMap<Arc<Path>, PathChange>,
        // When present, dropped after the update is applied so a caller
        // blocked on a `path_changes_tx` barrier can resume.
        barrier: Option<barrier::Sender>,
        // Whether the scanner still has more work pending.
        scanning: bool,
    },
}

/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot to the task streaming updates to the server.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signalled to make the streaming task retry after a failed send.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}

/// Events emitted by a worktree model.
pub enum Event {
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    UpdatedGitRepositories(Vec<RepositoryEntry>),
}

impl Entity for Worktree {
    type Event = Event;
}
268
269impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem and starts
    /// a background task that scans it and watches it for changes.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // Scan 1 is considered in progress; no scan has completed yet.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Insert the root entry eagerly if the path exists, so the tree is
            // usable before the first full scan finishes.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner to this model,
            // updating the snapshot and emitting events on the foreground.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier unblocks whoever requested
                                // this rescan (see `refresh_entry`/`delete_entry`).
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    // Debounce filesystem events by 100ms before rescanning.
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
382
    /// Creates a worktree that mirrors a worktree in a remote collaborator's
    /// project, applying `proto::UpdateWorktree` messages as they arrive.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming updates to the background snapshot off the main
            // thread, signalling `snapshot_updated_rx` after each one.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // On the foreground, copy the background snapshot into the model,
            // emit events, and resolve subscribers waiting on a scan id.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Subscriptions are ordered by scan id, so stop at
                            // the first one that hasn't been observed yet.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
465
466 pub fn as_local(&self) -> Option<&LocalWorktree> {
467 if let Worktree::Local(worktree) = self {
468 Some(worktree)
469 } else {
470 None
471 }
472 }
473
474 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
475 if let Worktree::Remote(worktree) = self {
476 Some(worktree)
477 } else {
478 None
479 }
480 }
481
482 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
483 if let Worktree::Local(worktree) = self {
484 Some(worktree)
485 } else {
486 None
487 }
488 }
489
490 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
491 if let Worktree::Remote(worktree) = self {
492 Some(worktree)
493 } else {
494 None
495 }
496 }
497
498 pub fn is_local(&self) -> bool {
499 matches!(self, Worktree::Local(_))
500 }
501
502 pub fn is_remote(&self) -> bool {
503 !self.is_local()
504 }
505
    /// A copy of the variant-agnostic `Snapshot` for this worktree.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The latest scan id whose preceding scans have all completed.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    /// Whether this worktree should be visible to the user.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The replica id of this worktree's owner; local worktrees are always
    /// replica 0 (the host).
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Flattens the per-path, per-language-server diagnostic summaries into
    /// `(path, server_id, summary)` triples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// Absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
562}
563
564impl LocalWorktree {
    /// Whether the absolute `path` lies inside this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }

    /// Converts a worktree-relative path to an absolute one. A path with no
    /// file name (i.e. the empty path) refers to the worktree root itself.
    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.abs_path.join(path)
        } else {
            self.abs_path.to_path_buf()
        }
    }
576
    /// Loads the file at `path` into a new `Buffer` model with the given
    /// remote `id`, eagerly computing its git diff against the index.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer off the main thread; replica id 0 is the
            // host's.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
599
    /// The full diagnostics recorded for `path`, grouped by language server.
    pub fn diagnostics_for_path(
        &self,
        path: &Path,
    ) -> Vec<(
        LanguageServerId,
        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
    )> {
        self.diagnostics.get(path).cloned().unwrap_or_default()
    }

    /// Replaces the diagnostics produced by `server_id` for `worktree_path`,
    /// updating the cached summary and broadcasting it when shared.
    ///
    /// Returns `Ok(true)` when the path had or now has diagnostics (i.e. the
    /// change is observable), `Ok(false)` when both old and new are empty.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: drop its entry, and the
            // whole path entry if no other server has diagnostics there.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path vec is kept sorted by server id for binary search.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            // Notify collaborators of the new summary when sharing.
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
669
    /// Installs `new_snapshot` as the current snapshot, forwarding it to the
    /// sharing task (if any) and emitting an event for changed repositories.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos = Self::changed_repos(
            &self.snapshot.repository_entries,
            &new_snapshot.repository_entries,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }

    /// Computes which repository entries differ between two snapshots:
    /// entries added, removed, or whose `scan_id` changed, deduplicated by
    /// `git_dir_path`.
    fn changed_repos(
        old_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
        new_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
    ) -> Vec<RepositoryEntry> {
        // Records every entry of `a` with no (git_dir_path, scan_id) match in
        // `b`. NOTE(review): `b` is advanced by `find` and never rewound, so
        // earlier `b` elements are not reconsidered for later `a` entries;
        // this appears to rely on both trees iterating in the same key order
        // — confirm.
        fn diff<'a>(
            a: impl Iterator<Item = &'a RepositoryEntry>,
            mut b: impl Iterator<Item = &'a RepositoryEntry>,
            updated: &mut HashMap<&'a Path, RepositoryEntry>,
        ) {
            for a_repo in a {
                let matched = b.find(|b_repo| {
                    a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
                }
            }
        }

        let mut updated = HashMap::<&Path, RepositoryEntry>::default();

        // Diff in both directions to catch removals as well as additions.
        diff(old_repos.values(), new_repos.values(), &mut updated);
        diff(new_repos.values(), old_repos.values(), &mut updated);

        updated.into_values().collect()
    }
713
714 pub fn scan_complete(&self) -> impl Future<Output = ()> {
715 let mut is_scanning_rx = self.is_scanning.1.clone();
716 async move {
717 let mut is_scanning = is_scanning_rx.borrow().clone();
718 while is_scanning {
719 if let Some(value) = is_scanning_rx.recv().await {
720 is_scanning = value;
721 } else {
722 break;
723 }
724 }
725 }
726 }
727
    /// A clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }

    /// Metadata describing this worktree, for sharing with collaborators.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
740
    /// Reads the file at worktree-relative `path`, returning its `File`
    /// entry, its text, and (when inside a git repository) the index text to
    /// diff against.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // Kick off loading the git index text concurrently with the file read.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(&path).unwrap();
            if let Some(repo) = self.git_repositories.get(&repo.git_dir_entry_id) {
                let repo = repo.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
795
796 pub fn save_buffer(
797 &self,
798 buffer_handle: ModelHandle<Buffer>,
799 path: Arc<Path>,
800 has_changed_file: bool,
801 cx: &mut ModelContext<Worktree>,
802 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
803 let handle = cx.handle();
804 let buffer = buffer_handle.read(cx);
805
806 let rpc = self.client.clone();
807 let buffer_id = buffer.remote_id();
808 let project_id = self.share.as_ref().map(|share| share.project_id);
809
810 let text = buffer.as_rope().clone();
811 let fingerprint = text.fingerprint();
812 let version = buffer.version();
813 let save = self.write_file(path, text, buffer.line_ending(), cx);
814
815 cx.as_mut().spawn(|mut cx| async move {
816 let entry = save.await?;
817
818 if has_changed_file {
819 let new_file = Arc::new(File {
820 entry_id: entry.id,
821 worktree: handle,
822 path: entry.path,
823 mtime: entry.mtime,
824 is_local: true,
825 is_deleted: false,
826 });
827
828 if let Some(project_id) = project_id {
829 rpc.send(proto::UpdateBufferFile {
830 project_id,
831 buffer_id,
832 file: Some(new_file.to_proto()),
833 })
834 .log_err();
835 }
836
837 buffer_handle.update(&mut cx, |buffer, cx| {
838 if has_changed_file {
839 buffer.file_updated(new_file, cx).detach();
840 }
841 });
842 }
843
844 if let Some(project_id) = project_id {
845 rpc.send(proto::BufferSaved {
846 project_id,
847 buffer_id,
848 version: serialize_version(&version),
849 mtime: Some(entry.mtime.into()),
850 fingerprint: serialize_fingerprint(fingerprint),
851 })?;
852 }
853
854 buffer_handle.update(&mut cx, |buffer, cx| {
855 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
856 });
857
858 Ok((version, fingerprint, entry.mtime))
859 })
860 }
861
    /// Creates a file or directory at worktree-relative `path`, then
    /// refreshes the corresponding entry in the snapshot.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Write an empty file with default contents and line ending.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }

    /// Writes `text` to worktree-relative `path` with the given line ending,
    /// then refreshes the corresponding entry in the snapshot.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
911
    /// Deletes the file or directory for `entry_id` from disk, then waits for
    /// the background scanner to process the change. Returns `None` if the
    /// entry no longer exists in the snapshot.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root first in case it is itself a symlink.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path, and wait for the
            // barrier to be dropped once it has been processed.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
954
    /// Renames the entry for `entry_id` to worktree-relative `new_path`, then
    /// refreshes the snapshot for both the old and new locations. Returns
    /// `None` if the entry no longer exists in the snapshot.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }

    /// Recursively copies the entry for `entry_id` to worktree-relative
    /// `new_path`, then refreshes the new location. Returns `None` if the
    /// entry no longer exists in the snapshot.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
1013
    /// Asks the background scanner to rescan `path` (and `old_path`, when the
    /// entry moved), waits for the scan, then returns the refreshed entry.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path with no file name refers to the worktree root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves when the scanner has processed the paths.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1051
    /// Starts (or resumes) sharing this worktree with collaborators in
    /// `project_id`: sends the current diagnostic summaries, then streams
    /// snapshot updates to the server until unshared.
    ///
    /// The returned task resolves once the first snapshot update has been
    /// acknowledged by the server.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and nudge the streaming
            // task to resume in case it was waiting after a failed send.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Replay existing diagnostic summaries to the new collaborators.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first diff sent is a
                    // complete description of the worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Small chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal, then retry the send
                            // until it succeeds or sharing is torn down.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // First full snapshot has been acknowledged; resolve
                        // the task returned from `share`.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1138
    /// Stops sharing this worktree, dropping the update-streaming task.
    pub fn unshare(&mut self) {
        self.share.take();
    }

    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1146
1147 pub fn load_index_text(
1148 &self,
1149 repo: RepositoryEntry,
1150 repo_path: RepoPath,
1151 cx: &mut ModelContext<Worktree>,
1152 ) -> Task<Option<String>> {
1153 let Some(git_ptr) = self.git_repositories.get(&repo.git_dir_entry_id).map(|git_ptr| git_ptr.to_owned()) else {
1154 return Task::Ready(Some(None))
1155 };
1156 cx.background()
1157 .spawn(async move { git_ptr.lock().load_index_text(&repo_path) })
1158 }
1159}
1160
impl RemoteWorktree {
    /// Returns a clone of the current foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Called when the connection to the host is lost. Dropping `updates_tx`
    /// stops accepting further remote updates, and clearing the subscriptions
    /// drops their senders so pending `wait_for_snapshot` futures resolve
    /// with an error instead of hanging forever.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the given buffer, then applies the returned
    /// version, fingerprint, and mtime to the local buffer via `did_save`.
    ///
    /// Returns the saved version, content fingerprint, and file mtime.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Queues a worktree update received from the host. Silently ignored
    /// after `disconnected_from_host` has dropped the channel.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot at least as new as `scan_id` has been fully applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future resolving once the snapshot for `scan_id` has been
    /// observed. Resolves immediately if it already was; fails (via the
    /// dropped sender) if the worktree is disconnected.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            // Keep the subscription list sorted by scan id; both Ok and Err
            // from the binary search yield a valid sorted insertion point.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Records a diagnostic summary pushed by the host for `path`. An empty
    /// summary removes the per-language-server record (and the path's map
    /// once no servers remain); otherwise the record is inserted/overwritten.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts `entry` once the snapshot for `scan_id` has been observed:
    /// applies it to the shared background snapshot under its lock, then
    /// mirrors the result into the foreground snapshot.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes the entry with `id` once the snapshot for `scan_id` has been
    /// observed; like `insert_entry`, mutates the background snapshot under
    /// its lock and then refreshes the foreground copy.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1302
impl Snapshot {
    /// Identifier of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Absolute path of the worktree root on the host filesystem.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts (or replaces) an entry deserialized from a protobuf message,
    /// keeping the path-keyed and id-keyed trees consistent with each other.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id was previously mapped to a different path, drop the
        // stale path record before inserting the new one.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes the entry with `entry_id` along with all of its descendants,
    /// returning the removed entry's path.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and its descendants, dropping their
            // id records as we go...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the remainder of the tree.
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

    /// Applies an incremental update received from the host: removals first,
    /// then inserts/updates, then the new scan ids.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry moved, remove the record for its old path.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    /// Total number of file entries, ignored files included.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of file entries that are not git-ignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal positioned at the `start_offset`-th entry, where
    /// the offset counts only the entry kinds selected by the two flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal positioned at `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates file entries starting at offset `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates all entries, directories included.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates the paths of all entries except the root (the empty path).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates the direct children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The root entry, if scanning has produced one yet.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// Name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The repository whose work directory is the worktree root, if any.
    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
        self.repository_entries
            .get(&"".into())
            .map(|entry| entry.to_owned())
    }

    /// Iterates all known repositories in this worktree.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repository_entries.values()
    }

    /// Id of the most recent (possibly still in-progress) scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Looks up the entry at exactly `path`, if present.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id via the id-to-path index.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// Inode of the entry at `path`, if present.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1509
impl LocalSnapshot {
    /// Returns the repository whose work directory contains `path`,
    /// preferring the deepest (longest-path) work directory when several
    /// contain it.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
        let mut max_len = 0;
        let mut current_candidate = None;
        for (work_directory, repo) in (&self.repository_entries).iter() {
            if work_directory.contains(path) {
                if work_directory.0.as_os_str().len() >= max_len {
                    current_candidate = Some(repo);
                    max_len = work_directory.0.as_os_str().len();
                } else {
                    // NOTE(review): bailing out on the first shorter match
                    // assumes `repository_entries` iterates in an order where
                    // no deeper containing directory can follow — confirm
                    // against the map's key ordering.
                    break;
                }
            }
        }

        current_candidate.map(|entry| entry.to_owned())
    }

    /// Finds the repository whose `.git` directory contains `path`, returning
    /// its work directory together with the open repository handle.
    pub(crate) fn repo_for_metadata(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, Arc<Mutex<dyn GitRepository>>)> {
        self.repository_entries
            .iter()
            .find(|(_, repo)| repo.in_dot_git(path))
            .map(|(work_directory, entry)| {
                (
                    work_directory.to_owned(),
                    self.git_repositories
                        .get(&entry.git_dir_entry_id)
                        .expect("These two data structures should be in sync")
                        .to_owned(),
                )
            })
    }

    /// Builds an update message describing this snapshot in full, as if the
    /// receiver had no prior state. Test-only helper.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
        }
    }

    /// Produces an incremental update describing how this snapshot differs
    /// from `other` (an older snapshot), by merge-joining the two id-ordered
    /// entry indexes.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        // Present only in self: newly added.
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        // Present in both: send only if it changed scans.
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        // Present only in other: removed.
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            // TODO repo
            updated_repositories: vec![],
        }
    }

    /// Inserts `entry` into both indexes, reloading the relevant gitignore
    /// when the entry is a `.gitignore` file and reusing ids of removed or
    /// existing entries where possible.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote an already-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If a different entry previously occupied this path, drop its id
        // record so the two indexes stay in sync.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the scanned contents of the directory at `parent_path`,
    /// marking it fully scanned, registering its gitignore (if any), and —
    /// when the directory is a `.git` dir — registering the repository.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        // Only directories can be populated.
        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            // A repository's work directory is the parent of its `.git` dir.
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();

            let key = RepositoryWorkDirectory(content_path.clone());
            if self.repository_entries.get(&key).is_none() {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    let repo_lock = repo.lock();
                    self.repository_entries.insert(
                        key.clone(),
                        RepositoryEntry {
                            git_dir_path: parent_path.clone(),
                            git_dir_entry_id: parent_entry.id,
                            work_directory: key,
                            scan_id: 0,
                            branch: repo_lock.branch_name().map(Into::into),
                        },
                    );
                    drop(repo_lock);

                    self.git_repositories.insert(parent_entry.id, repo)
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Gives `entry` a stable id: reuse the id of a recently-removed entry
    /// with the same inode, or else that of the existing entry at the same
    /// path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` and all of its descendants, remembering
    /// removed ids by inode so they can be reused if the files reappear, and
    /// bumping the scan id of any affected gitignore or repository record.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path tree into [before `path`], [the `path` subtree],
            // and [after], then stitch the outer parts back together.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest id observed per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the parent directory's ignore data as touched in this scan.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            // Mark the owning repository record as touched in this scan.
            let repo_entry_key = RepositoryWorkDirectory(path.parent().unwrap().into());
            self.snapshot
                .repository_entries
                .update(&repo_entry_key, |repo| repo.scan_id = self.snapshot.scan_id);
        }
    }

    /// Collects the inodes of all existing ancestor entries of `path`.
    /// Presumably used to avoid revisiting directories already on the current
    /// scan path (e.g. through symlinks) — see `ScanJob::ancestor_inodes`.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignore files that apply to `abs_path`, applied
    /// from the outermost ancestor inward. Collapses to `IgnoreStack::all()`
    /// as soon as some ancestor (or the path itself) is ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1842
1843async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1844 let contents = fs.load(abs_path).await?;
1845 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1846 let mut builder = GitignoreBuilder::new(parent);
1847 for line in contents.lines() {
1848 builder.add_line(Some(abs_path.into()), line)?;
1849 }
1850 Ok(builder.build()?)
1851}
1852
1853impl WorktreeId {
1854 pub fn from_usize(handle_id: usize) -> Self {
1855 Self(handle_id)
1856 }
1857
1858 pub(crate) fn from_proto(id: u64) -> Self {
1859 Self(id as usize)
1860 }
1861
1862 pub fn to_proto(&self) -> u64 {
1863 self.0 as u64
1864 }
1865
1866 pub fn to_usize(&self) -> usize {
1867 self.0
1868 }
1869}
1870
1871impl fmt::Display for WorktreeId {
1872 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1873 self.0.fmt(f)
1874 }
1875}
1876
1877impl Deref for Worktree {
1878 type Target = Snapshot;
1879
1880 fn deref(&self) -> &Self::Target {
1881 match self {
1882 Worktree::Local(worktree) => &worktree.snapshot,
1883 Worktree::Remote(worktree) => &worktree.snapshot,
1884 }
1885 }
1886}
1887
1888impl Deref for LocalWorktree {
1889 type Target = LocalSnapshot;
1890
1891 fn deref(&self) -> &Self::Target {
1892 &self.snapshot
1893 }
1894}
1895
1896impl Deref for RemoteWorktree {
1897 type Target = Snapshot;
1898
1899 fn deref(&self) -> &Self::Target {
1900 &self.snapshot
1901 }
1902}
1903
1904impl fmt::Debug for LocalWorktree {
1905 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1906 self.snapshot.fmt(f)
1907 }
1908}
1909
1910impl fmt::Debug for Snapshot {
1911 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1912 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1913 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1914
1915 impl<'a> fmt::Debug for EntriesByPath<'a> {
1916 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1917 f.debug_map()
1918 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1919 .finish()
1920 }
1921 }
1922
1923 impl<'a> fmt::Debug for EntriesById<'a> {
1924 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1925 f.debug_list().entries(self.0.iter()).finish()
1926 }
1927 }
1928
1929 f.debug_struct("Snapshot")
1930 .field("id", &self.id)
1931 .field("root_name", &self.root_name)
1932 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1933 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1934 .finish()
1935 }
1936}
1937
/// A handle to a file within a worktree, used by buffers to refer back to
/// their on-disk (or remote) location.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree that contains this file.
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time.
    pub mtime: SystemTime,
    /// Id of the corresponding worktree entry.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether this file belongs to a local (vs. remote) worktree.
    pub(crate) is_local: bool,
    /// Whether the file has been deleted from the worktree.
    pub(crate) is_deleted: bool,
}
1947
impl language::File for File {
    /// Returns `self` as a `LocalFile` when backed by a local worktree.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    /// Last-known modification time of the file.
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// Worktree-relative path of the file.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Path to display to the user: the worktree root (its name when the
    /// worktree is visible, otherwise its absolute path with `$HOME`
    /// abbreviated to `~` for local worktrees) joined with the file's
    /// worktree-relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means the file *is* the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    /// Whether the file has been deleted from its worktree.
    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    /// Enables downcasting back to the concrete `File` type.
    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission to collaborators.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            // The model handle's id doubles as the worktree id on the wire
            // (see `WorktreeId::from_usize`).
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
2015
impl language::LocalFile for File {
    /// Absolute path of the file on the local filesystem.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Reads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that the buffer for this file was reloaded
    /// from disk. A no-op unless the worktree is currently shared.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2059
2060impl File {
2061 pub fn from_proto(
2062 proto: rpc::proto::File,
2063 worktree: ModelHandle<Worktree>,
2064 cx: &AppContext,
2065 ) -> Result<Self> {
2066 let worktree_id = worktree
2067 .read(cx)
2068 .as_remote()
2069 .ok_or_else(|| anyhow!("not remote"))?
2070 .id();
2071
2072 if worktree_id.to_proto() != proto.worktree_id {
2073 return Err(anyhow!("worktree id does not match file"));
2074 }
2075
2076 Ok(Self {
2077 worktree,
2078 path: Path::new(&proto.path).into(),
2079 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2080 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2081 is_local: false,
2082 is_deleted: proto.is_deleted,
2083 })
2084 }
2085
2086 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2087 file.and_then(|f| f.as_any().downcast_ref())
2088 }
2089
2090 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2091 self.worktree.read(cx).id()
2092 }
2093
2094 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2095 if self.is_deleted {
2096 None
2097 } else {
2098 Some(self.entry_id)
2099 }
2100 }
2101}
2102
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable project-wide id of this entry.
    pub id: ProjectEntryId,
    /// Whether this is a file, a scanned directory, or a pending directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode of the entry.
    pub inode: u64,
    /// Last-known modification time.
    pub mtime: SystemTime,
    /// Whether the entry is a symbolic link.
    pub is_symlink: bool,
    /// Whether the entry is excluded by gitignore rules.
    pub is_ignored: bool,
}
2113
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not been
    /// scanned yet.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A regular file, carrying a precomputed character bag for fuzzy matching.
    File(CharBag),
}
2120
/// Describes how a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// The path was created.
    Added,
    /// The path was removed.
    Removed,
    /// The path's entry was modified.
    Updated,
    /// The path was created or modified, when the scanner cannot tell which
    /// (e.g. for events observed before the previous state was loaded).
    AddedOrUpdated,
}
2128
2129impl Entry {
2130 fn new(
2131 path: Arc<Path>,
2132 metadata: &fs::Metadata,
2133 next_entry_id: &AtomicUsize,
2134 root_char_bag: CharBag,
2135 ) -> Self {
2136 Self {
2137 id: ProjectEntryId::new(next_entry_id),
2138 kind: if metadata.is_dir {
2139 EntryKind::PendingDir
2140 } else {
2141 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2142 },
2143 path,
2144 inode: metadata.inode,
2145 mtime: metadata.mtime,
2146 is_symlink: metadata.is_symlink,
2147 is_ignored: false,
2148 }
2149 }
2150
2151 pub fn is_dir(&self) -> bool {
2152 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2153 }
2154
2155 pub fn is_file(&self) -> bool {
2156 matches!(self.kind, EntryKind::File(_))
2157 }
2158}
2159
2160impl sum_tree::Item for Entry {
2161 type Summary = EntrySummary;
2162
2163 fn summary(&self) -> Self::Summary {
2164 let visible_count = if self.is_ignored { 0 } else { 1 };
2165 let file_count;
2166 let visible_file_count;
2167 if self.is_file() {
2168 file_count = 1;
2169 visible_file_count = visible_count;
2170 } else {
2171 file_count = 0;
2172 visible_file_count = 0;
2173 }
2174
2175 EntrySummary {
2176 max_path: self.path.clone(),
2177 count: 1,
2178 visible_count,
2179 file_count,
2180 visible_file_count,
2181 }
2182 }
2183}
2184
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and therefore ordered) by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2192
/// Aggregated statistics over a range of `Entry` items, used by the
/// path-keyed sum-tree for seeking and counting.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// Largest (right-most) path in the summarized range.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries that are not git-ignored.
    visible_count: usize,
    /// Entries that are regular files.
    file_count: usize,
    /// File entries that are not git-ignored.
    visible_file_count: usize,
}
2201
2202impl Default for EntrySummary {
2203 fn default() -> Self {
2204 Self {
2205 max_path: Arc::from(Path::new("")),
2206 count: 0,
2207 visible_count: 0,
2208 file_count: 0,
2209 visible_file_count: 0,
2210 }
2211 }
2212}
2213
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Accumulates `rhs`, the summary of items to the right: counts add up
    /// and `max_path` becomes the right-hand side's maximum.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2225
/// Secondary index record mapping a stable entry id to its current path,
/// stored in the id-keyed sum-tree.
#[derive(Clone, Debug)]
struct PathEntry {
    /// Stable id of the entry.
    id: ProjectEntryId,
    /// Current worktree-relative path.
    path: Arc<Path>,
    /// Whether the entry is git-ignored.
    is_ignored: bool,
    /// Scan in which this record was last updated.
    scan_id: usize,
}
2233
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// A single record's summary is just its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2241
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Records are keyed (and therefore ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2249
/// Summary for the id-keyed sum-tree: the largest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    /// Maximum id among the summarized records.
    max_id: ProjectEntryId,
}
2254
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The right-hand summary always holds the larger ids, so just take it.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2262
/// Allows seeking the id-keyed tree directly by `ProjectEntryId`.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2268
/// Ordering key for the path-keyed entry tree: a worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2271
2272impl Default for PathKey {
2273 fn default() -> Self {
2274 Self(Path::new("").into())
2275 }
2276}
2277
/// Allows seeking the path-keyed tree directly by `PathKey`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2283
/// Scans a local worktree on a background task, keeping `snapshot` up to
/// date with filesystem events and explicit refresh requests.
struct BackgroundScanner {
    /// Shared, mutable view of the worktree being scanned.
    snapshot: Mutex<LocalSnapshot>,
    /// Filesystem abstraction used for all disk access.
    fs: Arc<dyn Fs>,
    /// Channel for reporting scan progress/state to the foreground.
    status_updates_tx: UnboundedSender<ScanState>,
    /// Executor used to parallelize scanning work.
    executor: Arc<executor::Background>,
    /// Requests to rescan specific paths, each paired with a barrier to
    /// signal completion.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// Snapshot and changed paths from the previous update cycle, used for
    /// diffing.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    /// Set once the initial full scan has completed.
    finished_initial_scan: bool,
}
2293
2294impl BackgroundScanner {
    /// Creates a scanner for `snapshot`. `prev_state` starts as a clone of
    /// the initial snapshot so the first round of change events can be
    /// diffed against it.
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            // Clone the plain snapshot before moving the full local snapshot
            // into the mutex below.
            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }
2312
    /// Drives the scanner: performs the initial recursive scan of the root,
    /// then processes filesystem events and refresh requests until both the
    /// event stream and the request channel are closed.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore matches the root itself, mark the root
            // entry as ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping our sender lets `scan_dirs` terminate once all queued jobs
        // (including those enqueued by workers) have been processed.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    // Coalesce every currently-available batch of events into
                    // a single pass over the snapshot.
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2404
    /// Re-scans the given paths, then sends a status update that releases
    /// `barrier`. Returns false when the status channel is closed (i.e. the
    /// worktree has been dropped).
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2409
    /// Handles a batch of filesystem event paths: reloads the affected entries
    /// (recursively re-scanning changed directories), refreshes ignore
    /// statuses, prunes state for removed entries, and sends a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            paths.sort_unstable();
            // Record which paths changed so the next status update can diff
            // only the affected regions of the snapshot.
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop git repository state for entries that no longer exist.
        // `mem::take` lets us call `contains_entry` on the snapshot while
        // retaining the map.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|project_entry_id, _| snapshot.contains_entry(*project_entry_id));
        snapshot.git_repositories = git_repositories;

        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| snapshot.contains_entry(entry.git_dir_entry_id));
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }
2441
    /// Drains `scan_jobs_rx` using one worker task per CPU. Workers also
    /// service refresh requests (with priority over scan jobs) and coordinate
    /// through an atomic counter so only one of them emits each periodic
    /// progress update.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // We won the race: send the update.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent it.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2514
    /// Diffs the current snapshot against the one sent with the previous
    /// update and sends the resulting change set to the worktree. Returns
    /// false when the receiving side has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        // Swap the stored snapshot with the current one, so the next update
        // diffs against the state we are about to send.
        let mut old_snapshot = snapshot.snapshot.clone();
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2531
    /// Scans the directory described by `job`: reads its children, applies
    /// any `.gitignore` found there, records the new entries in the snapshot,
    /// and enqueues scan jobs for subdirectories (skipping symlink cycles).
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per child directory; `None` marks a directory we refuse to
        // recurse into because it would form a symlink cycle.
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and stat'ing; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // it sorts near the start of the directory, so such entries should rarely be
                // numerous. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    // Directories and job slots were pushed in lockstep, so
                    // each dir entry matches the next slot in `new_jobs`.
                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue subdirectory jobs only after the snapshot is updated.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2657
    /// Re-stats the given absolute paths and updates the snapshot to match the
    /// filesystem. When `scan_queue_tx` is provided, changed directories are
    /// enqueued for recursive re-scanning. Returns the corresponding
    /// worktree-relative paths, or `None` if the root could not be
    /// canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Drop any path whose ancestor is also in the list; reloading the
        // ancestor covers it.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    let scan_id = snapshot.scan_id;

                    // If the path lies within a repository's `.git` directory,
                    // reload that repository's index and branch metadata.
                    let repo_with_path_in_dotgit = snapshot.repo_for_metadata(&path);
                    if let Some((key, repo)) = repo_with_path_in_dotgit {
                        let repo = repo.lock();
                        repo.reload_index();
                        let branch = repo.branch_name();

                        snapshot.repository_entries.update(&key, |entry| {
                            entry.scan_id = scan_id;
                            entry.branch = branch.map(Into::into)
                        });
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Recurse into changed directories, guarding against
                        // symlink cycles via the ancestor inode set.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                // Already removed above; nothing to insert.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2759
    /// Recomputes ignore statuses after `.gitignore` files have changed:
    /// removes state for deleted ignore files, then re-applies ignore stacks
    /// to all affected subtrees in parallel.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A scan id newer than the last completed scan means this
                // gitignore changed during the current batch of events.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                // Forget gitignores whose files no longer exist.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both our local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of this path; processing the ancestor visits
            // them recursively.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2837
    /// Applies `job`'s ignore stack to the direct children of its directory,
    /// recording edits for entries whose ignored status changed and enqueuing
    /// child directories for recursive processing.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own gitignore, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record an edit when the ignored status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2880
    /// Diffs `old_snapshot` against `new_snapshot` around each of the given
    /// event paths, classifying every differing entry as added, removed, or
    /// updated.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            // Merge-walk the two path-sorted entry sequences in lockstep.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path and all of its descendants.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2947
    /// Resolves after the progress-update interval elapses, or never resolves
    /// when `running` is false (so callers can poll it unconditionally in a
    /// `select_biased!`).
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        // Under a fake filesystem (tests), use the deterministic executor's
        // random delay instead of wall-clock time.
        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
2960}
2961
2962fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2963 let mut result = root_char_bag;
2964 result.extend(
2965 path.to_string_lossy()
2966 .chars()
2967 .map(|c| c.to_ascii_lowercase()),
2968 );
2969 result
2970}
2971
/// A unit of work for the directory scanner: one directory to read.
struct ScanJob {
    abs_path: Arc<Path>,
    // Path relative to the worktree root.
    path: Arc<Path>,
    // Gitignores in effect for this directory's contents.
    ignore_stack: Arc<IgnoreStack>,
    // Handle to the scan queue, used to enqueue jobs for subdirectories.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
2979
/// A unit of work for re-applying ignore statuses: one directory whose
/// effective gitignores have changed.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    // Gitignores in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Handle to the queue, used to enqueue jobs for subdirectories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2985
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all pending FS events for the worktree have been
    /// delivered and processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2993
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for the deletion to be observed too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally wait for any scans triggered along the way to finish.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3034
/// Running totals accumulated while seeking through the entry tree; used to
/// translate between tree positions and filtered entry offsets.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // The rightmost path visited so far.
    max_path: &'a Path,
    // Total entries, including ignored ones.
    count: usize,
    // Entries that are not ignored.
    visible_count: usize,
    // File entries, including ignored ones.
    file_count: usize,
    // File entries that are not ignored.
    visible_file_count: usize,
}
3043
3044impl<'a> TraversalProgress<'a> {
3045 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3046 match (include_ignored, include_dirs) {
3047 (true, true) => self.count,
3048 (true, false) => self.file_count,
3049 (false, true) => self.visible_count,
3050 (false, false) => self.visible_file_count,
3051 }
3052 }
3053}
3054
// Accumulates entry counts and the max path while the cursor descends the tree.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3064
3065impl<'a> Default for TraversalProgress<'a> {
3066 fn default() -> Self {
3067 Self {
3068 max_path: Path::new(""),
3069 count: 0,
3070 visible_count: 0,
3071 file_count: 0,
3072 visible_file_count: 0,
3073 }
3074 }
3075}
3076
/// A cursor over a snapshot's entries in path order, optionally filtering out
/// directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3082
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward to the entry at the given filtered offset. Returns
    /// whether the cursor landed on an entry.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past all descendants of the current entry, stopping at the next
    /// entry outside of it that matches this traversal's filters.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            // If the landing entry is filtered out, loop to skip past it too.
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The current position, counting only entries that match this
    /// traversal's filters.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3128
3129impl<'a> Iterator for Traversal<'a> {
3130 type Item = &'a Entry;
3131
3132 fn next(&mut self) -> Option<Self::Item> {
3133 if let Some(item) = self.entry() {
3134 self.advance();
3135 Some(item)
3136 } else {
3137 None
3138 }
3139 }
3140}
3141
/// Seek targets for a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path.
    Path(&'a Path),
    // Seek to the first entry after the given path and all of its descendants.
    PathSuccessor(&'a Path),
    // Seek to the entry at the given filtered offset.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3152
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // The "successor" is reached as soon as the cursor's max path
                // is no longer inside `path`; until then, keep seeking.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            // Compare against the count of entries matching the filters.
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3175
/// Iterates over the direct children of a directory entry, skipping each
/// child's own descendants.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3180
3181impl<'a> Iterator for ChildEntriesIter<'a> {
3182 type Item = &'a Entry;
3183
3184 fn next(&mut self) -> Option<Self::Item> {
3185 if let Some(item) = self.traversal.entry() {
3186 if item.path.starts_with(&self.parent_path) {
3187 self.traversal.advance_to_sibling();
3188 return Some(item);
3189 }
3190 }
3191 None
3192 }
3193}
3194
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes a worktree entry for transmission to remote collaborators.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths are transmitted as (potentially lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3208
3209impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3210 type Error = anyhow::Error;
3211
3212 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3213 if let Some(mtime) = entry.mtime {
3214 let kind = if entry.is_dir {
3215 EntryKind::Dir
3216 } else {
3217 let mut char_bag = *root_char_bag;
3218 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3219 EntryKind::File(char_bag)
3220 };
3221 let path: Arc<Path> = PathBuf::from(entry.path).into();
3222 Ok(Entry {
3223 id: ProjectEntryId::from_proto(entry.id),
3224 kind,
3225 path,
3226 inode: entry.inode,
3227 mtime: mtime.into(),
3228 is_symlink: entry.is_symlink,
3229 is_ignored: entry.is_ignored,
3230 })
3231 } else {
3232 Err(anyhow!(
3233 "missing mtime in remote worktree entry {:?}",
3234 entry.path
3235 ))
3236 }
3237 }
3238}
3239
3240#[cfg(test)]
3241mod tests {
3242 use super::*;
3243 use fs::{FakeFs, RealFs};
3244 use gpui::{executor::Deterministic, TestAppContext};
3245 use pretty_assertions::assert_eq;
3246 use rand::prelude::*;
3247 use serde_json::json;
3248 use std::{env, fmt::Write};
3249 use util::{http::FakeHttpClient, test::temp_tree};
3250
3251 #[gpui::test]
3252 async fn test_traversal(cx: &mut TestAppContext) {
3253 let fs = FakeFs::new(cx.background());
3254 fs.insert_tree(
3255 "/root",
3256 json!({
3257 ".gitignore": "a/b\n",
3258 "a": {
3259 "b": "",
3260 "c": "",
3261 }
3262 }),
3263 )
3264 .await;
3265
3266 let http_client = FakeHttpClient::with_404_response();
3267 let client = cx.read(|cx| Client::new(http_client, cx));
3268
3269 let tree = Worktree::local(
3270 client,
3271 Path::new("/root"),
3272 true,
3273 fs,
3274 Default::default(),
3275 &mut cx.to_async(),
3276 )
3277 .await
3278 .unwrap();
3279 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3280 .await;
3281
3282 tree.read_with(cx, |tree, _| {
3283 assert_eq!(
3284 tree.entries(false)
3285 .map(|entry| entry.path.as_ref())
3286 .collect::<Vec<_>>(),
3287 vec![
3288 Path::new(""),
3289 Path::new(".gitignore"),
3290 Path::new("a"),
3291 Path::new("a/c"),
3292 ]
3293 );
3294 assert_eq!(
3295 tree.entries(true)
3296 .map(|entry| entry.path.as_ref())
3297 .collect::<Vec<_>>(),
3298 vec![
3299 Path::new(""),
3300 Path::new(".gitignore"),
3301 Path::new("a"),
3302 Path::new("a/b"),
3303 Path::new("a/c"),
3304 ]
3305 );
3306 })
3307 }
3308
3309 #[gpui::test(iterations = 10)]
3310 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3311 let fs = FakeFs::new(cx.background());
3312 fs.insert_tree(
3313 "/root",
3314 json!({
3315 "lib": {
3316 "a": {
3317 "a.txt": ""
3318 },
3319 "b": {
3320 "b.txt": ""
3321 }
3322 }
3323 }),
3324 )
3325 .await;
3326 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3327 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3328
3329 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3330 let tree = Worktree::local(
3331 client,
3332 Path::new("/root"),
3333 true,
3334 fs.clone(),
3335 Default::default(),
3336 &mut cx.to_async(),
3337 )
3338 .await
3339 .unwrap();
3340
3341 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3342 .await;
3343
3344 tree.read_with(cx, |tree, _| {
3345 assert_eq!(
3346 tree.entries(false)
3347 .map(|entry| entry.path.as_ref())
3348 .collect::<Vec<_>>(),
3349 vec![
3350 Path::new(""),
3351 Path::new("lib"),
3352 Path::new("lib/a"),
3353 Path::new("lib/a/a.txt"),
3354 Path::new("lib/a/lib"),
3355 Path::new("lib/b"),
3356 Path::new("lib/b/b.txt"),
3357 Path::new("lib/b/lib"),
3358 ]
3359 );
3360 });
3361
3362 fs.rename(
3363 Path::new("/root/lib/a/lib"),
3364 Path::new("/root/lib/a/lib-2"),
3365 Default::default(),
3366 )
3367 .await
3368 .unwrap();
3369 executor.run_until_parked();
3370 tree.read_with(cx, |tree, _| {
3371 assert_eq!(
3372 tree.entries(false)
3373 .map(|entry| entry.path.as_ref())
3374 .collect::<Vec<_>>(),
3375 vec![
3376 Path::new(""),
3377 Path::new("lib"),
3378 Path::new("lib/a"),
3379 Path::new("lib/a/a.txt"),
3380 Path::new("lib/a/lib-2"),
3381 Path::new("lib/b"),
3382 Path::new("lib/b/b.txt"),
3383 Path::new("lib/b/lib"),
3384 ]
3385 );
3386 });
3387 }
3388
3389 #[gpui::test]
3390 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3391 let parent_dir = temp_tree(json!({
3392 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3393 "tree": {
3394 ".git": {},
3395 ".gitignore": "ignored-dir\n",
3396 "tracked-dir": {
3397 "tracked-file1": "",
3398 "ancestor-ignored-file1": "",
3399 },
3400 "ignored-dir": {
3401 "ignored-file1": ""
3402 }
3403 }
3404 }));
3405 let dir = parent_dir.path().join("tree");
3406
3407 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3408
3409 let tree = Worktree::local(
3410 client,
3411 dir.as_path(),
3412 true,
3413 Arc::new(RealFs),
3414 Default::default(),
3415 &mut cx.to_async(),
3416 )
3417 .await
3418 .unwrap();
3419 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3420 .await;
3421 tree.flush_fs_events(cx).await;
3422 cx.read(|cx| {
3423 let tree = tree.read(cx);
3424 assert!(
3425 !tree
3426 .entry_for_path("tracked-dir/tracked-file1")
3427 .unwrap()
3428 .is_ignored
3429 );
3430 assert!(
3431 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3432 .unwrap()
3433 .is_ignored
3434 );
3435 assert!(
3436 tree.entry_for_path("ignored-dir/ignored-file1")
3437 .unwrap()
3438 .is_ignored
3439 );
3440 });
3441
3442 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3443 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3444 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3445 tree.flush_fs_events(cx).await;
3446 cx.read(|cx| {
3447 let tree = tree.read(cx);
3448 assert!(
3449 !tree
3450 .entry_for_path("tracked-dir/tracked-file2")
3451 .unwrap()
3452 .is_ignored
3453 );
3454 assert!(
3455 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3456 .unwrap()
3457 .is_ignored
3458 );
3459 assert!(
3460 tree.entry_for_path("ignored-dir/ignored-file2")
3461 .unwrap()
3462 .is_ignored
3463 );
3464 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3465 });
3466 }
3467
3468 #[gpui::test]
3469 async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3470 let root = temp_tree(json!({
3471 "dir1": {
3472 ".git": {},
3473 "deps": {
3474 "dep1": {
3475 ".git": {},
3476 "src": {
3477 "a.txt": ""
3478 }
3479 }
3480 },
3481 "src": {
3482 "b.txt": ""
3483 }
3484 },
3485 "c.txt": "",
3486 }));
3487
3488 let http_client = FakeHttpClient::with_404_response();
3489 let client = cx.read(|cx| Client::new(http_client, cx));
3490 let tree = Worktree::local(
3491 client,
3492 root.path(),
3493 true,
3494 Arc::new(RealFs),
3495 Default::default(),
3496 &mut cx.to_async(),
3497 )
3498 .await
3499 .unwrap();
3500
3501 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3502 .await;
3503 tree.flush_fs_events(cx).await;
3504
3505 tree.read_with(cx, |tree, _cx| {
3506 let tree = tree.as_local().unwrap();
3507
3508 assert!(tree.repo_for("c.txt".as_ref()).is_none());
3509
3510 let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3511 assert_eq!(entry.work_directory.0.as_ref(), Path::new("dir1"));
3512 assert_eq!(entry.git_dir_path.as_ref(), Path::new("dir1/.git"));
3513
3514 let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
3515 assert_eq!(entry.work_directory.deref(), Path::new("dir1/deps/dep1"));
3516 assert_eq!(
3517 entry.git_dir_path.as_ref(),
3518 Path::new("dir1/deps/dep1/.git"),
3519 );
3520 });
3521
3522 let original_scan_id = tree.read_with(cx, |tree, _cx| {
3523 let tree = tree.as_local().unwrap();
3524 let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3525 entry.scan_id
3526 });
3527
3528 std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
3529 tree.flush_fs_events(cx).await;
3530
3531 tree.read_with(cx, |tree, _cx| {
3532 let tree = tree.as_local().unwrap();
3533 let new_scan_id = {
3534 let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3535 entry.scan_id
3536 };
3537 assert_ne!(
3538 original_scan_id, new_scan_id,
3539 "original {original_scan_id}, new {new_scan_id}"
3540 );
3541 });
3542
3543 std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
3544 tree.flush_fs_events(cx).await;
3545
3546 tree.read_with(cx, |tree, _cx| {
3547 let tree = tree.as_local().unwrap();
3548
3549 assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
3550 });
3551 }
3552
3553 #[test]
3554 fn test_changed_repos() {
3555 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> RepositoryEntry {
3556 RepositoryEntry {
3557 scan_id,
3558 git_dir_path: git_dir_path.as_ref().into(),
3559 git_dir_entry_id: ProjectEntryId(0),
3560 work_directory: RepositoryWorkDirectory(
3561 Path::new(&format!("don't-care-{}", scan_id)).into(),
3562 ),
3563 branch: None,
3564 }
3565 }
3566
3567 let mut prev_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3568 prev_repos.insert(
3569 RepositoryWorkDirectory(Path::new("don't-care-1").into()),
3570 fake_entry("/.git", 0),
3571 );
3572 prev_repos.insert(
3573 RepositoryWorkDirectory(Path::new("don't-care-2").into()),
3574 fake_entry("/a/.git", 0),
3575 );
3576 prev_repos.insert(
3577 RepositoryWorkDirectory(Path::new("don't-care-3").into()),
3578 fake_entry("/a/b/.git", 0),
3579 );
3580
3581 let mut new_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3582 new_repos.insert(
3583 RepositoryWorkDirectory(Path::new("don't-care-4").into()),
3584 fake_entry("/a/.git", 1),
3585 );
3586 new_repos.insert(
3587 RepositoryWorkDirectory(Path::new("don't-care-5").into()),
3588 fake_entry("/a/b/.git", 0),
3589 );
3590 new_repos.insert(
3591 RepositoryWorkDirectory(Path::new("don't-care-6").into()),
3592 fake_entry("/a/c/.git", 0),
3593 );
3594
3595 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3596
3597 // Deletion retained
3598 assert!(res
3599 .iter()
3600 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3601 .is_some());
3602
3603 // Update retained
3604 assert!(res
3605 .iter()
3606 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3607 .is_some());
3608
3609 // Addition retained
3610 assert!(res
3611 .iter()
3612 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3613 .is_some());
3614
3615 // Nochange, not retained
3616 assert!(res
3617 .iter()
3618 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3619 .is_none());
3620 }
3621
3622 #[gpui::test]
3623 async fn test_write_file(cx: &mut TestAppContext) {
3624 let dir = temp_tree(json!({
3625 ".git": {},
3626 ".gitignore": "ignored-dir\n",
3627 "tracked-dir": {},
3628 "ignored-dir": {}
3629 }));
3630
3631 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3632
3633 let tree = Worktree::local(
3634 client,
3635 dir.path(),
3636 true,
3637 Arc::new(RealFs),
3638 Default::default(),
3639 &mut cx.to_async(),
3640 )
3641 .await
3642 .unwrap();
3643 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3644 .await;
3645 tree.flush_fs_events(cx).await;
3646
3647 tree.update(cx, |tree, cx| {
3648 tree.as_local().unwrap().write_file(
3649 Path::new("tracked-dir/file.txt"),
3650 "hello".into(),
3651 Default::default(),
3652 cx,
3653 )
3654 })
3655 .await
3656 .unwrap();
3657 tree.update(cx, |tree, cx| {
3658 tree.as_local().unwrap().write_file(
3659 Path::new("ignored-dir/file.txt"),
3660 "world".into(),
3661 Default::default(),
3662 cx,
3663 )
3664 })
3665 .await
3666 .unwrap();
3667
3668 tree.read_with(cx, |tree, _| {
3669 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3670 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3671 assert!(!tracked.is_ignored);
3672 assert!(ignored.is_ignored);
3673 });
3674 }
3675
3676 #[gpui::test(iterations = 30)]
3677 async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
3678 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3679
3680 let fs = FakeFs::new(cx.background());
3681 fs.insert_tree(
3682 "/root",
3683 json!({
3684 "b": {},
3685 "c": {},
3686 "d": {},
3687 }),
3688 )
3689 .await;
3690
3691 let tree = Worktree::local(
3692 client,
3693 "/root".as_ref(),
3694 true,
3695 fs,
3696 Default::default(),
3697 &mut cx.to_async(),
3698 )
3699 .await
3700 .unwrap();
3701
3702 let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3703
3704 let entry = tree
3705 .update(cx, |tree, cx| {
3706 tree.as_local_mut()
3707 .unwrap()
3708 .create_entry("a/e".as_ref(), true, cx)
3709 })
3710 .await
3711 .unwrap();
3712 assert!(entry.is_dir());
3713
3714 cx.foreground().run_until_parked();
3715 tree.read_with(cx, |tree, _| {
3716 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3717 });
3718
3719 let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3720 let update = snapshot2.build_update(&snapshot1, 0, 0, true);
3721 snapshot1.apply_remote_update(update).unwrap();
3722 assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
3723 }
3724
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Number of random worktree mutations and the number of entries to
        // seed the fake fs with; both overridable via environment variables.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot captured before the initial scan completes; it is
        // kept in sync below solely by applying serialized remote updates.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            // A mutation can legitimately fail (e.g. racing the scanner), so
            // errors are logged rather than unwrapped.
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Periodically verify that applying the update stream reproduces
            // the worktree's current snapshot exactly.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Let the initial scan finish, then check invariants and convergence
        // one final time.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3801
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Mutation count and initial tree size, overridable via env vars.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            // Mirror every change event into `paths` and assert it matches the
            // worktree's own sorted path list after each batch of changes.
            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Binary search yields the insertion/removal position
                        // whether or not the path is currently present.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Interleave fs mutations with partial event flushes so the scanner
        // observes the changes in arbitrarily-sized batches.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            // Occasionally store an intermediate snapshot to test update
            // application against at the end.
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Deliver all remaining fs events and let the worktree settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A worktree scanned from scratch over the same fs must agree with
        // the incrementally-maintained one.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored snapshot must converge to the final snapshot when the
        // update built between them is applied, with or without ignored
        // entries included.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the stored snapshot first, since
                // the update below will not mention them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
3956
    /// Applies one random mutation to the worktree — delete, rename, or
    /// create/overwrite an entry — and returns the resulting task.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3: delete the chosen entry (never the root, guarded below).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3 (34..=66 in practice, since the previous arm matched
            // first): rename the entry under some other directory.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into its own subtree.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Otherwise (including when the root was chosen above): create a
            // child under a directory, or overwrite a file with empty content.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4016
    /// Performs one random mutation on the fake filesystem under `root_path`:
    /// with probability `insertion_probability`, creates a new file or dir;
    /// with a small chance, writes a random `.gitignore`; otherwise renames
    /// or deletes an existing entry.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition every existing path under the root into files and dirs.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Force an insertion when the tree contains only the root directory.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Write a .gitignore into a random directory, listing a random
            // subset of the files and dirs beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // Both lists include ignore_dir_path's own descendants (and, for
            // dirs, the directory itself).
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            // NOTE(review): exclusive upper bound here (vs inclusive for
            // files above) means not every subdir can be chosen at once —
            // presumably to avoid self-ignoring the .gitignore's own dir
            // (whose stripped prefix is empty); confirm intent.
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // One ignore pattern per line, relative to the .gitignore's dir.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Pick an existing file, or any dir except the root (dirs[0]),
            // to rename or delete.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Destination parent must not be inside the moved entry.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally move the entry *onto* an existing directory,
                // replacing that directory entirely.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4175
4176 fn gen_name(rng: &mut impl Rng) -> String {
4177 (0..6)
4178 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4179 .map(char::from)
4180 .collect()
4181 }
4182
    impl LocalSnapshot {
        /// Test-only consistency checks: verifies that the two entry indexes
        /// agree, that the file iterators match the path-ordered entries, that
        /// child-entry traversal matches the flat entry order, and that every
        /// loaded gitignore still has corresponding entries in the tree.
        fn check_invariants(&self) {
            // entries_by_path and entries_by_id must contain exactly the same
            // (path, id) pairs (the BTreeSet sorts the id-keyed side by path).
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // The file iterators must yield exactly the file entries of
            // entries_by_path, in order; visible_files skips ignored ones.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            // Neither iterator may have leftover entries.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via child_entries; inserting each node's children
            // at the saved stack position yields depth-first order, which must
            // match the flat entries_by_path order.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The public traversal API must agree with the raw cursor.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every directory with a loaded .gitignore must still exist in
            // the tree, along with the .gitignore file itself.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples
        /// sorted by path, optionally including ignored entries. Used to
        /// compare snapshots for equality in tests.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4257}