1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{
10 repository::{GitFileStatus, GitRepository, RepoPath},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use smol::channel::{self, Sender};
40use std::{
41 any::Any,
42 cmp::{self, Ordering},
43 convert::TryFrom,
44 ffi::OsStr,
45 fmt,
46 future::Future,
47 mem,
48 ops::{Deref, DerefMut},
49 path::{Path, PathBuf},
50 pin::Pin,
51 sync::{
52 atomic::{AtomicUsize, Ordering::SeqCst},
53 Arc,
54 },
55 time::{Duration, SystemTime},
56};
57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
58use util::{paths::HOME, ResultExt, TakeUntilExt, TryFutureExt};
59
/// Identifies a worktree within a project. Locally this is derived from the
/// model id of the worktree's `ModelHandle` (see `WorktreeId::from_usize` usage
/// in `Worktree::local`); remotely it is the id sent in `proto::WorktreeMetadata`.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A directory tree being tracked by the project: either one that lives on this
/// machine (`Local`, backed by a filesystem scanner) or one mirrored from a
/// collaborator over RPC (`Remote`, updated via `proto::UpdateWorktree` messages).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
pub struct LocalWorktree {
    // Latest snapshot received from the background scanner.
    snapshot: LocalSnapshot,
    // Used to ask the background scanner to rescan specific paths; the barrier
    // sender lets callers await completion of that rescan.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch channel flipped by `ScanState` messages; `scan_complete` awaits it.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    // Keeps the background scanner alive for the lifetime of the worktree.
    _background_scanner_task: Task<()>,
    // Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    // Per-path diagnostics, keyed by worktree-relative path, then sorted by
    // language server id (see `update_diagnostics`, which binary-searches).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    // Error/warning counts per path and language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    // Whether this worktree shows up in the UI (returned by `is_visible`).
    visible: bool,
}
86
pub struct RemoteWorktree {
    // Foreground copy of the snapshot, cloned from `background_snapshot`
    // whenever the background task applies an update.
    snapshot: Snapshot,
    // Snapshot mutated on a background task as `proto::UpdateWorktree`
    // messages arrive (see `Worktree::remote`).
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // Sink for incoming worktree updates; `None` once updates stop flowing.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters notified once a given scan id has been observed, kept in the
    // order they were registered (popped from the front as scans complete).
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
99
/// An immutable-ish view of a worktree's contents at a point in time. Cloned
/// freely; shared between the foreground model and background tasks.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root on disk.
    abs_path: Arc<Path>,
    // File name of the root, used for fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    // Entries indexed two ways: ordered by path, and by entry id.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Git repositories discovered in this worktree, keyed by work directory.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
122
123impl Snapshot {
124 pub fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
125 let mut max_len = 0;
126 let mut current_candidate = None;
127 for (work_directory, repo) in (&self.repository_entries).iter() {
128 if repo.contains(self, path) {
129 if work_directory.0.as_os_str().len() >= max_len {
130 current_candidate = Some(repo);
131 max_len = work_directory.0.as_os_str().len();
132 } else {
133 break;
134 }
135 }
136 }
137
138 current_candidate.map(|entry| entry.to_owned())
139 }
140}
141
/// A git repository as recorded in a `Snapshot`: where its work directory is,
/// which branch is checked out (if known), and the status of changed files.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    pub(crate) branch: Option<Arc<str>>,
    // File statuses keyed by repository-relative path.
    pub(crate) worktree_statuses: TreeMap<RepoPath, GitFileStatus>,
}
148
149fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
150 proto::GitStatus::from_i32(git_status).map(|status| match status {
151 proto::GitStatus::Added => GitFileStatus::Added,
152 proto::GitStatus::Modified => GitFileStatus::Modified,
153 proto::GitStatus::Conflict => GitFileStatus::Conflict,
154 })
155}
156
impl RepositoryEntry {
    /// The checked-out branch name, if one has been detected.
    pub fn branch(&self) -> Option<Arc<str>> {
        self.branch.clone()
    }

    /// The project entry id of this repository's work directory.
    pub fn work_directory_id(&self) -> ProjectEntryId {
        *self.work_directory
    }

    /// Resolves the work directory to its worktree-relative path, if the
    /// corresponding entry still exists in `snapshot`.
    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
        snapshot
            .entry_for_id(self.work_directory_id())
            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
    }

    /// Whether the worktree-relative `path` falls inside this repository's
    /// work directory.
    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
        self.work_directory.contains(snapshot, path)
    }

    /// The status of a single file, looked up by its exact repo-relative path.
    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| self.worktree_statuses.get(&repo_path))
            .cloned()
    }

    /// The aggregate status for `path` and everything beneath it: `Conflict`
    /// dominates, then `Added`, then `Modified`.
    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| {
                self.worktree_statuses
                    .get_from_while(&repo_path, |repo_path, key, _| key.starts_with(repo_path))
                    .map(|(_, status)| status)
                    // Short circuit once we've found the highest level
                    .take_until(|status| status == &&GitFileStatus::Conflict)
                    .reduce(
                        |status_first, status_second| match (status_first, status_second) {
                            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => {
                                &GitFileStatus::Conflict
                            }
                            (GitFileStatus::Added, _) | (_, GitFileStatus::Added) => {
                                &GitFileStatus::Added
                            }
                            _ => &GitFileStatus::Modified,
                        },
                    )
                    .copied()
            })
    }

    /// Builds a protobuf update describing how `other`'s statuses must change
    /// to match `self`: paths present (or changed) in `self` become updates,
    /// paths present only in `other` become removals. Relies on both status
    /// maps iterating in sorted path order (merge-join).
    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
        let mut removed_statuses: Vec<String> = Vec::new();

        let mut self_statuses = self.worktree_statuses.iter().peekable();
        let mut other_statuses = other.worktree_statuses.iter().peekable();
        loop {
            match (self_statuses.peek(), other_statuses.peek()) {
                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
                    match Ord::cmp(self_repo_path, other_repo_path) {
                        Ordering::Less => {
                            // Only in `self`: newly tracked status.
                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
                            self_statuses.next();
                        }
                        Ordering::Equal => {
                            // In both: emit only if the status actually changed.
                            if self_status != other_status {
                                updated_statuses
                                    .push(make_status_entry(self_repo_path, self_status));
                            }

                            self_statuses.next();
                            other_statuses.next();
                        }
                        Ordering::Greater => {
                            // Only in `other`: status was removed.
                            removed_statuses.push(make_repo_path(other_repo_path));
                            other_statuses.next();
                        }
                    }
                }
                (Some((self_repo_path, self_status)), None) => {
                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
                    self_statuses.next();
                }
                (None, Some((other_repo_path, _))) => {
                    removed_statuses.push(make_repo_path(other_repo_path));
                    other_statuses.next();
                }
                (None, None) => break,
            }
        }

        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id().to_proto(),
            branch: self.branch.as_ref().map(|str| str.to_string()),
            removed_worktree_repo_paths: removed_statuses,
            updated_worktree_statuses: updated_statuses,
        }
    }
}
256
257fn make_repo_path(path: &RepoPath) -> String {
258 path.as_os_str().to_string_lossy().to_string()
259}
260
261fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
262 proto::StatusEntry {
263 repo_path: make_repo_path(path),
264 status: match status {
265 GitFileStatus::Added => proto::GitStatus::Added.into(),
266 GitFileStatus::Modified => proto::GitStatus::Modified.into(),
267 GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
268 },
269 }
270}
271
272impl From<&RepositoryEntry> for proto::RepositoryEntry {
273 fn from(value: &RepositoryEntry) -> Self {
274 proto::RepositoryEntry {
275 work_directory_id: value.work_directory.to_proto(),
276 branch: value.branch.as_ref().map(|str| str.to_string()),
277 updated_worktree_statuses: value
278 .worktree_statuses
279 .iter()
280 .map(|(repo_path, status)| make_status_entry(repo_path, status))
281 .collect(),
282 removed_worktree_repo_paths: Default::default(),
283 }
284 }
285}
286
/// This path corresponds to the 'content path' (the folder that contains the .git).
/// Stored as a worktree-relative path and used as the key in
/// `Snapshot::repository_entries`.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
290
291impl Default for RepositoryWorkDirectory {
292 fn default() -> Self {
293 RepositoryWorkDirectory(Arc::from(Path::new("")))
294 }
295}
296
297impl AsRef<Path> for RepositoryWorkDirectory {
298 fn as_ref(&self) -> &Path {
299 self.0.as_ref()
300 }
301}
302
/// Newtype over the `ProjectEntryId` of a repository's work directory,
/// providing path-containment and relativization helpers against a `Snapshot`.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
305
306impl WorkDirectoryEntry {
307 // Note that these paths should be relative to the worktree root.
308 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
309 snapshot
310 .entry_for_id(self.0)
311 .map(|entry| path.starts_with(&entry.path))
312 .unwrap_or(false)
313 }
314
315 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
316 worktree.entry_for_id(self.0).and_then(|entry| {
317 path.strip_prefix(&entry.path)
318 .ok()
319 .map(move |path| path.into())
320 })
321 }
322}
323
// Lets a `WorkDirectoryEntry` be used anywhere a `ProjectEntryId` is expected.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
331
332impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
333 fn from(value: ProjectEntryId) -> Self {
334 WorkDirectoryEntry(value)
335 }
336}
337
/// A `Snapshot` augmented with local-only state: gitignore caches, discovered
/// git repositories, and bookkeeping used by the background scanner.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    // Parsed `.gitignore`s keyed by the absolute path of their parent
    // directory, paired with the scan id at which they were loaded.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    // work_directory_id
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Shared counter for allocating entry ids.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
348
/// Local-machine state for one git repository: a handle to the repo plus the
/// scan ids at which it was last (partially / fully) examined.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    pub(crate) scan_id: usize,
    pub(crate) full_scan_id: usize,
    // Shared, lock-protected handle to the underlying git repository.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
358
359impl LocalRepositoryEntry {
360 // Note that this path should be relative to the worktree root.
361 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
362 path.starts_with(self.git_dir_path.as_ref())
363 }
364}
365
// Allows a `LocalSnapshot` to be used wherever a plain `Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
373
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
379
/// Messages sent from the background scanner to the foreground worktree model
/// (consumed in `Worktree::local`'s `spawn_weak` loop).
enum ScanState {
    // A scan pass has begun; the worktree flips `is_scanning` to true.
    Started,
    // A new snapshot is available.
    Updated {
        snapshot: LocalSnapshot,
        // Paths that changed, with the kind of change.
        changes: HashMap<Arc<Path>, PathChange>,
        // Dropped by the receiver to release anyone awaiting this scan.
        barrier: Option<barrier::Sender>,
        // Whether more scanning is still in progress after this update.
        scanning: bool,
    },
}
389
/// State kept while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot so it can be forwarded to remote peers.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    resume_updates: watch::Sender<()>,
    // Keeps the remote-snapshot maintenance task alive while shared.
    _maintain_remote_snapshot: Task<Option<()>>,
}
396
/// Events emitted by a `Worktree` model to its observers.
pub enum Event {
    // Entries changed on disk (or a remote snapshot was applied).
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    // Git repositories were added, removed, or rescanned.
    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
}
401
// Registers `Worktree` as a gpui model whose observers receive `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
405
impl Worktree {
    /// Creates a `Worktree::Local` rooted at `path`. Spawns a background
    /// scanner that watches the filesystem and streams `ScanState` messages
    /// back to the model, plus a foreground task that applies them.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            // scan_id starts at 1 with completed_scan_id at 0, so the first
            // scan is considered in-progress from the start.
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry if the path exists on disk.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Foreground task: apply scanner state to the model; exits when
            // either the scanner channel closes or the model is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Releases anyone awaiting this scan's barrier.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Background task: watch the filesystem and run the scanner loop.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a `Worktree::Remote` mirroring a collaborator's worktree.
    /// Updates are applied to a background snapshot and then copied to the
    /// foreground model, waking any scan-id subscribers that are satisfied.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Background task: apply each incoming update off the main thread,
            // then signal the foreground task below.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Foreground task: copy the updated snapshot into the model and
            // resolve any subscriptions whose scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// Returns the local variant, or `None` if this worktree is remote.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the remote variant, or `None` if this worktree is local.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of `as_local`.
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of `as_remote`.
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// Returns a clone of the plain `Snapshot`, stripping local-only state.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The local replica id, or the assigned replica id for remote worktrees.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Flattens the per-path, per-server summary maps into
    /// (path, server, summary) triples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// Absolute path of the worktree root on disk (local) or as reported by
    /// the remote host (remote).
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
700
701impl LocalWorktree {
702 pub fn contains_abs_path(&self, path: &Path) -> bool {
703 path.starts_with(&self.abs_path)
704 }
705
706 fn absolutize(&self, path: &Path) -> PathBuf {
707 if path.file_name().is_some() {
708 self.abs_path.join(path)
709 } else {
710 self.abs_path.to_path_buf()
711 }
712 }
713
    /// Loads the file at `path` into a new `Buffer` model with the given
    /// remote `id`, computing its git diff against the index on creation.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            // Read file contents plus the git index text (diff base), if any.
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer off the main thread.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
736
737 pub fn diagnostics_for_path(
738 &self,
739 path: &Path,
740 ) -> Vec<(
741 LanguageServerId,
742 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
743 )> {
744 self.diagnostics.get(path).cloned().unwrap_or_default()
745 }
746
    /// Replaces the diagnostics that `server_id` reported for `worktree_path`.
    /// Empty diagnostics remove the server's entry; otherwise they are stored
    /// keeping the per-path vector sorted by server id. If this worktree is
    /// shared, the new summary is broadcast to collaborators. Returns whether
    /// anything changed (old or new summary non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        // The old summary is removed up front; it is re-inserted below only
        // if the new diagnostics are non-empty.
        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // Drop this server's diagnostics, and the whole path entry once
            // no server has diagnostics left for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // Insert or replace, preserving the sort-by-server-id invariant.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
806
    /// Installs a new snapshot from the background scanner, forwarding it to
    /// collaborators if shared, and emitting `UpdatedGitRepositories` when the
    /// set of git repositories changed.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        // Diff repositories before replacing the snapshot, since the diff
        // needs the old entry ids resolved against current state.
        let updated_repos =
            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
820
    /// Merge-joins two repository maps (both iterate in sorted entry-id order)
    /// and returns the repositories that were added, removed, or rescanned
    /// (differing `scan_id`), keyed by their entries' worktree-relative paths.
    /// Entries whose id no longer resolves in the snapshot are skipped.
    fn changed_repos(
        &self,
        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
        let mut diff = HashMap::default();
        let mut old_repos = old_repos.iter().peekable();
        let mut new_repos = new_repos.iter().peekable();
        loop {
            match (old_repos.peek(), new_repos.peek()) {
                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
                    match Ord::cmp(old_entry_id, new_entry_id) {
                        Ordering::Less => {
                            // Only in the old map: repository was removed.
                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
                                diff.insert(entry.path.clone(), (*old_repo).clone());
                            }
                            old_repos.next();
                        }
                        Ordering::Equal => {
                            // In both: report only if it was rescanned.
                            if old_repo.scan_id != new_repo.scan_id {
                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                    diff.insert(entry.path.clone(), (*new_repo).clone());
                                }
                            }

                            old_repos.next();
                            new_repos.next();
                        }
                        Ordering::Greater => {
                            // Only in the new map: repository was added.
                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                diff.insert(entry.path.clone(), (*new_repo).clone());
                            }
                            new_repos.next();
                        }
                    }
                }
                (Some((old_entry_id, old_repo)), None) => {
                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
                        diff.insert(entry.path.clone(), (*old_repo).clone());
                    }
                    old_repos.next();
                }
                (None, Some((new_entry_id, new_repo))) => {
                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
                        diff.insert(entry.path.clone(), (*new_repo).clone());
                    }
                    new_repos.next();
                }
                (None, None) => break,
            }
        }
        diff
    }
874
875 pub fn scan_complete(&self) -> impl Future<Output = ()> {
876 let mut is_scanning_rx = self.is_scanning.1.clone();
877 async move {
878 let mut is_scanning = is_scanning_rx.borrow().clone();
879 while is_scanning {
880 if let Some(value) = is_scanning_rx.recv().await {
881 is_scanning = value;
882 } else {
883 break;
884 }
885 }
886 }
887 }
888
    /// Clones the current local snapshot (includes git and gitignore state).
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
892
893 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
894 proto::WorktreeMetadata {
895 id: self.id().to_proto(),
896 root_name: self.root_name().to_string(),
897 visible: self.visible,
898 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
899 }
900 }
901
    /// Reads the file at the worktree-relative `path`, returning its `File`
    /// metadata, contents, and — when the path is inside a git repository —
    /// the index text to diff against. Also refreshes the snapshot entry for
    /// the loaded path.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // Kick off loading the git index text in the background, if the path
        // belongs to a known repository.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
956
957 pub fn save_buffer(
958 &self,
959 buffer_handle: ModelHandle<Buffer>,
960 path: Arc<Path>,
961 has_changed_file: bool,
962 cx: &mut ModelContext<Worktree>,
963 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
964 let handle = cx.handle();
965 let buffer = buffer_handle.read(cx);
966
967 let rpc = self.client.clone();
968 let buffer_id = buffer.remote_id();
969 let project_id = self.share.as_ref().map(|share| share.project_id);
970
971 let text = buffer.as_rope().clone();
972 let fingerprint = text.fingerprint();
973 let version = buffer.version();
974 let save = self.write_file(path, text, buffer.line_ending(), cx);
975
976 cx.as_mut().spawn(|mut cx| async move {
977 let entry = save.await?;
978
979 if has_changed_file {
980 let new_file = Arc::new(File {
981 entry_id: entry.id,
982 worktree: handle,
983 path: entry.path,
984 mtime: entry.mtime,
985 is_local: true,
986 is_deleted: false,
987 });
988
989 if let Some(project_id) = project_id {
990 rpc.send(proto::UpdateBufferFile {
991 project_id,
992 buffer_id,
993 file: Some(new_file.to_proto()),
994 })
995 .log_err();
996 }
997
998 buffer_handle.update(&mut cx, |buffer, cx| {
999 if has_changed_file {
1000 buffer.file_updated(new_file, cx).detach();
1001 }
1002 });
1003 }
1004
1005 if let Some(project_id) = project_id {
1006 rpc.send(proto::BufferSaved {
1007 project_id,
1008 buffer_id,
1009 version: serialize_version(&version),
1010 mtime: Some(entry.mtime.into()),
1011 fingerprint: serialize_fingerprint(fingerprint),
1012 })?;
1013 }
1014
1015 buffer_handle.update(&mut cx, |buffer, cx| {
1016 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1017 });
1018
1019 Ok((version, fingerprint, entry.mtime))
1020 })
1021 }
1022
    /// Creates a new directory (`is_dir`) or empty file at the
    /// worktree-relative `path`, then refreshes and returns its entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Empty file: default contents and line ending.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
1049
    /// Saves `text` with the given `line_ending` to the worktree-relative
    /// `path`, then refreshes and returns the path's entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Perform the write off the main thread.
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
1072
    /// Deletes the file or directory for `entry_id` from disk (directories
    /// recursively), then asks the scanner to rescan the deleted path and
    /// waits for that rescan. Returns `None` if the entry doesn't exist.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize so the path sent to the scanner matches the form
            // produced by filesystem events.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Barrier resolves once the scanner has processed the path.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
1115
    /// Renames the entry `entry_id` to `new_path` on disk, then refreshes the
    /// snapshot for both the new and old paths and returns the new entry.
    /// Returns `None` if the entry doesn't exist.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    // Passing the old path lets the snapshot drop the stale entry.
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
1142
1143 pub fn copy_entry(
1144 &self,
1145 entry_id: ProjectEntryId,
1146 new_path: impl Into<Arc<Path>>,
1147 cx: &mut ModelContext<Worktree>,
1148 ) -> Option<Task<Result<Entry>>> {
1149 let old_path = self.entry_for_id(entry_id)?.path.clone();
1150 let new_path = new_path.into();
1151 let abs_old_path = self.absolutize(&old_path);
1152 let abs_new_path = self.absolutize(&new_path);
1153 let fs = self.fs.clone();
1154 let copy = cx.background().spawn(async move {
1155 copy_recursive(
1156 fs.as_ref(),
1157 &abs_old_path,
1158 &abs_new_path,
1159 Default::default(),
1160 )
1161 .await
1162 });
1163
1164 Some(cx.spawn(|this, mut cx| async move {
1165 copy.await?;
1166 this.update(&mut cx, |this, cx| {
1167 this.as_local_mut()
1168 .unwrap()
1169 .refresh_entry(new_path.clone(), None, cx)
1170 })
1171 .await
1172 }))
1173 }
1174
    /// Asks the background scanner to rescan `path` (and `old_path`, when the
    /// entry was moved from there), then returns the refreshed entry.
    ///
    /// Paths are joined onto the canonicalized worktree root; a path with no
    /// file name refers to the root itself. Fails if the worktree is dropped
    /// while waiting, or if the entry no longer exists after the rescan.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed these paths.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1212
    /// Begins sharing this worktree with the collaboration server under
    /// `project_id`, streaming snapshot updates to remote collaborators.
    ///
    /// If the worktree is already shared, this just nudges the update loop to
    /// resume sending. The returned task resolves once the first full snapshot
    /// has been sent to the server.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already shared: resolve immediately and signal the update loop
            // so it retries any updates that failed while disconnected.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send the current diagnostic summaries up front, before any
            // snapshot updates.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start diffing against an empty snapshot so the first
                    // update sent to the server contains every entry.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // A tiny chunk size in tests exercises the update
                        // splitting logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                // Block until `share` is called again; stop if
                                // the sender was dropped (worktree unshared).
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // The initial snapshot is now on the server; resolve
                        // the task returned from `share`.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1299
1300 pub fn unshare(&mut self) {
1301 self.share.take();
1302 }
1303
1304 pub fn is_shared(&self) -> bool {
1305 self.share.is_some()
1306 }
1307}
1308
1309impl RemoteWorktree {
1310 fn snapshot(&self) -> Snapshot {
1311 self.snapshot.clone()
1312 }
1313
1314 pub fn disconnected_from_host(&mut self) {
1315 self.updates_tx.take();
1316 self.snapshot_subscriptions.clear();
1317 self.disconnected = true;
1318 }
1319
    /// Saves `buffer_handle` by asking the host to write it to disk.
    ///
    /// On success, notifies the buffer that it was saved and returns the
    /// host-reported version, fingerprint, and mtime.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1352
1353 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1354 if let Some(updates_tx) = &self.updates_tx {
1355 updates_tx
1356 .unbounded_send(update)
1357 .expect("consumer runs to completion");
1358 }
1359 }
1360
1361 fn observed_snapshot(&self, scan_id: usize) -> bool {
1362 self.completed_scan_id >= scan_id
1363 }
1364
1365 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1366 let (tx, rx) = oneshot::channel();
1367 if self.observed_snapshot(scan_id) {
1368 let _ = tx.send(());
1369 } else if self.disconnected {
1370 drop(tx);
1371 } else {
1372 match self
1373 .snapshot_subscriptions
1374 .binary_search_by_key(&scan_id, |probe| probe.0)
1375 {
1376 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1377 }
1378 }
1379
1380 async move {
1381 rx.await?;
1382 Ok(())
1383 }
1384 }
1385
1386 pub fn update_diagnostic_summary(
1387 &mut self,
1388 path: Arc<Path>,
1389 summary: &proto::DiagnosticSummary,
1390 ) {
1391 let server_id = LanguageServerId(summary.language_server_id as usize);
1392 let summary = DiagnosticSummary {
1393 error_count: summary.error_count as usize,
1394 warning_count: summary.warning_count as usize,
1395 };
1396
1397 if summary.is_empty() {
1398 if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1399 summaries.remove(&server_id);
1400 if summaries.is_empty() {
1401 self.diagnostic_summaries.remove(&path);
1402 }
1403 }
1404 } else {
1405 self.diagnostic_summaries
1406 .entry(path)
1407 .or_default()
1408 .insert(server_id, summary);
1409 }
1410 }
1411
    /// Inserts an entry received from the host once the local snapshot has
    /// caught up to `scan_id`, keeping the foreground snapshot in sync with
    /// the shared background snapshot.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                // Mirror the mutated background snapshot into the foreground
                // copy so readers see the new entry.
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }
1430
    /// Deletes the entry with `id` once the local snapshot has caught up to
    /// `scan_id`, keeping the foreground snapshot in sync with the shared
    /// background snapshot.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                // Mirror the mutated background snapshot into the foreground
                // copy so readers no longer see the entry.
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1449}
1450
1451impl Snapshot {
    /// The stable identifier of this worktree.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with `entry_id` exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
1463
    /// Inserts (or replaces) an entry deserialized from a protobuf message,
    /// keeping the by-id and by-path trees consistent with each other.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id previously existed (possibly at another path), drop its
        // stale by-path record before inserting the new one.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
1481
    /// Removes the entry with `entry_id` and every entry beneath its path,
    /// returning the removed entry's path (or `None` if no such entry).
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and all of its descendants, removing
            // their by-id records as we go...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the remainder of the tree.
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }
1502
1503 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1504 let mut entries_by_path_edits = Vec::new();
1505 let mut entries_by_id_edits = Vec::new();
1506 for entry_id in update.removed_entries {
1507 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1508 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1509 entries_by_id_edits.push(Edit::Remove(entry.id));
1510 }
1511 }
1512
1513 for entry in update.updated_entries {
1514 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1515 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1516 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1517 }
1518 entries_by_id_edits.push(Edit::Insert(PathEntry {
1519 id: entry.id,
1520 path: entry.path.clone(),
1521 is_ignored: entry.is_ignored,
1522 scan_id: 0,
1523 }));
1524 entries_by_path_edits.push(Edit::Insert(entry));
1525 }
1526
1527 self.entries_by_path.edit(entries_by_path_edits, &());
1528 self.entries_by_id.edit(entries_by_id_edits, &());
1529
1530 update.removed_repositories.sort_unstable();
1531 self.repository_entries.retain(|_, entry| {
1532 if let Ok(_) = update
1533 .removed_repositories
1534 .binary_search(&entry.work_directory.to_proto())
1535 {
1536 false
1537 } else {
1538 true
1539 }
1540 });
1541
1542 for repository in update.updated_repositories {
1543 let work_directory_entry: WorkDirectoryEntry =
1544 ProjectEntryId::from_proto(repository.work_directory_id).into();
1545
1546 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1547 let mut statuses = TreeMap::default();
1548 for status_entry in repository.updated_worktree_statuses {
1549 let Some(git_file_status) = read_git_status(status_entry.status) else {
1550 continue;
1551 };
1552
1553 let repo_path = RepoPath::new(status_entry.repo_path.into());
1554 statuses.insert(repo_path, git_file_status);
1555 }
1556
1557 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1558 if self.repository_entries.get(&work_directory).is_some() {
1559 self.repository_entries.update(&work_directory, |repo| {
1560 repo.branch = repository.branch.map(Into::into);
1561 repo.worktree_statuses.insert_tree(statuses);
1562
1563 for repo_path in repository.removed_worktree_repo_paths {
1564 let repo_path = RepoPath::new(repo_path.into());
1565 repo.worktree_statuses.remove(&repo_path);
1566 }
1567 });
1568 } else {
1569 self.repository_entries.insert(
1570 work_directory,
1571 RepositoryEntry {
1572 work_directory: work_directory_entry,
1573 branch: repository.branch.map(Into::into),
1574 worktree_statuses: statuses,
1575 },
1576 )
1577 }
1578 } else {
1579 log::error!("no work directory entry for repository {:?}", repository)
1580 }
1581 }
1582
1583 self.scan_id = update.scan_id as usize;
1584 if update.is_last_update {
1585 self.completed_scan_id = update.scan_id as usize;
1586 }
1587
1588 Ok(())
1589 }
1590
    /// Total number of file entries in this snapshot.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of visible (non-ignored) file entries in this snapshot.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }
1598
    /// Returns a traversal positioned at the `start_offset`-th entry, where
    /// the offset counts only the entry kinds selected by `include_dirs` and
    /// `include_ignored`.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Returns a traversal positioned at `path` (or the nearest entry after
    /// it, if no entry exists exactly at that path).
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }
1636
    /// Iterates over file entries starting at file index `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, including directories.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over all git repository entries in this snapshot.
    pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repository_entries.values()
    }

    /// Iterates over the paths of all entries, excluding the root entry
    /// (whose path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }
1656
    /// Iterates over the direct children of the entry at `parent_path`,
    /// including directories and ignored entries.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Seek just past the parent itself so iteration begins at its first
        // child; the iterator stops once entries leave the parent's subtree.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }
1670
    /// The root entry of the worktree (the entry with the empty path), if it
    /// has been scanned.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The file-system name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The repository whose work directory is the worktree root, if any.
    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
        self.repository_entries
            .get(&RepositoryWorkDirectory(Path::new("").into()))
            .map(|entry| entry.to_owned())
    }

    /// Iterates over all git repository entries in this snapshot.
    /// NOTE(review): identical to `repositories` above — candidates to merge.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repository_entries.values()
    }

    /// The id of the scan that produced the current state of this snapshot.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }
1692
1693 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1694 let path = path.as_ref();
1695 self.traverse_from_path(true, true, path)
1696 .entry()
1697 .and_then(|entry| {
1698 if entry.path.as_ref() == path {
1699 Some(entry)
1700 } else {
1701 None
1702 }
1703 })
1704 }
1705
1706 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1707 let entry = self.entries_by_id.get(&id, &())?;
1708 self.entry_for_path(&entry.path)
1709 }
1710
1711 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1712 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1713 }
1714}
1715
1716impl LocalSnapshot {
    /// Returns the local (on-disk) repository state for `repo`, if tracked.
    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }

    /// Finds the repository whose git metadata `path` belongs to, per
    /// `LocalRepositoryEntry::in_dot_git`.
    pub(crate) fn repo_for_metadata(
        &self,
        path: &Path,
    ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
        self.git_repositories
            .iter()
            .find(|(_, repo)| repo.in_dot_git(path))
    }
1729
    /// Builds an `UpdateWorktree` message containing this snapshot's complete
    /// contents, as sent when sharing begins (no removals). Test-only.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
            removed_repositories: Default::default(),
        }
    }
1746
    /// Diffs this snapshot against `other` (an older snapshot) and builds an
    /// `UpdateWorktree` message describing the changes.
    ///
    /// Both entry lists are walked as id-ordered merge-joins: an id present
    /// only in `self` is an addition, one present only in `other` is a
    /// removal, and a shared id is re-sent only if its scan id changed.
    /// Repositories are merge-joined the same way, keyed by work directory.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present only in the new snapshot: added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: re-send only if it changed.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in the old snapshot: removed.
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        // Same merge-join, now over repositories keyed by work directory.
        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
        let mut removed_repositories = Vec::new();
        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
        loop {
            match (self_repos.peek(), other_repos.peek()) {
                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
                    match Ord::cmp(self_work_dir, other_work_dir) {
                        Ordering::Less => {
                            updated_repositories.push((*self_repo).into());
                            self_repos.next();
                        }
                        Ordering::Equal => {
                            if self_repo != other_repo {
                                // Send only the delta between the two states.
                                updated_repositories.push(self_repo.build_update(other_repo));
                            }

                            self_repos.next();
                            other_repos.next();
                        }
                        Ordering::Greater => {
                            removed_repositories.push(other_repo.work_directory.to_proto());
                            other_repos.next();
                        }
                    }
                }
                (Some((_, self_repo)), None) => {
                    updated_repositories.push((*self_repo).into());
                    self_repos.next();
                }
                (None, Some((_, other_repo))) => {
                    removed_repositories.push(other_repo.work_directory.to_proto());
                    other_repos.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }
1854
    /// Inserts `entry` into both trees, reusing ids where possible, and
    /// refreshes the cached gitignore when the entry is a `.gitignore` file.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            // (Re)parse the gitignore synchronously so ignore decisions made
            // later in this scan see the up-to-date rules.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote an already-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If a different entry previously occupied this path, drop its
        // by-id record.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
1904
    /// Fills in the children of `parent_path` after the scanner has listed
    /// the directory, marking the parent as fully scanned.
    ///
    /// Also registers the directory's gitignore (if one was found) and, when
    /// the parent is a `.git` directory, the enclosing git repository.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            // The parent was removed between scan and populate; nothing to do.
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // The path no longer refers to a directory; skip.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            self.build_repo(parent_path, fs);
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }
1960
1961 fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1962 let abs_path = self.abs_path.join(&parent_path);
1963 let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1964
1965 // Guard against repositories inside the repository metadata
1966 if work_dir
1967 .components()
1968 .find(|component| component.as_os_str() == *DOT_GIT)
1969 .is_some()
1970 {
1971 return None;
1972 };
1973
1974 let work_dir_id = self
1975 .entry_for_path(work_dir.clone())
1976 .map(|entry| entry.id)?;
1977
1978 if self.git_repositories.get(&work_dir_id).is_none() {
1979 let repo = fs.open_repo(abs_path.as_path())?;
1980 let work_directory = RepositoryWorkDirectory(work_dir.clone());
1981 let scan_id = self.scan_id;
1982
1983 let repo_lock = repo.lock();
1984
1985 self.repository_entries.insert(
1986 work_directory,
1987 RepositoryEntry {
1988 work_directory: work_dir_id.into(),
1989 branch: repo_lock.branch_name().map(Into::into),
1990 worktree_statuses: repo_lock.worktree_statuses().unwrap_or_default(),
1991 },
1992 );
1993 drop(repo_lock);
1994
1995 self.git_repositories.insert(
1996 work_dir_id,
1997 LocalRepositoryEntry {
1998 scan_id,
1999 full_scan_id: scan_id,
2000 repo_ptr: repo,
2001 git_dir_path: parent_path.clone(),
2002 },
2003 )
2004 }
2005
2006 Some(())
2007 }
    /// Keeps entry ids stable across scans: reuse the id of a removed entry
    /// with the same inode (a move/recreate), or of the existing entry at the
    /// same path (an in-place update).
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }
2015
    /// Removes the entry at `path` and all of its descendants from the
    /// snapshot, remembering removed ids by inode so they can be reused if
    /// the same file reappears (see `reuse_entry_id`).
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            // Split the tree into: entries before `path`, the removed subtree,
            // and everything after `path`'s successor.
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Record the highest removed id per inode for potential reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a .gitignore invalidates the cached rules for its parent;
        // bump the scan id so the scanner re-evaluates ignores there.
        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        }
    }
2048
2049 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2050 let mut inodes = TreeSet::default();
2051 for ancestor in path.ancestors().skip(1) {
2052 if let Some(entry) = self.entry_for_path(ancestor) {
2053 inodes.insert(entry.inode);
2054 }
2055 }
2056 inodes
2057 }
2058
    /// Builds the stack of gitignore rules applying to `abs_path` by walking
    /// its ancestors from the root downward.
    ///
    /// If any ancestor directory (or the path itself) is ignored, the whole
    /// stack collapses to `IgnoreStack::all()`, since everything beneath an
    /// ignored directory is ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        // Apply outermost (root-most) rules first.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
2085}
2086
2087async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2088 let contents = fs.load(abs_path).await?;
2089 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2090 let mut builder = GitignoreBuilder::new(parent);
2091 for line in contents.lines() {
2092 builder.add_line(Some(abs_path.into()), line)?;
2093 }
2094 Ok(builder.build()?)
2095}
2096
impl WorktreeId {
    /// Creates an id from a GPUI model handle id.
    pub fn from_usize(handle_id: usize) -> Self {
        Self(handle_id)
    }

    /// Deserializes an id from its protobuf representation.
    pub(crate) fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    /// Serializes this id for protobuf messages.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// Returns the raw integer value of this id.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
2114
2115impl fmt::Display for WorktreeId {
2116 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2117 self.0.fmt(f)
2118 }
2119}
2120
// Both worktree variants expose their snapshot directly, so `Worktree`
// dereferences to `Snapshot` regardless of the variant.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
2131
// A local worktree dereferences to its richer `LocalSnapshot` (which itself
// wraps a `Snapshot`).
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

// A remote worktree only carries a plain `Snapshot`.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2147
// Delegate to the snapshot's Debug output; the worktree's other fields are
// channels and tasks that aren't useful to print.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
2153
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Wrapper types that render the sum trees compactly: the by-path tree
        // as a path -> id map, the by-id tree as a list.
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl<'a> fmt::Debug for EntriesByPath<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl<'a> fmt::Debug for EntriesById<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}
2181
/// A file that belongs to a worktree, as seen by the language layer.
#[derive(Clone, PartialEq)]
pub struct File {
    // The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    // Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    // Last known modification time of the file.
    pub mtime: SystemTime,
    // Id of the corresponding worktree entry.
    pub(crate) entry_id: ProjectEntryId,
    // Whether the owning worktree is local (as opposed to remote).
    pub(crate) is_local: bool,
    // Whether the file has been deleted on disk.
    pub(crate) is_deleted: bool,
}
2191
impl language::File for File {
    /// Returns `self` as a `LocalFile` only when the owning worktree is local.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a display path: the worktree's root name (for visible
    /// worktrees) or its absolute path (abbreviating the home directory to
    /// `~`), followed by the file's worktree-relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means the file IS the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file for RPC messages.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
2259
impl language::LocalFile for File {
    /// Returns the absolute path of this file on disk.
    ///
    /// Panics if the worktree is not local.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Reads the file's contents from disk on a background thread.
    ///
    /// Panics if the worktree is not local.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that a buffer for this file was reloaded from
    /// disk, if the worktree is currently shared. Does nothing otherwise.
    ///
    /// Panics if the worktree is not local.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2303
2304impl File {
2305 pub fn from_proto(
2306 proto: rpc::proto::File,
2307 worktree: ModelHandle<Worktree>,
2308 cx: &AppContext,
2309 ) -> Result<Self> {
2310 let worktree_id = worktree
2311 .read(cx)
2312 .as_remote()
2313 .ok_or_else(|| anyhow!("not remote"))?
2314 .id();
2315
2316 if worktree_id.to_proto() != proto.worktree_id {
2317 return Err(anyhow!("worktree id does not match file"));
2318 }
2319
2320 Ok(Self {
2321 worktree,
2322 path: Path::new(&proto.path).into(),
2323 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2324 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2325 is_local: false,
2326 is_deleted: proto.is_deleted,
2327 })
2328 }
2329
2330 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2331 file.and_then(|f| f.as_any().downcast_ref())
2332 }
2333
2334 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2335 self.worktree.read(cx).id()
2336 }
2337
2338 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2339 if self.is_deleted {
2340 None
2341 } else {
2342 Some(self.entry_id)
2343 }
2344 }
2345}
2346
/// A single filesystem entry (file or directory) within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Identifies this entry within the project.
    pub id: ProjectEntryId,
    /// Whether this entry is a file or a (possibly still-scanning) directory.
    pub kind: EntryKind,
    /// Path of the entry, relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode number of the entry.
    pub inode: u64,
    /// Last-known modification time.
    pub mtime: SystemTime,
    /// Whether the entry is a symbolic link.
    pub is_symlink: bool,
    /// Whether the entry is excluded by a gitignore rule.
    pub is_ignored: bool,
}
2357
/// Distinguishes files from directories within a worktree snapshot.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose contents have not yet
    /// been scanned.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, along with the set of characters in its path, used for
    /// fuzzy matching.
    File(CharBag),
}
2364
/// Describes how a path changed between two snapshots of a worktree.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// The path was created.
    Added,
    /// The path was deleted.
    Removed,
    /// The path's metadata changed.
    Updated,
    /// The path was created or updated; used for events received before the
    /// initial scan completed, when the two can't be distinguished.
    AddedOrUpdated,
}
2372
2373impl Entry {
2374 fn new(
2375 path: Arc<Path>,
2376 metadata: &fs::Metadata,
2377 next_entry_id: &AtomicUsize,
2378 root_char_bag: CharBag,
2379 ) -> Self {
2380 Self {
2381 id: ProjectEntryId::new(next_entry_id),
2382 kind: if metadata.is_dir {
2383 EntryKind::PendingDir
2384 } else {
2385 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2386 },
2387 path,
2388 inode: metadata.inode,
2389 mtime: metadata.mtime,
2390 is_symlink: metadata.is_symlink,
2391 is_ignored: false,
2392 }
2393 }
2394
2395 pub fn is_dir(&self) -> bool {
2396 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2397 }
2398
2399 pub fn is_file(&self) -> bool {
2400 matches!(self.kind, EntryKind::File(_))
2401 }
2402}
2403
2404impl sum_tree::Item for Entry {
2405 type Summary = EntrySummary;
2406
2407 fn summary(&self) -> Self::Summary {
2408 let visible_count = if self.is_ignored { 0 } else { 1 };
2409 let file_count;
2410 let visible_file_count;
2411 if self.is_file() {
2412 file_count = 1;
2413 visible_file_count = visible_count;
2414 } else {
2415 file_count = 0;
2416 visible_file_count = 0;
2417 }
2418
2419 EntrySummary {
2420 max_path: self.path.clone(),
2421 count: 1,
2422 visible_count,
2423 file_count,
2424 visible_file_count,
2425 }
2426 }
2427}
2428
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and therefore ordered) by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2436
/// Aggregated statistics over a subtree of [`Entry`] values in a sum tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest (last) path in the summarized range.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Number of non-ignored entries.
    visible_count: usize,
    // Number of file entries, ignored or not.
    file_count: usize,
    // Number of non-ignored file entries.
    visible_file_count: usize,
}
2445
impl Default for EntrySummary {
    // Manual impl because `Arc<Path>` has no `Default`.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2457
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines `rhs` (which summarizes entries that follow this summary's
    /// range) into `self` by summing counts and taking `rhs`'s max path.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2469
/// An item in the id-keyed index of a snapshot, mapping a [`ProjectEntryId`]
/// back to the corresponding entry's path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    // Worktree-relative path of the entry with this id.
    path: Arc<Path>,
    // Whether the entry is gitignored.
    is_ignored: bool,
    // The scan in which this entry was last updated.
    scan_id: usize,
}
2477
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // A single item's summary is just its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2485
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Path entries are keyed (and therefore ordered) by their entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2493
/// Summary for [`PathEntry`] items: the greatest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2498
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    // Items are stored in ascending id order, so the rightmost summary's
    // `max_id` is the maximum of the combined range.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2506
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    /// Allows seeking through a tree of [`PathEntry`] items by entry id.
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2512
/// Sort key for [`Entry`] items: the entry's worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2515
impl Default for PathKey {
    // Manual impl because `Arc<Path>` has no `Default`; the empty path sorts
    // before all other paths.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2521
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    /// Allows seeking through a tree of [`Entry`] items by path.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2527
/// Scans a worktree's directory in the background, keeping a
/// [`LocalSnapshot`] up to date in response to filesystem events and
/// explicit refresh requests.
struct BackgroundScanner {
    /// The snapshot being maintained, shared with the worktree.
    snapshot: Mutex<LocalSnapshot>,
    /// Filesystem implementation used for all I/O (real, or fake in tests).
    fs: Arc<dyn Fs>,
    /// Channel over which scan progress and snapshot updates are reported.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the worktree to re-scan specific paths.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The previously-reported snapshot, plus the paths that changed since it
    /// was sent; used to compute change sets for status updates.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    /// Whether the initial recursive scan of the worktree has completed.
    finished_initial_scan: bool,
}
2537
impl BackgroundScanner {
    /// Creates a scanner over `snapshot`. The previously-reported state is
    /// initialized to a copy of the snapshot taken before any scanning occurs.
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }

    /// Main entry point: performs the initial recursive scan, then processes
    /// filesystem events and refresh requests until both the event stream and
    /// the request channel close (i.e. the worktree is dropped).
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore ignores the root itself, mark the root
            // entry as ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Drain any other event batches that are already ready, so
                    // they can all be processed together.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }

    /// Re-scans the given paths, then reports the result to the worktree,
    /// releasing `barrier` so the requester can observe completion. Returns
    /// false if the worktree has been dropped.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }

    /// Handles a batch of filesystem events: reloads the affected entries,
    /// rescans changed directories, refreshes ignore statuses, prunes state
    /// for deleted git repositories, and reports the updated snapshot.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            paths.sort_unstable();
            // Record the changed paths so that the next status update can
            // compute a precise change set.
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop git repository state for work directories whose `.git` no
        // longer exists. `mem::take` lets us read other parts of `snapshot`
        // inside the `retain` closure.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|work_directory_id, _| {
            snapshot
                .entry_for_id(*work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                })
        });
        snapshot.git_repositories = git_repositories;

        // Likewise, drop repository entries that no longer have a backing
        // git repository.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            snapshot
                .git_repositories
                .get(&entry.work_directory.0)
                .is_some()
        });
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }

    /// Drains `scan_jobs_rx` using one worker per CPU, recursively scanning
    /// directories. While scanning, workers also service refresh requests and
    /// (when `enable_progress_updates`) periodically report progress.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent the update;
                                            // adopt its counter value.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }

    /// Computes the change set between the previously-reported snapshot and
    /// the current one, then sends an `Updated` scan state to the worktree.
    /// Returns false if the worktree has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        let mut old_snapshot = snapshot.snapshot.clone();
        // After this swap, `prev_state.0` holds the new snapshot and
        // `old_snapshot` holds the one that was reported previously.
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }

    /// Reads the children of the directory described by `job`, inserting them
    /// into the snapshot and enqueueing scan jobs for subdirectories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per child directory; `None` marks directories that won't be
        // descended into (symlink cycles).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between the directory listing and this
                // metadata call; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // sorts near the beginning of the listing, so these entries should rarely be
                // numerous. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }

    /// Refreshes the snapshot entries for the given absolute paths. When
    /// `scan_queue_tx` is provided, changed directories are also enqueued for
    /// a recursive rescan. Returns the worktree-relative paths that were
    /// processed, or `None` if the worktree root can't be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sort, then drop any path whose ancestor is also present, since the
        // ancestor's refresh covers it.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    self.reload_repo_for_path(&path, &mut snapshot);

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Guard against symlink cycles before enqueueing a
                        // recursive scan of this directory.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {
                    self.remove_repo_path(&path, &mut snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }

    /// Clears git status entries for a removed path. Returns `None` when the
    /// path has no containing repository; the return value exists only to
    /// support early-return control flow via `?`.
    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        if !path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let scan_id = snapshot.scan_id;
            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            // Drop the statuses of the removed path and everything beneath it.
            snapshot.repository_entries.update(&work_dir, |entry| {
                entry
                    .worktree_statuses
                    .remove_from_while(&repo_path, |stored_path, _| {
                        stored_path.starts_with(&repo_path)
                    })
            });
        }

        Some(())
    }

    /// Refreshes git state for a changed path. Changes inside a `.git`
    /// directory trigger a full rescan of that repository's statuses; other
    /// changes refresh only the status of the affected file. Returns `None`
    /// for early-return control flow via `?`.
    fn reload_repo_for_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let (entry_id, repo_ptr) = {
                let (entry_id, repo) = snapshot.repo_for_metadata(&path)?;
                // Short circuit if this repository was already fully scanned
                // during the current scan.
                if repo.full_scan_id == scan_id {
                    return None;
                }
                (*entry_id, repo.repo_ptr.to_owned())
            };

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            let repo = repo_ptr.lock();
            repo.reload_index();
            let branch = repo.branch_name();
            let statuses = repo.worktree_statuses().unwrap_or_default();

            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.scan_id = scan_id;
                entry.full_scan_id = scan_id;
            });

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.branch = branch.map(Into::into);
                entry.worktree_statuses = statuses;
            });
        } else {
            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let status = {
                let local_repo = snapshot.get_local_repo(&repo)?;

                // Short circuit if we've already scanned everything
                if local_repo.full_scan_id == scan_id {
                    return None;
                }

                let git_ptr = local_repo.repo_ptr.lock();
                git_ptr.worktree_status(&repo_path)?
            };

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.worktree_statuses.insert(repo_path, status)
            });
        }

        Some(())
    }

    /// Re-evaluates ignore statuses for entries beneath any `.gitignore` that
    /// changed during the current scan, and forgets gitignores whose files
    /// were deleted.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        // Remove deleted gitignores from both the local copy and the live snapshot.
        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip any path nested beneath the current one; the recursive job
            // for the ancestor will cover it.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }

    /// Recomputes the ignore status of the direct children of `job.abs_path`,
    /// writing any changes back to the live snapshot and enqueueing jobs for
    /// child directories.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Diffs two snapshots at the given event paths (and their descendants),
    /// classifying each affected entry as added, removed, or updated.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path and all of its descendants.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }

    /// Waits for the progress-update interval to elapse. When progress updates
    /// are disabled, pends forever; with a fake filesystem in tests, uses a
    /// simulated random delay instead of wall-clock time.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
}
3300
3301fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3302 let mut result = root_char_bag;
3303 result.extend(
3304 path.to_string_lossy()
3305 .chars()
3306 .map(|c| c.to_ascii_lowercase()),
3307 );
3308 result
3309}
3310
/// A unit of work for the recursive scan: one directory to read.
struct ScanJob {
    /// Absolute path of the directory.
    abs_path: Arc<Path>,
    /// Path of the directory, relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are pushed.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
3318
/// A unit of work for re-evaluating ignore statuses beneath a directory
/// whose gitignore rules changed.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory to process.
    abs_path: Arc<Path>,
    /// Gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are pushed.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3324
/// Test-support extensions for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all pending FS events for the worktree have been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3332
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        // Create and then delete a sentinel file, waiting for each change to
        // appear in (and disappear from) the worktree. By the time both round
        // trips complete, any events that predate the worktree must also have
        // been processed.
        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3373
/// Cursor dimension accumulated while seeking through the entry tree:
/// tracks how many entries of each class precede the current position.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // The largest path seen so far (paths are the tree's sort key).
    max_path: &'a Path,
    // Total entries traversed.
    count: usize,
    // Entries that are not ignored.
    visible_count: usize,
    // File (non-directory) entries.
    file_count: usize,
    // Non-ignored file entries.
    visible_file_count: usize,
}
3382
3383impl<'a> TraversalProgress<'a> {
3384 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3385 match (include_ignored, include_dirs) {
3386 (true, true) => self.count,
3387 (true, false) => self.file_count,
3388 (false, true) => self.visible_count,
3389 (false, false) => self.visible_file_count,
3390 }
3391 }
3392}
3393
3394impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3395 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3396 self.max_path = summary.max_path.as_ref();
3397 self.count += summary.count;
3398 self.visible_count += summary.visible_count;
3399 self.file_count += summary.file_count;
3400 self.visible_file_count += summary.visible_file_count;
3401 }
3402}
3403
3404impl<'a> Default for TraversalProgress<'a> {
3405 fn default() -> Self {
3406 Self {
3407 max_path: Path::new(""),
3408 count: 0,
3409 visible_count: 0,
3410 file_count: 0,
3411 visible_file_count: 0,
3412 }
3413 }
3414}
3415
/// An ordered walk over a snapshot's entries, optionally filtering out
/// directories and/or ignored entries.
pub struct Traversal<'a> {
    // Cursor into the snapshot's entries-by-path tree.
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
3421
impl<'a> Traversal<'a> {
    /// Advances past the current entry to the next one matching the
    /// traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward so that `offset()` becomes `offset`, counting only
    /// entries that match the traversal's filters. Returns the cursor's
    /// seek result.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips the current entry's entire subtree, stopping at the first
    /// following entry outside it that matches the filters. Returns false
    /// if the tree is exhausted before such an entry is found.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek just past everything prefixed by the current path.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Only stop if the landing entry passes the filters;
                // otherwise loop and skip its subtree too.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of filter-matching entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3467
3468impl<'a> Iterator for Traversal<'a> {
3469 type Item = &'a Entry;
3470
3471 fn next(&mut self) -> Option<Self::Item> {
3472 if let Some(item) = self.entry() {
3473 self.advance();
3474 Some(item)
3475 } else {
3476 None
3477 }
3478 }
3479}
3480
/// Seek targets for positioning a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to this exact path.
    Path(&'a Path),
    // Seek to the first entry that is not this path or a descendant of it.
    PathSuccessor(&'a Path),
    // Seek to the nth entry, counting only entries matching the flags.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3491
3492impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3493 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3494 match self {
3495 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3496 TraversalTarget::PathSuccessor(path) => {
3497 if !cursor_location.max_path.starts_with(path) {
3498 Ordering::Equal
3499 } else {
3500 Ordering::Greater
3501 }
3502 }
3503 TraversalTarget::Count {
3504 count,
3505 include_dirs,
3506 include_ignored,
3507 } => Ord::cmp(
3508 count,
3509 &cursor_location.count(*include_dirs, *include_ignored),
3510 ),
3511 }
3512 }
3513}
3514
/// Iterates over the immediate children of one directory, skipping each
/// child's own descendants.
struct ChildEntriesIter<'a> {
    // The directory whose children are yielded.
    parent_path: &'a Path,
    // Underlying traversal, positioned at the next candidate child.
    traversal: Traversal<'a>,
}
3519
3520impl<'a> Iterator for ChildEntriesIter<'a> {
3521 type Item = &'a Entry;
3522
3523 fn next(&mut self) -> Option<Self::Item> {
3524 if let Some(item) = self.traversal.entry() {
3525 if item.path.starts_with(&self.parent_path) {
3526 self.traversal.advance_to_sibling();
3527 return Some(item);
3528 }
3529 }
3530 None
3531 }
3532}
3533
3534impl<'a> From<&'a Entry> for proto::Entry {
3535 fn from(entry: &'a Entry) -> Self {
3536 Self {
3537 id: entry.id.to_proto(),
3538 is_dir: entry.is_dir(),
3539 path: entry.path.to_string_lossy().into(),
3540 inode: entry.inode,
3541 mtime: Some(entry.mtime.into()),
3542 is_symlink: entry.is_symlink,
3543 is_ignored: entry.is_ignored,
3544 }
3545 }
3546}
3547
3548impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3549 type Error = anyhow::Error;
3550
3551 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3552 if let Some(mtime) = entry.mtime {
3553 let kind = if entry.is_dir {
3554 EntryKind::Dir
3555 } else {
3556 let mut char_bag = *root_char_bag;
3557 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3558 EntryKind::File(char_bag)
3559 };
3560 let path: Arc<Path> = PathBuf::from(entry.path).into();
3561 Ok(Entry {
3562 id: ProjectEntryId::from_proto(entry.id),
3563 kind,
3564 path,
3565 inode: entry.inode,
3566 mtime: mtime.into(),
3567 is_symlink: entry.is_symlink,
3568 is_ignored: entry.is_ignored,
3569 })
3570 } else {
3571 Err(anyhow!(
3572 "missing mtime in remote worktree entry {:?}",
3573 entry.path
3574 ))
3575 }
3576 }
3577}
3578
3579#[cfg(test)]
3580mod tests {
3581 use super::*;
3582 use fs::{FakeFs, RealFs};
3583 use git2::Signature;
3584 use gpui::{executor::Deterministic, TestAppContext};
3585 use pretty_assertions::assert_eq;
3586 use rand::prelude::*;
3587 use serde_json::json;
3588 use std::{env, fmt::Write};
3589 use util::{http::FakeHttpClient, test::temp_tree};
3590
    /// Verifies that snapshot traversal respects the include-ignored flag:
    /// `a/b` is gitignored and must only appear when ignored entries are
    /// requested.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Without ignored entries, "a/b" is absent.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // With ignored entries included, "a/b" appears in order.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3648
    /// Verifies that the scanner terminates in the presence of circular
    /// symlinks (`lib/a/lib` and `lib/b/lib` both point at their parent),
    /// listing each symlink once without following the cycle, and that a
    /// rename of one symlink is picked up on rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Both symlinks resolve to "lib", creating cycles.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks should be reflected after the
        // resulting FS events are processed.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3728
    /// Verifies ignore handling on a real filesystem: rules from a
    /// gitignore in an ancestor of the worktree root still apply, new files
    /// written after the initial scan get correct ignore status, and `.git`
    /// itself is treated as ignored.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        // The worktree root is "tree"; the parent dir holds an ancestor
        // gitignore that covers "ancestor-ignored-file1/2".
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Initial scan: statuses from both the local and ancestor gitignore.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the scan must pick up the same statuses.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3807
    /// Verifies repository discovery for paths: files map to the nearest
    /// enclosing `.git` work directory (including nested dependency repos),
    /// changes inside `.git` emit `UpdatedGitRepositories` events, and
    /// deleting `.git` removes the association.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        // "dir1" is a repo containing a nested repo at "dir1/deps/dep1";
        // "c.txt" lies outside any repository.
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // Outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            // Maps to the outer repository.
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1").to_owned())
            );

            // Maps to the nested dependency repository, not the outer one.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1/deps/dep1").to_owned())
            );
        });

        // Record all UpdatedGitRepositories events for inspection.
        let repo_update_events = Arc::new(Mutex::new(vec![]));
        tree.update(cx, |_, cx| {
            let repo_update_events = repo_update_events.clone();
            cx.subscribe(&tree, move |_, _, event, _| {
                if let Event::UpdatedGitRepositories(update) = event {
                    repo_update_events.lock().push(update.clone());
                }
            })
            .detach();
        });

        // Touching a file inside ".git" should report the repo as updated.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        assert_eq!(
            repo_update_events.lock()[0]
                .keys()
                .cloned()
                .collect::<Vec<Arc<Path>>>(),
            vec![Path::new("dir1").into()]
        );

        // Removing ".git" should dissolve the association.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3898
3899 #[gpui::test]
3900 async fn test_git_status(cx: &mut TestAppContext) {
3901 #[track_caller]
3902 fn git_init(path: &Path) -> git2::Repository {
3903 git2::Repository::init(path).expect("Failed to initialize git repository")
3904 }
3905
3906 #[track_caller]
3907 fn git_add(path: &Path, repo: &git2::Repository) {
3908 let mut index = repo.index().expect("Failed to get index");
3909 index.add_path(path).expect("Failed to add a.txt");
3910 index.write().expect("Failed to write index");
3911 }
3912
3913 #[track_caller]
3914 fn git_remove_index(path: &Path, repo: &git2::Repository) {
3915 let mut index = repo.index().expect("Failed to get index");
3916 index.remove_path(path).expect("Failed to add a.txt");
3917 index.write().expect("Failed to write index");
3918 }
3919
3920 #[track_caller]
3921 fn git_commit(msg: &'static str, repo: &git2::Repository) {
3922 let signature = Signature::now("test", "test@zed.dev").unwrap();
3923 let oid = repo.index().unwrap().write_tree().unwrap();
3924 let tree = repo.find_tree(oid).unwrap();
3925 if let Some(head) = repo.head().ok() {
3926 let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
3927
3928 let parent_commit = parent_obj.as_commit().unwrap();
3929
3930 repo.commit(
3931 Some("HEAD"),
3932 &signature,
3933 &signature,
3934 msg,
3935 &tree,
3936 &[parent_commit],
3937 )
3938 .expect("Failed to commit with parent");
3939 } else {
3940 repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
3941 .expect("Failed to commit");
3942 }
3943 }
3944
3945 #[track_caller]
3946 fn git_stash(repo: &mut git2::Repository) {
3947 let signature = repo.signature().unwrap();
3948 repo.stash_save(&signature, "N/A", None)
3949 .expect("Failed to stash");
3950 }
3951
3952 #[track_caller]
3953 fn git_reset(offset: usize, repo: &git2::Repository) {
3954 let head = repo.head().expect("Couldn't get repo head");
3955 let object = head.peel(git2::ObjectType::Commit).unwrap();
3956 let commit = object.as_commit().unwrap();
3957 let new_head = commit
3958 .parents()
3959 .inspect(|parnet| {
3960 parnet.message();
3961 })
3962 .skip(offset)
3963 .next()
3964 .expect("Not enough history");
3965 repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
3966 .expect("Could not reset");
3967 }
3968
3969 #[allow(dead_code)]
3970 #[track_caller]
3971 fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
3972 repo.statuses(None)
3973 .unwrap()
3974 .iter()
3975 .map(|status| (status.path().unwrap().to_string(), status.status()))
3976 .collect()
3977 }
3978
3979 let root = temp_tree(json!({
3980 "project": {
3981 "a.txt": "a",
3982 "b.txt": "bb",
3983 "c": {
3984 "d": {
3985 "e.txt": "eee"
3986 }
3987 }
3988 },
3989
3990 }));
3991
3992 let http_client = FakeHttpClient::with_404_response();
3993 let client = cx.read(|cx| Client::new(http_client, cx));
3994 let tree = Worktree::local(
3995 client,
3996 root.path(),
3997 true,
3998 Arc::new(RealFs),
3999 Default::default(),
4000 &mut cx.to_async(),
4001 )
4002 .await
4003 .unwrap();
4004
4005 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4006 .await;
4007
4008 const A_TXT: &'static str = "a.txt";
4009 const B_TXT: &'static str = "b.txt";
4010 const E_TXT: &'static str = "c/d/e.txt";
4011
4012 let work_dir = root.path().join("project");
4013
4014 let mut repo = git_init(work_dir.as_path());
4015 git_add(Path::new(A_TXT), &repo);
4016 git_add(Path::new(E_TXT), &repo);
4017 git_commit("Initial commit", &repo);
4018
4019 std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
4020
4021 tree.flush_fs_events(cx).await;
4022
4023 // Check that the right git state is observed on startup
4024 tree.read_with(cx, |tree, _cx| {
4025 let snapshot = tree.snapshot();
4026 assert_eq!(snapshot.repository_entries.iter().count(), 1);
4027 let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
4028 assert_eq!(dir.0.as_ref(), Path::new("project"));
4029
4030 assert_eq!(repo.worktree_statuses.iter().count(), 2);
4031 assert_eq!(
4032 repo.worktree_statuses.get(&Path::new(A_TXT).into()),
4033 Some(&GitFileStatus::Modified)
4034 );
4035 assert_eq!(
4036 repo.worktree_statuses.get(&Path::new(B_TXT).into()),
4037 Some(&GitFileStatus::Added)
4038 );
4039 });
4040
4041 git_add(Path::new(A_TXT), &repo);
4042 git_add(Path::new(B_TXT), &repo);
4043 git_commit("Committing modified and added", &repo);
4044 tree.flush_fs_events(cx).await;
4045
4046 // Check that repo only changes are tracked
4047 tree.read_with(cx, |tree, _cx| {
4048 let snapshot = tree.snapshot();
4049 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4050
4051 assert_eq!(repo.worktree_statuses.iter().count(), 0);
4052 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4053 assert_eq!(repo.worktree_statuses.get(&Path::new(B_TXT).into()), None);
4054 });
4055
4056 git_reset(0, &repo);
4057 git_remove_index(Path::new(B_TXT), &repo);
4058 git_stash(&mut repo);
4059 std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
4060 tree.flush_fs_events(cx).await;
4061
4062 // Check that more complex repo changes are tracked
4063 tree.read_with(cx, |tree, _cx| {
4064 let snapshot = tree.snapshot();
4065 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4066
4067 assert_eq!(repo.worktree_statuses.iter().count(), 2);
4068 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4069 assert_eq!(
4070 repo.worktree_statuses.get(&Path::new(B_TXT).into()),
4071 Some(&GitFileStatus::Added)
4072 );
4073 assert_eq!(
4074 repo.worktree_statuses.get(&Path::new(E_TXT).into()),
4075 Some(&GitFileStatus::Modified)
4076 );
4077 });
4078
4079 std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
4080 std::fs::remove_dir_all(work_dir.join("c")).unwrap();
4081
4082 tree.flush_fs_events(cx).await;
4083
4084 // Check that non-repo behavior is tracked
4085 tree.read_with(cx, |tree, _cx| {
4086 let snapshot = tree.snapshot();
4087 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4088
4089 assert_eq!(repo.worktree_statuses.iter().count(), 0);
4090 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4091 assert_eq!(repo.worktree_statuses.get(&Path::new(B_TXT).into()), None);
4092 assert_eq!(repo.worktree_statuses.get(&Path::new(E_TXT).into()), None);
4093 });
4094 }
4095
    /// Verifies that writing files through the worktree creates entries
    /// with correct ignore status, including for files inside an
    /// ignored directory.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        // Write one file in a tracked directory and one in an ignored one.
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        // Both files get entries, with ignore status from their parent dir.
        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4149
    /// Verifies that a directory created through the worktree while the
    /// initial scan is still running is recorded correctly, and that the
    /// resulting snapshot diff replays cleanly onto an older snapshot.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Snapshot taken before the scan has completed.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Create "a/e" while the initial scan may still be in flight.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Applying the diff between the snapshots must reproduce the final
        // state exactly.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
4198
    /// Randomized test: performs worktree mutations while the initial scan
    /// is still running, checking snapshot invariants after each operation
    /// and that incremental snapshot updates replay correctly, both during
    /// and after the scan. Operation counts are tunable via the
    /// OPERATIONS / INITIAL_ENTRIES env vars.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed the fake FS with a random tree.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot that remote-style updates get applied to.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally verify that the update stream reproduces the
            // current state on the baseline snapshot mid-scan.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // After the scan completes, invariants and the update stream must
        // still hold.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
4275
    /// Randomized test of steady-state change tracking: after the initial
    /// scan, random FS mutations (with events flushed in random batches)
    /// must keep `UpdatedEntries` events consistent with the snapshot, a
    /// freshly-scanned worktree must agree with the mutated one, and stored
    /// intermediate snapshots must replay to the final state via updates.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            // Maintain `paths` purely from events; it must always equal the
            // worktree's actual path list.
            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                // NOTE(review): `paths[ix]` panics if `ix == paths.len()`
                                // (binary_search can return the past-the-end index for a
                                // new path that sorts last); `paths.get(ix)` would be safer.
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Interleave FS mutations with partial event flushes so changes are
        // observed in random batches.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Deliver all remaining events and let the worktree settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A brand-new worktree scanning the same FS must agree.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored snapshot (optionally with ignored entries stripped,
        // mirroring what remote clients hold) must replay to the final state.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
4430
    /// Applies one random mutation to the worktree through its public API:
    /// deleting an entry, renaming an entry, or creating/overwriting one.
    /// Returns a task that resolves when the operation completes.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3: delete (never the root entry, whose path is "").
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3: rename to a random location (never the root).
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into its own subtree.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remainder: create a child under a directory, or overwrite a file.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4490
/// Randomly perturbs the fake filesystem rooted at `root_path`: inserts a new
/// file or directory, writes a `.gitignore`, renames an entry, or deletes one.
/// `insertion_probability` biases the choice toward insertion.
async fn randomly_mutate_fs(
    fs: &Arc<dyn Fs>,
    root_path: &Path,
    insertion_probability: f64,
    rng: &mut impl Rng,
) {
    // Partition every existing path under the root into files and directories
    // (the root itself always lands in `dirs`).
    let mut files = Vec::new();
    let mut dirs = Vec::new();
    for path in fs.as_fake().paths() {
        if path.starts_with(root_path) {
            if fs.is_file(&path).await {
                files.push(path);
            } else {
                dirs.push(path);
            }
        }
    }

    // Force an insertion when the tree contains nothing but the root dir,
    // since the other mutations need something to rename or delete.
    if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
        let path = dirs.choose(rng).unwrap();
        let new_path = path.join(gen_name(rng));

        if rng.gen() {
            log::info!(
                "creating dir {:?}",
                new_path.strip_prefix(root_path).unwrap()
            );
            fs.create_dir(&new_path).await.unwrap();
        } else {
            log::info!(
                "creating file {:?}",
                new_path.strip_prefix(root_path).unwrap()
            );
            fs.create_file(&new_path, Default::default()).await.unwrap();
        }
    } else if rng.gen_bool(0.05) {
        // Rarely: write a `.gitignore` into some directory, ignoring a random
        // subset of the files and dirs beneath it.
        let ignore_dir_path = dirs.choose(rng).unwrap();
        let ignore_path = ignore_dir_path.join(&*GITIGNORE);

        let subdirs = dirs
            .iter()
            .filter(|d| d.starts_with(&ignore_dir_path))
            .cloned()
            .collect::<Vec<_>>();
        let subfiles = files
            .iter()
            .filter(|d| d.starts_with(&ignore_dir_path))
            .cloned()
            .collect::<Vec<_>>();
        // NOTE(review): the file range is inclusive (`0..=len`) but the dir
        // range is exclusive (`0..len`) — presumably so the ignore dir never
        // ignores its entire dir set including itself; confirm if touched.
        let files_to_ignore = {
            let len = rng.gen_range(0..=subfiles.len());
            subfiles.choose_multiple(rng, len)
        };
        let dirs_to_ignore = {
            let len = rng.gen_range(0..subdirs.len());
            subdirs.choose_multiple(rng, len)
        };

        // Each ignored path is written relative to the gitignore's directory,
        // one pattern per line.
        let mut ignore_contents = String::new();
        for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
            writeln!(
                ignore_contents,
                "{}",
                path_to_ignore
                    .strip_prefix(&ignore_dir_path)
                    .unwrap()
                    .to_str()
                    .unwrap()
            )
            .unwrap();
        }
        log::info!(
            "creating gitignore {:?} with contents:\n{}",
            ignore_path.strip_prefix(&root_path).unwrap(),
            ignore_contents
        );
        fs.save(
            &ignore_path,
            &ignore_contents.as_str().into(),
            Default::default(),
        )
        .await
        .unwrap();
    } else {
        // Otherwise: rename or delete an existing entry. Pick either a file
        // or a non-root dir (`dirs[1..]` skips the root) with equal weight
        // per candidate.
        let old_path = {
            let file_path = files.choose(rng);
            let dir_path = dirs[1..].choose(rng);
            file_path.into_iter().chain(dir_path).choose(rng).unwrap()
        };

        let is_rename = rng.gen();
        if is_rename {
            // Choose a destination parent that is not inside the entry being
            // moved (moving a dir into its own subtree is invalid).
            let new_path_parent = dirs
                .iter()
                .filter(|d| !d.starts_with(old_path))
                .choose(rng)
                .unwrap();

            // Sometimes replace the destination directory wholesale instead
            // of moving into it.
            let overwrite_existing_dir =
                !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
            let new_path = if overwrite_existing_dir {
                fs.remove_dir(
                    &new_path_parent,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
                new_path_parent.to_path_buf()
            } else {
                new_path_parent.join(gen_name(rng))
            };

            log::info!(
                "renaming {:?} to {}{:?}",
                old_path.strip_prefix(&root_path).unwrap(),
                if overwrite_existing_dir {
                    "overwrite "
                } else {
                    ""
                },
                new_path.strip_prefix(&root_path).unwrap()
            );
            fs.rename(
                &old_path,
                &new_path,
                fs::RenameOptions {
                    overwrite: true,
                    ignore_if_exists: true,
                },
            )
            .await
            .unwrap();
        } else if fs.is_file(&old_path).await {
            log::info!(
                "deleting file {:?}",
                old_path.strip_prefix(&root_path).unwrap()
            );
            fs.remove_file(old_path, Default::default()).await.unwrap();
        } else {
            log::info!(
                "deleting dir {:?}",
                old_path.strip_prefix(&root_path).unwrap()
            );
            fs.remove_dir(
                &old_path,
                RemoveOptions {
                    recursive: true,
                    ignore_if_not_exists: true,
                },
            )
            .await
            .unwrap();
        }
    }
}
4649
4650 fn gen_name(rng: &mut impl Rng) -> String {
4651 (0..6)
4652 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4653 .map(char::from)
4654 .collect()
4655 }
4656
4657 impl LocalSnapshot {
4658 fn check_invariants(&self) {
4659 assert_eq!(
4660 self.entries_by_path
4661 .cursor::<()>()
4662 .map(|e| (&e.path, e.id))
4663 .collect::<Vec<_>>(),
4664 self.entries_by_id
4665 .cursor::<()>()
4666 .map(|e| (&e.path, e.id))
4667 .collect::<collections::BTreeSet<_>>()
4668 .into_iter()
4669 .collect::<Vec<_>>(),
4670 "entries_by_path and entries_by_id are inconsistent"
4671 );
4672
4673 let mut files = self.files(true, 0);
4674 let mut visible_files = self.files(false, 0);
4675 for entry in self.entries_by_path.cursor::<()>() {
4676 if entry.is_file() {
4677 assert_eq!(files.next().unwrap().inode, entry.inode);
4678 if !entry.is_ignored {
4679 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4680 }
4681 }
4682 }
4683
4684 assert!(files.next().is_none());
4685 assert!(visible_files.next().is_none());
4686
4687 let mut bfs_paths = Vec::new();
4688 let mut stack = vec![Path::new("")];
4689 while let Some(path) = stack.pop() {
4690 bfs_paths.push(path);
4691 let ix = stack.len();
4692 for child_entry in self.child_entries(path) {
4693 stack.insert(ix, &child_entry.path);
4694 }
4695 }
4696
4697 let dfs_paths_via_iter = self
4698 .entries_by_path
4699 .cursor::<()>()
4700 .map(|e| e.path.as_ref())
4701 .collect::<Vec<_>>();
4702 assert_eq!(bfs_paths, dfs_paths_via_iter);
4703
4704 let dfs_paths_via_traversal = self
4705 .entries(true)
4706 .map(|e| e.path.as_ref())
4707 .collect::<Vec<_>>();
4708 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4709
4710 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4711 let ignore_parent_path =
4712 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4713 assert!(self.entry_for_path(&ignore_parent_path).is_some());
4714 assert!(self
4715 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4716 .is_some());
4717 }
4718 }
4719
4720 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4721 let mut paths = Vec::new();
4722 for entry in self.entries_by_path.cursor::<()>() {
4723 if include_ignored || !entry.is_ignored {
4724 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4725 }
4726 }
4727 paths.sort_by(|a, b| a.0.cmp(b.0));
4728 paths
4729 }
4730 }
4731}