1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{
10 repository::{GitFileStatus, GitRepository, RepoPath},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use smol::channel::{self, Sender};
40use std::{
41 any::Any,
42 cmp::{self, Ordering},
43 convert::TryFrom,
44 ffi::OsStr,
45 fmt,
46 future::Future,
47 mem,
48 ops::{Deref, DerefMut},
49 path::{Path, PathBuf},
50 pin::Pin,
51 sync::{
52 atomic::{AtomicUsize, Ordering::SeqCst},
53 Arc,
54 },
55 time::{Duration, SystemTime},
56};
57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
58use util::{paths::HOME, ResultExt, TakeUntilExt, TryFutureExt};
59
/// Identifies a worktree within a project. Wraps the `usize` id of the
/// model that owns the worktree (populated via `WorktreeId::from_usize(cx.model_id())`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A worktree is either backed by the local filesystem (`Local`) or mirrors
/// a collaborator's worktree over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// A worktree backed by the local filesystem. Its snapshot is kept up to
/// date by a background scanner task that watches for filesystem events.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Requests rescans of specific paths (e.g. after deletes or renames);
    // the barrier sender lets the requester await completion of the rescan.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch channel reflecting whether the background scanner is running.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    // Diagnostics per file path, one sorted entry per language server.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    // Error/warning counts per path, keyed by the reporting language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
86
/// A worktree mirroring a remote collaborator's worktree. Snapshot updates
/// arrive over RPC and are applied on a background task.
pub struct RemoteWorktree {
    snapshot: Snapshot,
    // Written by the background task applying remote updates, then copied
    // into `snapshot` on the foreground.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // Channel feeding the background update-applying task.
    // NOTE(review): presumably taken/cleared on disconnect — confirm against callers.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Callers waiting for the snapshot to observe a given scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
99
/// An immutable view of a worktree's file tree and git repository state.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    // Name of the root directory; used for the purpose of fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    // The same entries indexed two ways: by path and by id.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
122
123impl Snapshot {
124 pub fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
125 let mut max_len = 0;
126 let mut current_candidate = None;
127 for (work_directory, repo) in (&self.repository_entries).iter() {
128 if repo.contains(self, path) {
129 if work_directory.0.as_os_str().len() >= max_len {
130 current_candidate = Some(repo);
131 max_len = work_directory.0.as_os_str().len();
132 } else {
133 break;
134 }
135 }
136 }
137
138 current_candidate.map(|entry| entry.to_owned())
139 }
140}
141
/// Git repository metadata stored in a `Snapshot`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    // Project entry of the repository's working directory.
    pub(crate) work_directory: WorkDirectoryEntry,
    // Currently checked-out branch name, if known.
    pub(crate) branch: Option<Arc<str>>,
    // Git status per repo-relative path.
    pub(crate) worktree_statuses: TreeMap<RepoPath, GitFileStatus>,
}
148
149fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
150 proto::GitStatus::from_i32(git_status).map(|status| match status {
151 proto::GitStatus::Added => GitFileStatus::Added,
152 proto::GitStatus::Modified => GitFileStatus::Modified,
153 proto::GitStatus::Conflict => GitFileStatus::Conflict,
154 })
155}
156
impl RepositoryEntry {
    /// The currently checked-out branch, if known.
    pub fn branch(&self) -> Option<Arc<str>> {
        self.branch.clone()
    }

    /// Id of the project entry for this repository's working directory.
    pub fn work_directory_id(&self) -> ProjectEntryId {
        *self.work_directory
    }

    /// Resolves the working directory to its path within `snapshot`, if the
    /// corresponding entry still exists there.
    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
        snapshot
            .entry_for_id(self.work_directory_id())
            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
    }

    /// Whether `path` (worktree-relative) lies inside this repository's
    /// working directory.
    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
        self.work_directory.contains(snapshot, path)
    }

    /// Git status of a single file, looked up by its repo-relative path.
    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| self.worktree_statuses.get(&repo_path))
            .cloned()
    }

    /// Combined git status for `path` and the statuses recorded beneath it,
    /// with priority `Conflict` > `Added` > `Modified`.
    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| {
                self.worktree_statuses
                    .get_from_while(&repo_path, |repo_path, key, _| key.starts_with(repo_path))
                    .map(|(_, status)| status)
                    // Short circuit once we've found the highest level
                    .take_until(|status| status == &&GitFileStatus::Conflict)
                    .reduce(
                        |status_first, status_second| match (status_first, status_second) {
                            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => {
                                &GitFileStatus::Conflict
                            }
                            (GitFileStatus::Added, _) | (_, GitFileStatus::Added) => {
                                &GitFileStatus::Added
                            }
                            _ => &GitFileStatus::Modified,
                        },
                    )
                    .copied()
            })
    }

    /// Builds a protobuf update describing how `self` differs from `other`:
    /// statuses added or changed in `self`, and paths whose status exists
    /// only in `other` (i.e. removed).
    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
        let mut removed_statuses: Vec<String> = Vec::new();

        // Merge-walk both sorted status maps in lockstep.
        let mut self_statuses = self.worktree_statuses.iter().peekable();
        let mut other_statuses = other.worktree_statuses.iter().peekable();
        loop {
            match (self_statuses.peek(), other_statuses.peek()) {
                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
                    match Ord::cmp(self_repo_path, other_repo_path) {
                        Ordering::Less => {
                            // Only in `self`: newly added status.
                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
                            self_statuses.next();
                        }
                        Ordering::Equal => {
                            // In both: emit only when the status changed.
                            if self_status != other_status {
                                updated_statuses
                                    .push(make_status_entry(self_repo_path, self_status));
                            }

                            self_statuses.next();
                            other_statuses.next();
                        }
                        Ordering::Greater => {
                            // Only in `other`: the status was removed.
                            removed_statuses.push(make_repo_path(other_repo_path));
                            other_statuses.next();
                        }
                    }
                }
                (Some((self_repo_path, self_status)), None) => {
                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
                    self_statuses.next();
                }
                (None, Some((other_repo_path, _))) => {
                    removed_statuses.push(make_repo_path(other_repo_path));
                    other_statuses.next();
                }
                (None, None) => break,
            }
        }

        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id().to_proto(),
            branch: self.branch.as_ref().map(|str| str.to_string()),
            removed_worktree_repo_paths: removed_statuses,
            updated_worktree_statuses: updated_statuses,
        }
    }
}
256
257fn make_repo_path(path: &RepoPath) -> String {
258 path.as_os_str().to_string_lossy().to_string()
259}
260
261fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
262 proto::StatusEntry {
263 repo_path: make_repo_path(path),
264 status: match status {
265 GitFileStatus::Added => proto::GitStatus::Added.into(),
266 GitFileStatus::Modified => proto::GitStatus::Modified.into(),
267 GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
268 },
269 }
270}
271
272impl From<&RepositoryEntry> for proto::RepositoryEntry {
273 fn from(value: &RepositoryEntry) -> Self {
274 proto::RepositoryEntry {
275 work_directory_id: value.work_directory.to_proto(),
276 branch: value.branch.as_ref().map(|str| str.to_string()),
277 updated_worktree_statuses: value
278 .worktree_statuses
279 .iter()
280 .map(|(repo_path, status)| make_status_entry(repo_path, status))
281 .collect(),
282 removed_worktree_repo_paths: Default::default(),
283 }
284 }
285}
286
/// This path corresponds to the 'content path' (the folder that contains the .git)
/// It wraps an entry path, so it is relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
290
291impl Default for RepositoryWorkDirectory {
292 fn default() -> Self {
293 RepositoryWorkDirectory(Arc::from(Path::new("")))
294 }
295}
296
297impl AsRef<Path> for RepositoryWorkDirectory {
298 fn as_ref(&self) -> &Path {
299 self.0.as_ref()
300 }
301}
302
/// Newtype over the `ProjectEntryId` of a repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
305
306impl WorkDirectoryEntry {
307 // Note that these paths should be relative to the worktree root.
308 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
309 snapshot
310 .entry_for_id(self.0)
311 .map(|entry| path.starts_with(&entry.path))
312 .unwrap_or(false)
313 }
314
315 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
316 worktree.entry_for_id(self.0).and_then(|entry| {
317 path.strip_prefix(&entry.path)
318 .ok()
319 .map(move |path| path.into())
320 })
321 }
322}
323
// Derefs to the underlying `ProjectEntryId`.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
331
332impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
333 fn from(value: ProjectEntryId) -> Self {
334 WorkDirectoryEntry(value)
335 }
336}
337
/// A `Snapshot` plus the local-only state needed to keep it up to date:
/// parsed gitignore files, discovered git repositories, and entry-id
/// bookkeeping used while scanning.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    // Parsed .gitignore files keyed by the absolute path of the directory
    // containing them. NOTE(review): the usize is presumably the scan id in
    // which the file was last observed — confirm against the scanner.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    // work_directory_id
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    // NOTE(review): keys look like inode-style u64 identifiers used to reuse
    // entry ids for paths that reappear — confirm against the scanner.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Source of fresh entry ids, shared with the background scanner.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
348
/// State for a git repository discovered in a local worktree.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    // Scan ids of the scans that last touched this repository; a differing
    // `scan_id` marks the repository as updated (see `changed_repos`).
    pub(crate) scan_id: usize,
    pub(crate) full_scan_id: usize,
    // Handle to the underlying git repository, shared with background tasks.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
358
impl LocalRepositoryEntry {
    // Note that this path should be relative to the worktree root.
    /// Whether `path` lies inside this repository's `.git` directory.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}
365
// Exposes the plain `Snapshot` API directly on `LocalSnapshot`.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
373
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
379
/// Messages sent from the background scanner to the foreground worktree.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        // Paths that changed, with the kind of change for each.
        changes: HashMap<Arc<Path>, PathChange>,
        // When present, dropped after the update is applied so a waiting
        // caller can observe completion.
        barrier: Option<barrier::Sender>,
        // Whether the scanner is still working after this update.
        scanning: bool,
    },
}
389
/// State kept while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    // Latest snapshot to replicate to collaborators.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
396
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// Entries changed on disk or via a remote update.
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// Git repositories were added, removed, or rescanned.
    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
}
401
// Lets `Worktree` act as a gpui model that emits `Event` values.
impl Entity for Worktree {
    type Event = Event;
}
405
impl Worktree {
    /// Creates a worktree backed by the local filesystem, rooted at `path`.
    ///
    /// Stats the root eagerly (failing if it can't be read), then spawns a
    /// background scanner that populates the snapshot and watches for
    /// filesystem events, plus a foreground task that applies the scanner's
    /// updates to the model.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // scan_id 1 with completed_scan_id 0 marks the initial
                    // scan as still in progress.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry, if the path existed.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Foreground task: apply each scanner state update to this model.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier wakes any caller
                                // awaiting this batch of path changes.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Background task: scan the tree and watch for filesystem events.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a worktree mirroring a collaborator's worktree. Incoming
    /// `proto::UpdateWorktree` messages are applied to a background snapshot,
    /// which is copied to the foreground whenever it changes.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming remote updates off the main thread.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // After each applied update, copy the background snapshot to the
            // foreground and resolve subscriptions waiting on a scan id.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// The local variant, if this is a local worktree.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// The remote variant, if this is a remote worktree.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable access to the local variant, if this is a local worktree.
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable access to the remote variant, if this is a remote worktree.
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Whether this worktree is backed by the local filesystem.
    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    /// Whether this worktree mirrors a remote collaborator's worktree.
    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// A clone of the current plain `Snapshot` (without local-only state).
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The latest scan id whose preceding scans have all completed.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    /// Whether this worktree should be visible in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The replica id for collaboration; local worktrees are always 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over all diagnostic summaries, flattened to
    /// `(path, language server id, summary)` triples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
700
701impl LocalWorktree {
    /// Whether the given absolute path lies inside this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
705
706 fn absolutize(&self, path: &Path) -> PathBuf {
707 if path.file_name().is_some() {
708 self.abs_path.join(path)
709 } else {
710 self.abs_path.to_path_buf()
711 }
712 }
713
    /// Loads the file at `path` into a new buffer model, seeding its git
    /// diff base from the repository index when one is available.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer off the main thread.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
736
    /// All diagnostics recorded for `path`, grouped by language server.
    /// Returns an empty vec when no server has reported any for this path.
    pub fn diagnostics_for_path(
        &self,
        path: &Path,
    ) -> Vec<(
        LanguageServerId,
        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
    )> {
        self.diagnostics.get(path).cloned().unwrap_or_default()
    }
746
    /// Replaces the diagnostics produced by `server_id` for `worktree_path`,
    /// updates the per-path summaries, and (when the worktree is shared)
    /// broadcasts the new summary to collaborators. Returns whether the old
    /// or new summary was non-empty, i.e. whether anything observable changed.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // This server no longer reports diagnostics for the path: drop
            // its entry, and the whole path entry if no other server does.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path list is kept sorted by server id so the binary
            // searches above and below stay valid.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            // Notify collaborators if the worktree is currently shared.
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
806
    /// Installs a new snapshot produced by the background scanner, forwards
    /// it to collaborators when shared, and emits an event for any git
    /// repositories that differ between the old and new snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos =
            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
820
    /// Diffs two repository maps, returning — keyed by the path of each
    /// repository's project entry — every repository that was added, removed,
    /// or rescanned (differing `scan_id`) between `old_repos` and `new_repos`.
    fn changed_repos(
        &self,
        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
        let mut diff = HashMap::default();
        // Merge-walk both maps in lockstep; they are ordered by entry id.
        let mut old_repos = old_repos.iter().peekable();
        let mut new_repos = new_repos.iter().peekable();
        loop {
            match (old_repos.peek(), new_repos.peek()) {
                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
                    match Ord::cmp(old_entry_id, new_entry_id) {
                        Ordering::Less => {
                            // Repository present only in the old map: removed.
                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
                                diff.insert(entry.path.clone(), (*old_repo).clone());
                            }
                            old_repos.next();
                        }
                        Ordering::Equal => {
                            // Present in both: changed only if rescanned.
                            if old_repo.scan_id != new_repo.scan_id {
                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                    diff.insert(entry.path.clone(), (*new_repo).clone());
                                }
                            }

                            old_repos.next();
                            new_repos.next();
                        }
                        Ordering::Greater => {
                            // Repository present only in the new map: added.
                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                diff.insert(entry.path.clone(), (*new_repo).clone());
                            }
                            new_repos.next();
                        }
                    }
                }
                (Some((old_entry_id, old_repo)), None) => {
                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
                        diff.insert(entry.path.clone(), (*old_repo).clone());
                    }
                    old_repos.next();
                }
                (None, Some((new_entry_id, new_repo))) => {
                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
                        diff.insert(entry.path.clone(), (*new_repo).clone());
                    }
                    new_repos.next();
                }
                (None, None) => break,
            }
        }
        diff
    }
874
875 pub fn scan_complete(&self) -> impl Future<Output = ()> {
876 let mut is_scanning_rx = self.is_scanning.1.clone();
877 async move {
878 let mut is_scanning = is_scanning_rx.borrow().clone();
879 while is_scanning {
880 if let Some(value) = is_scanning_rx.recv().await {
881 is_scanning = value;
882 } else {
883 break;
884 }
885 }
886 }
887 }
888
    /// A clone of the full local snapshot, including scanner bookkeeping.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
892
    /// This worktree's metadata in the form sent to collaborators.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
901
    /// Reads the file at `path` from disk, along with its git index text
    /// (used as the diff base) when the file belongs to a repository, and
    /// refreshes the worktree entry for the path.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // If the file lives inside a git repository, kick off a background
        // load of its index text to serve as the diff base.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
956
957 pub fn save_buffer(
958 &self,
959 buffer_handle: ModelHandle<Buffer>,
960 path: Arc<Path>,
961 has_changed_file: bool,
962 cx: &mut ModelContext<Worktree>,
963 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
964 let handle = cx.handle();
965 let buffer = buffer_handle.read(cx);
966
967 let rpc = self.client.clone();
968 let buffer_id = buffer.remote_id();
969 let project_id = self.share.as_ref().map(|share| share.project_id);
970
971 let text = buffer.as_rope().clone();
972 let fingerprint = text.fingerprint();
973 let version = buffer.version();
974 let save = self.write_file(path, text, buffer.line_ending(), cx);
975
976 cx.as_mut().spawn(|mut cx| async move {
977 let entry = save.await?;
978
979 if has_changed_file {
980 let new_file = Arc::new(File {
981 entry_id: entry.id,
982 worktree: handle,
983 path: entry.path,
984 mtime: entry.mtime,
985 is_local: true,
986 is_deleted: false,
987 });
988
989 if let Some(project_id) = project_id {
990 rpc.send(proto::UpdateBufferFile {
991 project_id,
992 buffer_id,
993 file: Some(new_file.to_proto()),
994 })
995 .log_err();
996 }
997
998 buffer_handle.update(&mut cx, |buffer, cx| {
999 if has_changed_file {
1000 buffer.file_updated(new_file, cx).detach();
1001 }
1002 });
1003 }
1004
1005 if let Some(project_id) = project_id {
1006 rpc.send(proto::BufferSaved {
1007 project_id,
1008 buffer_id,
1009 version: serialize_version(&version),
1010 mtime: Some(entry.mtime.into()),
1011 fingerprint: serialize_fingerprint(fingerprint),
1012 })?;
1013 }
1014
1015 buffer_handle.update(&mut cx, |buffer, cx| {
1016 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1017 });
1018
1019 Ok((version, fingerprint, entry.mtime))
1020 })
1021 }
1022
    /// Creates an empty file or a directory at `path`, then refreshes the
    /// corresponding worktree entry and returns it.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Save empty contents with default line endings.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
1049
    /// Writes `text` to `path` with the given line endings, then refreshes
    /// the worktree entry for the path and returns it.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
1072
    /// Deletes the file or directory for `entry_id` from disk, then waits
    /// for the background scanner to process the removal. Returns `None` if
    /// the entry no longer exists in the snapshot.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root before joining the entry path.
            // NOTE(review): presumably to match the paths reported by the
            // filesystem watcher — confirm.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            // Wait until the scanner has processed the deleted path.
            rx.recv().await;
            Ok(())
        }))
    }
1115
    /// Renames the entry `entry_id` to `new_path` on disk, then refreshes
    /// both the new and old paths in the snapshot and returns the new entry.
    /// Returns `None` if the entry no longer exists in the snapshot.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
1142
1143 pub fn copy_entry(
1144 &self,
1145 entry_id: ProjectEntryId,
1146 new_path: impl Into<Arc<Path>>,
1147 cx: &mut ModelContext<Worktree>,
1148 ) -> Option<Task<Result<Entry>>> {
1149 let old_path = self.entry_for_id(entry_id)?.path.clone();
1150 let new_path = new_path.into();
1151 let abs_old_path = self.absolutize(&old_path);
1152 let abs_new_path = self.absolutize(&new_path);
1153 let fs = self.fs.clone();
1154 let copy = cx.background().spawn(async move {
1155 copy_recursive(
1156 fs.as_ref(),
1157 &abs_old_path,
1158 &abs_new_path,
1159 Default::default(),
1160 )
1161 .await
1162 });
1163
1164 Some(cx.spawn(|this, mut cx| async move {
1165 copy.await?;
1166 this.update(&mut cx, |this, cx| {
1167 this.as_local_mut()
1168 .unwrap()
1169 .refresh_entry(new_path.clone(), None, cx)
1170 })
1171 .await
1172 }))
1173 }
1174
1175 fn refresh_entry(
1176 &self,
1177 path: Arc<Path>,
1178 old_path: Option<Arc<Path>>,
1179 cx: &mut ModelContext<Worktree>,
1180 ) -> Task<Result<Entry>> {
1181 let fs = self.fs.clone();
1182 let abs_root_path = self.abs_path.clone();
1183 let path_changes_tx = self.path_changes_tx.clone();
1184 cx.spawn_weak(move |this, mut cx| async move {
1185 let abs_path = fs.canonicalize(&abs_root_path).await?;
1186 let mut paths = Vec::with_capacity(2);
1187 paths.push(if path.file_name().is_some() {
1188 abs_path.join(&path)
1189 } else {
1190 abs_path.clone()
1191 });
1192 if let Some(old_path) = old_path {
1193 paths.push(if old_path.file_name().is_some() {
1194 abs_path.join(&old_path)
1195 } else {
1196 abs_path.clone()
1197 });
1198 }
1199
1200 let (tx, mut rx) = barrier::channel();
1201 path_changes_tx.try_send((paths, tx))?;
1202 rx.recv().await;
1203 this.upgrade(&cx)
1204 .ok_or_else(|| anyhow!("worktree was dropped"))?
1205 .update(&mut cx, |this, _| {
1206 this.entry_for_path(path)
1207 .cloned()
1208 .ok_or_else(|| anyhow!("failed to read path after update"))
1209 })
1210 })
1211 }
1212
    /// Begins (or resumes) sharing this worktree with collaborators in the
    /// given project.
    ///
    /// On first share, this sends all current diagnostic summaries and spawns
    /// a background task that streams snapshot diffs to the server. The
    /// returned task resolves once the first full snapshot has been sent, or
    /// fails with "share ended" if sharing stops before that.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve the caller immediately and signal the
            // background task to resume sending updates.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send all existing diagnostic summaries up front.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Diff against an empty snapshot first so the initial
                    // update contains the entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Use a tiny chunk size in tests to exercise the
                        // chunk-splitting logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Drain any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            // Retry each chunk until it succeeds, pausing
                            // after a failure until a resume signal arrives.
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // The first snapshot has been fully sent; resolve the
                        // caller of `share`.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1299
1300 pub fn unshare(&mut self) {
1301 self.share.take();
1302 }
1303
1304 pub fn is_shared(&self) -> bool {
1305 self.share.is_some()
1306 }
1307}
1308
impl RemoteWorktree {
    /// Returns a clone of the current foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks this worktree as disconnected from the host, dropping the update
    /// channel and clearing all pending snapshot subscriptions.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the given buffer, then applies the returned
    /// version, fingerprint, and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Enqueues a worktree update received from the host. Updates are dropped
    /// silently once the worktree has been disconnected.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot with the given scan id has already been fully
    /// applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once a snapshot with the given scan id
    /// has been observed, or fails if the worktree is (or becomes)
    /// disconnected.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender causes the receive below to fail.
            drop(tx);
        } else {
            // Keep the subscription list sorted by scan id so subscribers can
            // be completed in order as snapshots arrive.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Replaces the stored diagnostic summary for `path` and the summary's
    /// language server, pruning entries that become empty.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry sent by the host, waiting until the snapshot with the
    /// given scan id has been observed before applying it.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes an entry by id, waiting until the snapshot with the given scan
    /// id has been observed before applying the deletion.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1450
1451impl Snapshot {
1452 pub fn id(&self) -> WorktreeId {
1453 self.id
1454 }
1455
1456 pub fn abs_path(&self) -> &Arc<Path> {
1457 &self.abs_path
1458 }
1459
1460 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1461 self.entries_by_id.get(&entry_id, &()).is_some()
1462 }
1463
1464 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1465 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1466 let old_entry = self.entries_by_id.insert_or_replace(
1467 PathEntry {
1468 id: entry.id,
1469 path: entry.path.clone(),
1470 is_ignored: entry.is_ignored,
1471 scan_id: 0,
1472 },
1473 &(),
1474 );
1475 if let Some(old_entry) = old_entry {
1476 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1477 }
1478 self.entries_by_path.insert_or_replace(entry.clone(), &());
1479 Ok(entry)
1480 }
1481
1482 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1483 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1484 self.entries_by_path = {
1485 let mut cursor = self.entries_by_path.cursor();
1486 let mut new_entries_by_path =
1487 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1488 while let Some(entry) = cursor.item() {
1489 if entry.path.starts_with(&removed_entry.path) {
1490 self.entries_by_id.remove(&entry.id, &());
1491 cursor.next(&());
1492 } else {
1493 break;
1494 }
1495 }
1496 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1497 new_entries_by_path
1498 };
1499
1500 Some(removed_entry.path)
1501 }
1502
1503 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1504 let mut entries_by_path_edits = Vec::new();
1505 let mut entries_by_id_edits = Vec::new();
1506 for entry_id in update.removed_entries {
1507 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1508 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1509 entries_by_id_edits.push(Edit::Remove(entry.id));
1510 }
1511 }
1512
1513 for entry in update.updated_entries {
1514 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1515 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1516 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1517 }
1518 entries_by_id_edits.push(Edit::Insert(PathEntry {
1519 id: entry.id,
1520 path: entry.path.clone(),
1521 is_ignored: entry.is_ignored,
1522 scan_id: 0,
1523 }));
1524 entries_by_path_edits.push(Edit::Insert(entry));
1525 }
1526
1527 self.entries_by_path.edit(entries_by_path_edits, &());
1528 self.entries_by_id.edit(entries_by_id_edits, &());
1529
1530 update.removed_repositories.sort_unstable();
1531 self.repository_entries.retain(|_, entry| {
1532 if let Ok(_) = update
1533 .removed_repositories
1534 .binary_search(&entry.work_directory.to_proto())
1535 {
1536 false
1537 } else {
1538 true
1539 }
1540 });
1541
1542 for repository in update.updated_repositories {
1543 let work_directory_entry: WorkDirectoryEntry =
1544 ProjectEntryId::from_proto(repository.work_directory_id).into();
1545
1546 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1547 let mut statuses = TreeMap::default();
1548 for status_entry in repository.updated_worktree_statuses {
1549 let Some(git_file_status) = read_git_status(status_entry.status) else {
1550 continue;
1551 };
1552
1553 let repo_path = RepoPath::new(status_entry.repo_path.into());
1554 statuses.insert(repo_path, git_file_status);
1555 }
1556
1557 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1558 if self.repository_entries.get(&work_directory).is_some() {
1559 self.repository_entries.update(&work_directory, |repo| {
1560 repo.branch = repository.branch.map(Into::into);
1561 repo.worktree_statuses.insert_tree(statuses);
1562
1563 for repo_path in repository.removed_worktree_repo_paths {
1564 let repo_path = RepoPath::new(repo_path.into());
1565 repo.worktree_statuses.remove(&repo_path);
1566 }
1567 });
1568 } else {
1569 self.repository_entries.insert(
1570 work_directory,
1571 RepositoryEntry {
1572 work_directory: work_directory_entry,
1573 branch: repository.branch.map(Into::into),
1574 worktree_statuses: statuses,
1575 },
1576 )
1577 }
1578 } else {
1579 log::error!("no work directory entry for repository {:?}", repository)
1580 }
1581 }
1582
1583 self.scan_id = update.scan_id as usize;
1584 if update.is_last_update {
1585 self.completed_scan_id = update.scan_id as usize;
1586 }
1587
1588 Ok(())
1589 }
1590
1591 pub fn file_count(&self) -> usize {
1592 self.entries_by_path.summary().file_count
1593 }
1594
1595 pub fn visible_file_count(&self) -> usize {
1596 self.entries_by_path.summary().visible_file_count
1597 }
1598
1599 fn traverse_from_offset(
1600 &self,
1601 include_dirs: bool,
1602 include_ignored: bool,
1603 start_offset: usize,
1604 ) -> Traversal {
1605 let mut cursor = self.entries_by_path.cursor();
1606 cursor.seek(
1607 &TraversalTarget::Count {
1608 count: start_offset,
1609 include_dirs,
1610 include_ignored,
1611 },
1612 Bias::Right,
1613 &(),
1614 );
1615 Traversal {
1616 cursor,
1617 include_dirs,
1618 include_ignored,
1619 }
1620 }
1621
1622 fn traverse_from_path(
1623 &self,
1624 include_dirs: bool,
1625 include_ignored: bool,
1626 path: &Path,
1627 ) -> Traversal {
1628 let mut cursor = self.entries_by_path.cursor();
1629 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1630 Traversal {
1631 cursor,
1632 include_dirs,
1633 include_ignored,
1634 }
1635 }
1636
1637 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1638 self.traverse_from_offset(false, include_ignored, start)
1639 }
1640
1641 pub fn entries(&self, include_ignored: bool) -> Traversal {
1642 self.traverse_from_offset(true, include_ignored, 0)
1643 }
1644
1645 pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1646 self.repository_entries.values()
1647 }
1648
1649 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1650 let empty_path = Path::new("");
1651 self.entries_by_path
1652 .cursor::<()>()
1653 .filter(move |entry| entry.path.as_ref() != empty_path)
1654 .map(|entry| &entry.path)
1655 }
1656
1657 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1658 let mut cursor = self.entries_by_path.cursor();
1659 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1660 let traversal = Traversal {
1661 cursor,
1662 include_dirs: true,
1663 include_ignored: true,
1664 };
1665 ChildEntriesIter {
1666 traversal,
1667 parent_path,
1668 }
1669 }
1670
1671 pub fn root_entry(&self) -> Option<&Entry> {
1672 self.entry_for_path("")
1673 }
1674
1675 pub fn root_name(&self) -> &str {
1676 &self.root_name
1677 }
1678
1679 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1680 self.repository_entries
1681 .get(&RepositoryWorkDirectory(Path::new("").into()))
1682 .map(|entry| entry.to_owned())
1683 }
1684
1685 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1686 self.repository_entries.values()
1687 }
1688
1689 pub fn scan_id(&self) -> usize {
1690 self.scan_id
1691 }
1692
1693 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1694 let path = path.as_ref();
1695 self.traverse_from_path(true, true, path)
1696 .entry()
1697 .and_then(|entry| {
1698 if entry.path.as_ref() == path {
1699 Some(entry)
1700 } else {
1701 None
1702 }
1703 })
1704 }
1705
1706 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1707 let entry = self.entries_by_id.get(&id, &())?;
1708 self.entry_for_path(&entry.path)
1709 }
1710
1711 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1712 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1713 }
1714}
1715
1716impl LocalSnapshot {
1717 pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1718 self.git_repositories.get(&repo.work_directory.0)
1719 }
1720
1721 pub(crate) fn repo_for_metadata(
1722 &self,
1723 path: &Path,
1724 ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1725 self.git_repositories
1726 .iter()
1727 .find(|(_, repo)| repo.in_dot_git(path))
1728 }
1729
1730 #[cfg(test)]
1731 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1732 let root_name = self.root_name.clone();
1733 proto::UpdateWorktree {
1734 project_id,
1735 worktree_id: self.id().to_proto(),
1736 abs_path: self.abs_path().to_string_lossy().into(),
1737 root_name,
1738 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1739 removed_entries: Default::default(),
1740 scan_id: self.scan_id as u64,
1741 is_last_update: true,
1742 updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1743 removed_repositories: Default::default(),
1744 }
1745 }
1746
1747 pub(crate) fn build_update(
1748 &self,
1749 other: &Self,
1750 project_id: u64,
1751 worktree_id: u64,
1752 include_ignored: bool,
1753 ) -> proto::UpdateWorktree {
1754 let mut updated_entries = Vec::new();
1755 let mut removed_entries = Vec::new();
1756 let mut self_entries = self
1757 .entries_by_id
1758 .cursor::<()>()
1759 .filter(|e| include_ignored || !e.is_ignored)
1760 .peekable();
1761 let mut other_entries = other
1762 .entries_by_id
1763 .cursor::<()>()
1764 .filter(|e| include_ignored || !e.is_ignored)
1765 .peekable();
1766 loop {
1767 match (self_entries.peek(), other_entries.peek()) {
1768 (Some(self_entry), Some(other_entry)) => {
1769 match Ord::cmp(&self_entry.id, &other_entry.id) {
1770 Ordering::Less => {
1771 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1772 updated_entries.push(entry);
1773 self_entries.next();
1774 }
1775 Ordering::Equal => {
1776 if self_entry.scan_id != other_entry.scan_id {
1777 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1778 updated_entries.push(entry);
1779 }
1780
1781 self_entries.next();
1782 other_entries.next();
1783 }
1784 Ordering::Greater => {
1785 removed_entries.push(other_entry.id.to_proto());
1786 other_entries.next();
1787 }
1788 }
1789 }
1790 (Some(self_entry), None) => {
1791 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1792 updated_entries.push(entry);
1793 self_entries.next();
1794 }
1795 (None, Some(other_entry)) => {
1796 removed_entries.push(other_entry.id.to_proto());
1797 other_entries.next();
1798 }
1799 (None, None) => break,
1800 }
1801 }
1802
1803 let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1804 let mut removed_repositories = Vec::new();
1805 let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1806 let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1807 loop {
1808 match (self_repos.peek(), other_repos.peek()) {
1809 (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1810 match Ord::cmp(self_work_dir, other_work_dir) {
1811 Ordering::Less => {
1812 updated_repositories.push((*self_repo).into());
1813 self_repos.next();
1814 }
1815 Ordering::Equal => {
1816 if self_repo != other_repo {
1817 updated_repositories.push(self_repo.build_update(other_repo));
1818 }
1819
1820 self_repos.next();
1821 other_repos.next();
1822 }
1823 Ordering::Greater => {
1824 removed_repositories.push(other_repo.work_directory.to_proto());
1825 other_repos.next();
1826 }
1827 }
1828 }
1829 (Some((_, self_repo)), None) => {
1830 updated_repositories.push((*self_repo).into());
1831 self_repos.next();
1832 }
1833 (None, Some((_, other_repo))) => {
1834 removed_repositories.push(other_repo.work_directory.to_proto());
1835 other_repos.next();
1836 }
1837 (None, None) => break,
1838 }
1839 }
1840
1841 proto::UpdateWorktree {
1842 project_id,
1843 worktree_id,
1844 abs_path: self.abs_path().to_string_lossy().into(),
1845 root_name: self.root_name().to_string(),
1846 updated_entries,
1847 removed_entries,
1848 scan_id: self.scan_id as u64,
1849 is_last_update: self.completed_scan_id == self.scan_id,
1850 updated_repositories,
1851 removed_repositories,
1852 }
1853 }
1854
1855 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1856 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1857 let abs_path = self.abs_path.join(&entry.path);
1858 match smol::block_on(build_gitignore(&abs_path, fs)) {
1859 Ok(ignore) => {
1860 self.ignores_by_parent_abs_path.insert(
1861 abs_path.parent().unwrap().into(),
1862 (Arc::new(ignore), self.scan_id),
1863 );
1864 }
1865 Err(error) => {
1866 log::error!(
1867 "error loading .gitignore file {:?} - {:?}",
1868 &entry.path,
1869 error
1870 );
1871 }
1872 }
1873 }
1874
1875 self.reuse_entry_id(&mut entry);
1876
1877 if entry.kind == EntryKind::PendingDir {
1878 if let Some(existing_entry) =
1879 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1880 {
1881 entry.kind = existing_entry.kind;
1882 }
1883 }
1884
1885 let scan_id = self.scan_id;
1886 let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1887 if let Some(removed) = removed {
1888 if removed.id != entry.id {
1889 self.entries_by_id.remove(&removed.id, &());
1890 }
1891 }
1892 self.entries_by_id.insert_or_replace(
1893 PathEntry {
1894 id: entry.id,
1895 path: entry.path.clone(),
1896 is_ignored: entry.is_ignored,
1897 scan_id,
1898 },
1899 &(),
1900 );
1901
1902 entry
1903 }
1904
1905 fn populate_dir(
1906 &mut self,
1907 parent_path: Arc<Path>,
1908 entries: impl IntoIterator<Item = Entry>,
1909 ignore: Option<Arc<Gitignore>>,
1910 fs: &dyn Fs,
1911 ) {
1912 let mut parent_entry = if let Some(parent_entry) =
1913 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1914 {
1915 parent_entry.clone()
1916 } else {
1917 log::warn!(
1918 "populating a directory {:?} that has been removed",
1919 parent_path
1920 );
1921 return;
1922 };
1923
1924 match parent_entry.kind {
1925 EntryKind::PendingDir => {
1926 parent_entry.kind = EntryKind::Dir;
1927 }
1928 EntryKind::Dir => {}
1929 _ => return,
1930 }
1931
1932 if let Some(ignore) = ignore {
1933 self.ignores_by_parent_abs_path.insert(
1934 self.abs_path.join(&parent_path).into(),
1935 (ignore, self.scan_id),
1936 );
1937 }
1938
1939 if parent_path.file_name() == Some(&DOT_GIT) {
1940 self.build_repo(parent_path, fs);
1941 }
1942
1943 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1944 let mut entries_by_id_edits = Vec::new();
1945
1946 for mut entry in entries {
1947 self.reuse_entry_id(&mut entry);
1948 entries_by_id_edits.push(Edit::Insert(PathEntry {
1949 id: entry.id,
1950 path: entry.path.clone(),
1951 is_ignored: entry.is_ignored,
1952 scan_id: self.scan_id,
1953 }));
1954 entries_by_path_edits.push(Edit::Insert(entry));
1955 }
1956
1957 self.entries_by_path.edit(entries_by_path_edits, &());
1958 self.entries_by_id.edit(entries_by_id_edits, &());
1959 }
1960
1961 fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1962 let abs_path = self.abs_path.join(&parent_path);
1963 let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1964
1965 // Guard against repositories inside the repository metadata
1966 if work_dir
1967 .components()
1968 .find(|component| component.as_os_str() == *DOT_GIT)
1969 .is_some()
1970 {
1971 return None;
1972 };
1973
1974 let work_dir_id = self
1975 .entry_for_path(work_dir.clone())
1976 .map(|entry| entry.id)?;
1977
1978 if self.git_repositories.get(&work_dir_id).is_none() {
1979 let repo = fs.open_repo(abs_path.as_path())?;
1980 let work_directory = RepositoryWorkDirectory(work_dir.clone());
1981 let scan_id = self.scan_id;
1982
1983 let repo_lock = repo.lock();
1984
1985 self.repository_entries.insert(
1986 work_directory,
1987 RepositoryEntry {
1988 work_directory: work_dir_id.into(),
1989 branch: repo_lock.branch_name().map(Into::into),
1990 worktree_statuses: repo_lock.worktree_statuses().unwrap_or_default(),
1991 },
1992 );
1993 drop(repo_lock);
1994
1995 self.git_repositories.insert(
1996 work_dir_id,
1997 LocalRepositoryEntry {
1998 scan_id,
1999 full_scan_id: scan_id,
2000 repo_ptr: repo,
2001 git_dir_path: parent_path.clone(),
2002 },
2003 )
2004 }
2005
2006 Some(())
2007 }
2008 fn reuse_entry_id(&mut self, entry: &mut Entry) {
2009 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2010 entry.id = removed_entry_id;
2011 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
2012 entry.id = existing_entry.id;
2013 }
2014 }
2015
2016 fn remove_path(&mut self, path: &Path) {
2017 let mut new_entries;
2018 let removed_entries;
2019 {
2020 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2021 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2022 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2023 new_entries.push_tree(cursor.suffix(&()), &());
2024 }
2025 self.entries_by_path = new_entries;
2026
2027 let mut entries_by_id_edits = Vec::new();
2028 for entry in removed_entries.cursor::<()>() {
2029 let removed_entry_id = self
2030 .removed_entry_ids
2031 .entry(entry.inode)
2032 .or_insert(entry.id);
2033 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2034 entries_by_id_edits.push(Edit::Remove(entry.id));
2035 }
2036 self.entries_by_id.edit(entries_by_id_edits, &());
2037
2038 if path.file_name() == Some(&GITIGNORE) {
2039 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
2040 if let Some((_, scan_id)) = self
2041 .ignores_by_parent_abs_path
2042 .get_mut(abs_parent_path.as_path())
2043 {
2044 *scan_id = self.snapshot.scan_id;
2045 }
2046 }
2047 }
2048
2049 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2050 let mut inodes = TreeSet::default();
2051 for ancestor in path.ancestors().skip(1) {
2052 if let Some(entry) = self.entry_for_path(ancestor) {
2053 inodes.insert(entry.inode);
2054 }
2055 }
2056 inodes
2057 }
2058
2059 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2060 let mut new_ignores = Vec::new();
2061 for ancestor in abs_path.ancestors().skip(1) {
2062 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2063 new_ignores.push((ancestor, Some(ignore.clone())));
2064 } else {
2065 new_ignores.push((ancestor, None));
2066 }
2067 }
2068
2069 let mut ignore_stack = IgnoreStack::none();
2070 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2071 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2072 ignore_stack = IgnoreStack::all();
2073 break;
2074 } else if let Some(ignore) = ignore {
2075 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2076 }
2077 }
2078
2079 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2080 ignore_stack = IgnoreStack::all();
2081 }
2082
2083 ignore_stack
2084 }
2085}
2086
2087async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2088 let contents = fs.load(abs_path).await?;
2089 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2090 let mut builder = GitignoreBuilder::new(parent);
2091 for line in contents.lines() {
2092 builder.add_line(Some(abs_path.into()), line)?;
2093 }
2094 Ok(builder.build()?)
2095}
2096
2097impl WorktreeId {
2098 pub fn from_usize(handle_id: usize) -> Self {
2099 Self(handle_id)
2100 }
2101
2102 pub(crate) fn from_proto(id: u64) -> Self {
2103 Self(id as usize)
2104 }
2105
2106 pub fn to_proto(&self) -> u64 {
2107 self.0 as u64
2108 }
2109
2110 pub fn to_usize(&self) -> usize {
2111 self.0
2112 }
2113}
2114
2115impl fmt::Display for WorktreeId {
2116 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2117 self.0.fmt(f)
2118 }
2119}
2120
2121impl Deref for Worktree {
2122 type Target = Snapshot;
2123
2124 fn deref(&self) -> &Self::Target {
2125 match self {
2126 Worktree::Local(worktree) => &worktree.snapshot,
2127 Worktree::Remote(worktree) => &worktree.snapshot,
2128 }
2129 }
2130}
2131
2132impl Deref for LocalWorktree {
2133 type Target = LocalSnapshot;
2134
2135 fn deref(&self) -> &Self::Target {
2136 &self.snapshot
2137 }
2138}
2139
2140impl Deref for RemoteWorktree {
2141 type Target = Snapshot;
2142
2143 fn deref(&self) -> &Self::Target {
2144 &self.snapshot
2145 }
2146}
2147
2148impl fmt::Debug for LocalWorktree {
2149 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2150 self.snapshot.fmt(f)
2151 }
2152}
2153
2154impl fmt::Debug for Snapshot {
2155 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2156 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2157 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2158
2159 impl<'a> fmt::Debug for EntriesByPath<'a> {
2160 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2161 f.debug_map()
2162 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2163 .finish()
2164 }
2165 }
2166
2167 impl<'a> fmt::Debug for EntriesById<'a> {
2168 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2169 f.debug_list().entries(self.0.iter()).finish()
2170 }
2171 }
2172
2173 f.debug_struct("Snapshot")
2174 .field("id", &self.id)
2175 .field("root_name", &self.root_name)
2176 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2177 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2178 .finish()
2179 }
2180}
2181
/// A handle to a file within a worktree, usable for both local and remote
/// worktrees.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time of the file.
    pub mtime: SystemTime,
    /// Id of the worktree entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether the containing worktree is local to this machine.
    pub(crate) is_local: bool,
    /// Whether this file has been deleted.
    pub(crate) is_deleted: bool,
}
2191
2192impl language::File for File {
2193 fn as_local(&self) -> Option<&dyn language::LocalFile> {
2194 if self.is_local {
2195 Some(self)
2196 } else {
2197 None
2198 }
2199 }
2200
2201 fn mtime(&self) -> SystemTime {
2202 self.mtime
2203 }
2204
2205 fn path(&self) -> &Arc<Path> {
2206 &self.path
2207 }
2208
2209 fn full_path(&self, cx: &AppContext) -> PathBuf {
2210 let mut full_path = PathBuf::new();
2211 let worktree = self.worktree.read(cx);
2212
2213 if worktree.is_visible() {
2214 full_path.push(worktree.root_name());
2215 } else {
2216 let path = worktree.abs_path();
2217
2218 if worktree.is_local() && path.starts_with(HOME.as_path()) {
2219 full_path.push("~");
2220 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2221 } else {
2222 full_path.push(path)
2223 }
2224 }
2225
2226 if self.path.components().next().is_some() {
2227 full_path.push(&self.path);
2228 }
2229
2230 full_path
2231 }
2232
2233 /// Returns the last component of this handle's absolute path. If this handle refers to the root
2234 /// of its worktree, then this method will return the name of the worktree itself.
2235 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2236 self.path
2237 .file_name()
2238 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2239 }
2240
2241 fn is_deleted(&self) -> bool {
2242 self.is_deleted
2243 }
2244
2245 fn as_any(&self) -> &dyn Any {
2246 self
2247 }
2248
2249 fn to_proto(&self) -> rpc::proto::File {
2250 rpc::proto::File {
2251 worktree_id: self.worktree.id() as u64,
2252 entry_id: self.entry_id.to_proto(),
2253 path: self.path.to_string_lossy().into(),
2254 mtime: Some(self.mtime.into()),
2255 is_deleted: self.is_deleted,
2256 }
2257 }
2258}
2259
impl language::LocalFile for File {
    /// The file's absolute path on the local filesystem.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that a buffer backed by this file was reloaded
    /// from disk. The message is only sent when the worktree is currently
    /// shared (i.e. has an associated project id).
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2303
2304impl File {
2305 pub fn from_proto(
2306 proto: rpc::proto::File,
2307 worktree: ModelHandle<Worktree>,
2308 cx: &AppContext,
2309 ) -> Result<Self> {
2310 let worktree_id = worktree
2311 .read(cx)
2312 .as_remote()
2313 .ok_or_else(|| anyhow!("not remote"))?
2314 .id();
2315
2316 if worktree_id.to_proto() != proto.worktree_id {
2317 return Err(anyhow!("worktree id does not match file"));
2318 }
2319
2320 Ok(Self {
2321 worktree,
2322 path: Path::new(&proto.path).into(),
2323 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2324 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2325 is_local: false,
2326 is_deleted: proto.is_deleted,
2327 })
2328 }
2329
2330 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2331 file.and_then(|f| f.as_any().downcast_ref())
2332 }
2333
2334 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2335 self.worktree.read(cx).id()
2336 }
2337
2338 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2339 if self.is_deleted {
2340 None
2341 } else {
2342 Some(self.entry_id)
2343 }
2344 }
2345}
2346
/// A single file or directory in a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry is matched by a gitignore rule.
    pub is_ignored: bool,
}
2357
/// Distinguishes files from directories, and tracks whether a directory's
/// contents have been scanned yet.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose children have not been loaded yet.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying the character bag used for fuzzy-matching its path.
    File(CharBag),
}
2364
/// Describes how a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    /// The change was observed before the initial scan completed, so it can't
    /// be distinguished between an addition and an update.
    AddedOrUpdated,
}
2372
2373impl Entry {
2374 fn new(
2375 path: Arc<Path>,
2376 metadata: &fs::Metadata,
2377 next_entry_id: &AtomicUsize,
2378 root_char_bag: CharBag,
2379 ) -> Self {
2380 Self {
2381 id: ProjectEntryId::new(next_entry_id),
2382 kind: if metadata.is_dir {
2383 EntryKind::PendingDir
2384 } else {
2385 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2386 },
2387 path,
2388 inode: metadata.inode,
2389 mtime: metadata.mtime,
2390 is_symlink: metadata.is_symlink,
2391 is_ignored: false,
2392 }
2393 }
2394
2395 pub fn is_dir(&self) -> bool {
2396 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2397 }
2398
2399 pub fn is_file(&self) -> bool {
2400 matches!(self.kind, EntryKind::File(_))
2401 }
2402}
2403
2404impl sum_tree::Item for Entry {
2405 type Summary = EntrySummary;
2406
2407 fn summary(&self) -> Self::Summary {
2408 let visible_count = if self.is_ignored { 0 } else { 1 };
2409 let file_count;
2410 let visible_file_count;
2411 if self.is_file() {
2412 file_count = 1;
2413 visible_file_count = visible_count;
2414 } else {
2415 file_count = 0;
2416 visible_file_count = 0;
2417 }
2418
2419 EntrySummary {
2420 max_path: self.path.clone(),
2421 count: 1,
2422 visible_count,
2423 file_count,
2424 visible_file_count,
2425 }
2426 }
2427}
2428
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and therefore ordered) by worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2436
/// Aggregated statistics over a range of `Entry` items in `entries_by_path`.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest path in the summarized range.
    max_path: Arc<Path>,
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    file_count: usize,
    /// Number of non-ignored files.
    visible_file_count: usize,
}
2445
impl Default for EntrySummary {
    /// The identity summary: zero entries, with the minimal (empty) path.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2457
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // `rhs` summarizes entries that sort after ours, so its max path wins.
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2469
/// An item in `entries_by_id`: maps a project entry id back to its path and
/// ignore status.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last updated.
    scan_id: usize,
}
2477
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// A single item's summary is just its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2485
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Path entries are keyed (and ordered) by project entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2493
/// Summary for `PathEntry` items: the greatest id in the summarized range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2498
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // `summary` covers items that sort after ours, so its max id wins.
        self.max_id = summary.max_id;
    }
}
2506
/// Allows seeking through `entries_by_id` by project entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2512
/// Key/seek target for `entries_by_path`, ordering entries by their
/// worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2515
impl Default for PathKey {
    /// The minimal key: the empty path, which sorts before all real paths.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2521
/// Allows seeking through `entries_by_path` by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2527
/// Scans a local worktree's directory tree on background threads, keeping
/// `snapshot` up to date as filesystem events and refresh requests arrive.
struct BackgroundScanner {
    /// The authoritative snapshot, shared with the worker tasks.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel over which scan progress and updated snapshots are reported.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the worktree to refresh specific paths.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The last snapshot that was reported, plus the sorted list of paths
    /// that have changed since then.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    /// Set once the initial recursive scan has completed.
    finished_initial_scan: bool,
}
2537
2538impl BackgroundScanner {
2539 fn new(
2540 snapshot: LocalSnapshot,
2541 fs: Arc<dyn Fs>,
2542 status_updates_tx: UnboundedSender<ScanState>,
2543 executor: Arc<executor::Background>,
2544 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2545 ) -> Self {
2546 Self {
2547 fs,
2548 status_updates_tx,
2549 executor,
2550 refresh_requests_rx,
2551 prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2552 snapshot: Mutex::new(snapshot),
2553 finished_initial_scan: false,
2554 }
2555 }
2556
    /// Drives the scanner: performs the initial recursive scan of the
    /// worktree, then loops forever, applying refresh requests and filesystem
    /// events to the snapshot until both channels close.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore matches the root itself, mark the root
            // entry ignored before scanning its contents.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping our sender lets `scan_dirs` terminate once all queued scan
        // jobs (and the jobs they spawn) have been processed.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Batch any further already-ready events into the same call.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2648
    /// Reloads the requested paths and then notifies the worktree, releasing
    /// `barrier` so the requester can observe the refreshed state. Returns
    /// `false` if the status channel has been closed.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2653
    /// Applies a batch of filesystem events to the snapshot: reloads the
    /// affected entries, rescans directories that changed, refreshes ignore
    /// statuses, prunes git repository state that no longer applies, and then
    /// broadcasts a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Record which paths changed so the next status update can report
            // precise per-path changes.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop git repository state whose `.git` directory has disappeared.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|work_directory_id, _| {
            snapshot
                .entry_for_id(*work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                })
        });
        snapshot.git_repositories = git_repositories;

        // Keep repository entries only for repositories that still exist.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            snapshot
                .git_repositories
                .get(&entry.work_directory.0)
                .is_some()
        });
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }
2696
    /// Drains `scan_jobs_rx`, scanning directories concurrently on all
    /// available CPUs until the queue is exhausted. Refresh requests are still
    /// serviced while scanning, and periodic progress updates are sent when
    /// `enable_progress_updates` is set.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            // The receiving side is gone; there is nobody to scan for.
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker won the race; adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Errors on the worktree root itself are not logged.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2769
    /// Sends the current snapshot to the worktree along with the set of
    /// changes since the previously reported snapshot. Returns `false` if the
    /// receiving end has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        // Swap the previously-reported snapshot for the current one and take
        // the accumulated list of changed paths.
        let mut old_snapshot = snapshot.snapshot.clone();
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2786
    /// Reads the children of `job`'s directory, inserts the resulting entries
    /// into the snapshot, and enqueues scan jobs for subdirectories. Applies
    /// gitignore rules as `.gitignore` files are discovered along the way.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One element per directory entry: `Some` when the directory will be
        // scanned, `None` when recursion was skipped (recursive symlink).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // tends to appear early in the listing, so there should rarely be many entries to
                // update. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // Directory entries and jobs were pushed in lock-step,
                        // so each directory entry has a corresponding job slot.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        // Commit all of this directory's entries to the snapshot at once.
        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue the subdirectory jobs only after the parent is populated.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2912
    /// Refreshes the snapshot entries for the given absolute paths. When
    /// `scan_queue_tx` is provided, affected directories are removed and
    /// enqueued for a recursive rescan. Returns the refreshed paths relative
    /// to the worktree root, or `None` if the root can't be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sort so that parents precede their descendants, then drop any path
        // whose ancestor is also present (the ancestor's refresh covers it).
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                // The path exists: insert or update its entry.
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    self.reload_repo_for_path(&path, &mut snapshot);

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        // Recursively rescan directories, guarding against
                        // symlink cycles via the ancestor inode set.
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                // The path is gone; clean up any associated git status.
                Ok(None) => {
                    self.remove_repo_path(&path, &mut snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
3004
    /// Removes the cached git statuses for `path` (and everything beneath it)
    /// from its containing repository. Paths inside a `.git` directory are
    /// left alone. Returns `None` when the path's repository or repo-relative
    /// path can't be determined.
    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        if !path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let scan_id = snapshot.scan_id;
            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            // Drop the stored status for this path and any paths beneath it.
            snapshot.repository_entries.update(&work_dir, |entry| {
                entry
                    .worktree_statuses
                    .remove_from_while(&repo_path, |stored_path, _| {
                        stored_path.starts_with(&repo_path)
                    })
            });
        }

        Some(())
    }
3033
    /// Refreshes git repository state affected by a change at `path`. A change
    /// inside a `.git` directory triggers a full reload of that repository's
    /// branch and statuses; a change to an ordinary file updates just that
    /// file's status. Returns `None` when no repository applies or there is
    /// nothing to do.
    fn reload_repo_for_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let (entry_id, repo_ptr) = {
                let (entry_id, repo) = snapshot.repo_for_metadata(&path)?;
                // Short-circuit if this repository was already fully scanned
                // during the current scan.
                if repo.full_scan_id == scan_id {
                    return None;
                }
                (*entry_id, repo.repo_ptr.to_owned())
            };

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            // Reload the repository's index and recompute branch and statuses.
            let repo = repo_ptr.lock();
            repo.reload_index();
            let branch = repo.branch_name();
            let statuses = repo.worktree_statuses().unwrap_or_default();

            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.scan_id = scan_id;
                entry.full_scan_id = scan_id;
            });

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.branch = branch.map(Into::into);
                entry.worktree_statuses = statuses;
            });
        } else {
            // Ignored files don't carry git statuses; drop any stale ones.
            if snapshot
                .entry_for_path(&path)
                .map(|entry| entry.is_ignored)
                .unwrap_or(false)
            {
                self.remove_repo_path(&path, snapshot);
                return None;
            }

            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let status = {
                let local_repo = snapshot.get_local_repo(&repo)?;

                // Short circuit if we've already scanned everything
                if local_repo.full_scan_id == scan_id {
                    return None;
                }

                let git_ptr = local_repo.repo_ptr.lock();
                git_ptr.worktree_status(&repo_path)?
            };

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.worktree_statuses.insert(repo_path, status)
            });
        }

        Some(())
    }
3107
    /// Recomputes `is_ignored` for entries affected by `.gitignore` files that
    /// changed during the current scan, and drops cached gitignores whose
    /// files no longer exist.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        // Work against a clone of the snapshot; the resulting edits are
        // written back by each `update_ignore_status` job.
        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A scan id newer than the completed scan means this gitignore
                // changed during the current scan; revisit its subtree.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of this path; the ancestor's job will recurse
            // into them anyway.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
3185
    /// Applies `job`'s ignore stack to the direct children of its directory,
    /// writing changed entries back to the shared snapshot and enqueueing a
    /// follow-up job for each child directory.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own gitignore, if any, when evaluating
        // its children.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only emit edits for entries whose ignore status actually flipped.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
3228
    /// Computes the per-path changes between two snapshots, considering only
    /// the given `event_paths` and their descendants.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            // Walk both snapshots in lock-step, diffing the entries at or
            // beneath the event path.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path's subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
3295
    /// Waits for the progress-update interval to elapse. When `running` is
    /// false, pends forever so the timer arm of `select_biased!` never fires.
    /// Under a fake filesystem in tests, a random delay is used instead of a
    /// fixed interval.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
3308}
3309
3310fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3311 let mut result = root_char_bag;
3312 result.extend(
3313 path.to_string_lossy()
3314 .chars()
3315 .map(|c| c.to_ascii_lowercase()),
3316 );
3317 result
3318}
3319
/// A unit of work for the directory scanner: one directory to read.
struct ScanJob {
    abs_path: Arc<Path>,
    /// The directory's path relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which jobs for subdirectories are pushed.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
3327
/// A unit of work for recomputing ignore statuses: one directory whose
/// effective gitignore rules may have changed.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    /// Gitignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which jobs for subdirectories are pushed.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3333
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all filesystem events that predate this call have been
    /// processed by the worktree's background scanner.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3341
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then remove it again and wait for the removal to be observed.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any in-progress scan to complete.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3382
/// A `sum_tree` dimension accumulated while traversing entries: the
/// greatest path seen so far plus running entry counts.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// The maximum entry path seen so far.
    max_path: &'a Path,
    /// Total number of entries.
    count: usize,
    /// Number of non-ignored ("visible") entries.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
}
3391
3392impl<'a> TraversalProgress<'a> {
3393 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3394 match (include_ignored, include_dirs) {
3395 (true, true) => self.count,
3396 (true, false) => self.file_count,
3397 (false, true) => self.visible_count,
3398 (false, false) => self.visible_file_count,
3399 }
3400 }
3401}
3402
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Accumulates one `EntrySummary` into the running totals as the cursor
    // advances: the max path is replaced, the counts are summed.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3412
impl<'a> Default for TraversalProgress<'a> {
    /// The zero progress: empty max path and all counts at zero,
    /// i.e. positioned before the first entry.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
3424
/// A cursor over a snapshot's entries that can optionally skip
/// directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
3430
impl<'a> Traversal<'a> {
    /// Advances to the next entry, where entries are counted according to
    /// this traversal's `include_dirs`/`include_ignored` filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward to the entry at the given filtered offset, returning
    /// the result of the underlying cursor seek.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's descendants, repeating until it lands
    /// on an entry that matches the filters. Returns false if no such entry
    /// remains.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek to the first entry whose path is not inside `entry.path`.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned at, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The current position, as a count of preceding entries matching the
    /// filters.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3476
3477impl<'a> Iterator for Traversal<'a> {
3478 type Item = &'a Entry;
3479
3480 fn next(&mut self) -> Option<Self::Item> {
3481 if let Some(item) = self.entry() {
3482 self.advance();
3483 Some(item)
3484 } else {
3485 None
3486 }
3487 }
3488}
3489
/// A position within a snapshot's entry tree that a `Traversal`'s cursor
/// can seek to.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// The entry with this exact path.
    Path(&'a Path),
    /// The first entry whose path does not start with this path, i.e. the
    /// position just past the subtree rooted at it.
    PathSuccessor(&'a Path),
    /// The entry at the given offset, counting only entries matching the
    /// given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3500
3501impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3502 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3503 match self {
3504 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3505 TraversalTarget::PathSuccessor(path) => {
3506 if !cursor_location.max_path.starts_with(path) {
3507 Ordering::Equal
3508 } else {
3509 Ordering::Greater
3510 }
3511 }
3512 TraversalTarget::Count {
3513 count,
3514 include_dirs,
3515 include_ignored,
3516 } => Ord::cmp(
3517 count,
3518 &cursor_location.count(*include_dirs, *include_ignored),
3519 ),
3520 }
3521 }
3522}
3523
/// Iterates over the direct children of `parent_path`, skipping each
/// child's descendants via `Traversal::advance_to_sibling`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3528
3529impl<'a> Iterator for ChildEntriesIter<'a> {
3530 type Item = &'a Entry;
3531
3532 fn next(&mut self) -> Option<Self::Item> {
3533 if let Some(item) = self.traversal.entry() {
3534 if item.path.starts_with(&self.parent_path) {
3535 self.traversal.advance_to_sibling();
3536 return Some(item);
3537 }
3538 }
3539 None
3540 }
3541}
3542
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a local entry into its protobuf representation. The path is
    /// lossily converted to UTF-8; the char bag is not transmitted (it is
    /// rebuilt on the receiving side, see the `TryFrom` impl below).
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3556
3557impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3558 type Error = anyhow::Error;
3559
3560 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3561 if let Some(mtime) = entry.mtime {
3562 let kind = if entry.is_dir {
3563 EntryKind::Dir
3564 } else {
3565 let mut char_bag = *root_char_bag;
3566 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3567 EntryKind::File(char_bag)
3568 };
3569 let path: Arc<Path> = PathBuf::from(entry.path).into();
3570 Ok(Entry {
3571 id: ProjectEntryId::from_proto(entry.id),
3572 kind,
3573 path,
3574 inode: entry.inode,
3575 mtime: mtime.into(),
3576 is_symlink: entry.is_symlink,
3577 is_ignored: entry.is_ignored,
3578 })
3579 } else {
3580 Err(anyhow!(
3581 "missing mtime in remote worktree entry {:?}",
3582 entry.path
3583 ))
3584 }
3585 }
3586}
3587
3588#[cfg(test)]
3589mod tests {
3590 use super::*;
3591 use fs::{FakeFs, RealFs};
3592 use gpui::{executor::Deterministic, TestAppContext};
3593 use pretty_assertions::assert_eq;
3594 use rand::prelude::*;
3595 use serde_json::json;
3596 use std::{env, fmt::Write};
3597 use util::{http::FakeHttpClient, test::temp_tree};
3598
    // Verifies that `Worktree::entries` respects `.gitignore`: with
    // `include_ignored == false` the ignored file `a/b` is omitted, and with
    // `include_ignored == true` it is included.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3656
    // Verifies that symlinks forming a cycle (each `lib` link points back to
    // its parent via "..") are listed as entries but not recursed into, and
    // that renaming one of them is picked up by a rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlink entries appear exactly once — no infinite recursion.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks should be reflected after the FS
        // events are processed.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3736
    // Verifies ignore status on a real filesystem: rules from an ancestor
    // `.gitignore` outside the worktree root and from the root's own
    // `.gitignore` are both honored, for files present at the initial scan
    // and for files created afterwards (rescan).
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Files present at the initial scan.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the initial scan must get the same treatment.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            // The `.git` directory itself is always treated as ignored.
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3815
    // Verifies mapping from paths to their containing git repository:
    // nested repositories resolve to the innermost `.git`, paths outside any
    // repository resolve to none, changes inside `.git` emit
    // `UpdatedGitRepositories` events, and deleting `.git` removes the
    // association.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // c.txt lives outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1").to_owned())
            );

            // A path in a nested repo maps to the innermost work directory.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1/deps/dep1").to_owned())
            );
        });

        // Record `UpdatedGitRepositories` events for the assertions below.
        let repo_update_events = Arc::new(Mutex::new(vec![]));
        tree.update(cx, |_, cx| {
            let repo_update_events = repo_update_events.clone();
            cx.subscribe(&tree, move |_, _, event, _| {
                if let Event::UpdatedGitRepositories(update) = event {
                    repo_update_events.lock().push(update.clone());
                }
            })
            .detach();
        });

        // A change inside `.git` should produce an update for that repo.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        assert_eq!(
            repo_update_events.lock()[0]
                .keys()
                .cloned()
                .collect::<Vec<Arc<Path>>>(),
            vec![Path::new("dir1").into()]
        );

        // Deleting `.git` removes the path-to-repository association.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3906
3907 #[gpui::test]
3908 async fn test_git_status(cx: &mut TestAppContext) {
3909 #[track_caller]
3910 fn git_init(path: &Path) -> git2::Repository {
3911 git2::Repository::init(path).expect("Failed to initialize git repository")
3912 }
3913
3914 #[track_caller]
3915 fn git_add(path: &Path, repo: &git2::Repository) {
3916 let mut index = repo.index().expect("Failed to get index");
3917 index.add_path(path).expect("Failed to add a.txt");
3918 index.write().expect("Failed to write index");
3919 }
3920
3921 #[track_caller]
3922 fn git_remove_index(path: &Path, repo: &git2::Repository) {
3923 let mut index = repo.index().expect("Failed to get index");
3924 index.remove_path(path).expect("Failed to add a.txt");
3925 index.write().expect("Failed to write index");
3926 }
3927
3928 #[track_caller]
3929 fn git_commit(msg: &'static str, repo: &git2::Repository) {
3930 use git2::Signature;
3931
3932 let signature = Signature::now("test", "test@zed.dev").unwrap();
3933 let oid = repo.index().unwrap().write_tree().unwrap();
3934 let tree = repo.find_tree(oid).unwrap();
3935 if let Some(head) = repo.head().ok() {
3936 let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
3937
3938 let parent_commit = parent_obj.as_commit().unwrap();
3939
3940 repo.commit(
3941 Some("HEAD"),
3942 &signature,
3943 &signature,
3944 msg,
3945 &tree,
3946 &[parent_commit],
3947 )
3948 .expect("Failed to commit with parent");
3949 } else {
3950 repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
3951 .expect("Failed to commit");
3952 }
3953 }
3954
3955 #[track_caller]
3956 fn git_stash(repo: &mut git2::Repository) {
3957 use git2::Signature;
3958
3959 let signature = Signature::now("test", "test@zed.dev").unwrap();
3960 repo.stash_save(&signature, "N/A", None)
3961 .expect("Failed to stash");
3962 }
3963
3964 #[track_caller]
3965 fn git_reset(offset: usize, repo: &git2::Repository) {
3966 let head = repo.head().expect("Couldn't get repo head");
3967 let object = head.peel(git2::ObjectType::Commit).unwrap();
3968 let commit = object.as_commit().unwrap();
3969 let new_head = commit
3970 .parents()
3971 .inspect(|parnet| {
3972 parnet.message();
3973 })
3974 .skip(offset)
3975 .next()
3976 .expect("Not enough history");
3977 repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
3978 .expect("Could not reset");
3979 }
3980
3981 #[allow(dead_code)]
3982 #[track_caller]
3983 fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
3984 repo.statuses(None)
3985 .unwrap()
3986 .iter()
3987 .map(|status| (status.path().unwrap().to_string(), status.status()))
3988 .collect()
3989 }
3990
3991 const IGNORE_RULE: &'static str = "**/target";
3992
3993 let root = temp_tree(json!({
3994 "project": {
3995 "a.txt": "a",
3996 "b.txt": "bb",
3997 "c": {
3998 "d": {
3999 "e.txt": "eee"
4000 }
4001 },
4002 "f.txt": "ffff",
4003 "target": {
4004 "build_file": "???"
4005 },
4006 ".gitignore": IGNORE_RULE
4007 },
4008
4009 }));
4010
4011 let http_client = FakeHttpClient::with_404_response();
4012 let client = cx.read(|cx| Client::new(http_client, cx));
4013 let tree = Worktree::local(
4014 client,
4015 root.path(),
4016 true,
4017 Arc::new(RealFs),
4018 Default::default(),
4019 &mut cx.to_async(),
4020 )
4021 .await
4022 .unwrap();
4023
4024 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4025 .await;
4026
4027 const A_TXT: &'static str = "a.txt";
4028 const B_TXT: &'static str = "b.txt";
4029 const E_TXT: &'static str = "c/d/e.txt";
4030 const F_TXT: &'static str = "f.txt";
4031 const DOTGITIGNORE: &'static str = ".gitignore";
4032 const BUILD_FILE: &'static str = "target/build_file";
4033
4034 let work_dir = root.path().join("project");
4035 let mut repo = git_init(work_dir.as_path());
4036 repo.add_ignore_rule(IGNORE_RULE).unwrap();
4037 git_add(Path::new(A_TXT), &repo);
4038 git_add(Path::new(E_TXT), &repo);
4039 git_add(Path::new(DOTGITIGNORE), &repo);
4040 git_commit("Initial commit", &repo);
4041
4042 std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
4043
4044 tree.flush_fs_events(cx).await;
4045
4046 // Check that the right git state is observed on startup
4047 tree.read_with(cx, |tree, _cx| {
4048 let snapshot = tree.snapshot();
4049 assert_eq!(snapshot.repository_entries.iter().count(), 1);
4050 let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
4051 assert_eq!(dir.0.as_ref(), Path::new("project"));
4052
4053 assert_eq!(repo.worktree_statuses.iter().count(), 3);
4054 assert_eq!(
4055 repo.worktree_statuses.get(&Path::new(A_TXT).into()),
4056 Some(&GitFileStatus::Modified)
4057 );
4058 assert_eq!(
4059 repo.worktree_statuses.get(&Path::new(B_TXT).into()),
4060 Some(&GitFileStatus::Added)
4061 );
4062 assert_eq!(
4063 repo.worktree_statuses.get(&Path::new(F_TXT).into()),
4064 Some(&GitFileStatus::Added)
4065 );
4066 });
4067
4068 git_add(Path::new(A_TXT), &repo);
4069 git_add(Path::new(B_TXT), &repo);
4070 git_commit("Committing modified and added", &repo);
4071 tree.flush_fs_events(cx).await;
4072
4073 // Check that repo only changes are tracked
4074 tree.read_with(cx, |tree, _cx| {
4075 let snapshot = tree.snapshot();
4076 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4077
4078 assert_eq!(repo.worktree_statuses.iter().count(), 1);
4079 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4080 assert_eq!(repo.worktree_statuses.get(&Path::new(B_TXT).into()), None);
4081 assert_eq!(
4082 repo.worktree_statuses.get(&Path::new(F_TXT).into()),
4083 Some(&GitFileStatus::Added)
4084 );
4085 });
4086
4087 git_reset(0, &repo);
4088 git_remove_index(Path::new(B_TXT), &repo);
4089 git_stash(&mut repo);
4090 std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
4091 std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
4092 tree.flush_fs_events(cx).await;
4093
4094 // Check that more complex repo changes are tracked
4095 tree.read_with(cx, |tree, _cx| {
4096 let snapshot = tree.snapshot();
4097 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4098
4099 assert_eq!(repo.worktree_statuses.iter().count(), 3);
4100 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4101 assert_eq!(
4102 repo.worktree_statuses.get(&Path::new(B_TXT).into()),
4103 Some(&GitFileStatus::Added)
4104 );
4105 assert_eq!(
4106 repo.worktree_statuses.get(&Path::new(E_TXT).into()),
4107 Some(&GitFileStatus::Modified)
4108 );
4109 assert_eq!(
4110 repo.worktree_statuses.get(&Path::new(F_TXT).into()),
4111 Some(&GitFileStatus::Added)
4112 );
4113 });
4114
4115 std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
4116 std::fs::remove_dir_all(work_dir.join("c")).unwrap();
4117 std::fs::write(work_dir.join(DOTGITIGNORE), [IGNORE_RULE, "f.txt"].join("\n")).unwrap();
4118
4119 git_add(Path::new(DOTGITIGNORE), &repo);
4120 git_commit("Committing modified git ignore", &repo);
4121
4122 tree.flush_fs_events(cx).await;
4123
4124 dbg!(git_status(&repo));
4125
4126 // Check that non-repo behavior is tracked
4127 tree.read_with(cx, |tree, _cx| {
4128 let snapshot = tree.snapshot();
4129 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4130
4131 dbg!(&repo.worktree_statuses);
4132
4133 assert_eq!(repo.worktree_statuses.iter().count(), 0);
4134 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4135 assert_eq!(repo.worktree_statuses.get(&Path::new(B_TXT).into()), None);
4136 assert_eq!(repo.worktree_statuses.get(&Path::new(E_TXT).into()), None);
4137 assert_eq!(repo.worktree_statuses.get(&Path::new(F_TXT).into()), None);
4138 });
4139 }
4140
    // Verifies that `write_file` creates entries for files written into both
    // tracked and ignored directories, with the correct ignore status.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        // Both files must have entries; only the one in the ignored
        // directory is marked ignored.
        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4194
    // Verifies that a directory created via `create_entry` while the initial
    // scan is still running ends up in the snapshot, and that the resulting
    // snapshot can be reproduced remotely by applying the built update.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Note: scan_complete is deliberately NOT awaited here — the entry is
        // created during the initial scan.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Applying the update built from the diff must reproduce the new
        // snapshot exactly.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
4243
    // Randomized test: applies random worktree mutations while the initial
    // scan is still in progress, checking snapshot invariants and that
    // remote updates keep a replica snapshot in sync throughout.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Tunable via env vars for longer local runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot that will be kept in sync via remote updates.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally sync the replica snapshot mid-scan.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        // After the scan completes, one final sync must converge.
        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
4320
    // Randomized test: after the initial scan, applies random FS mutations
    // (with partially-flushed event batches), checking that `UpdatedEntries`
    // events are sufficient to mirror the worktree's path list, that a fresh
    // scan agrees with the incrementally-updated snapshot, and that stored
    // intermediate snapshots can be caught up via `build_update`.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                // NOTE(review): `paths[ix]` panics if
                                // `ix == paths.len()` (a new path sorting after
                                // every existing one); `paths.get(ix) !=
                                // Some(&path)` would be safe — confirm whether
                                // AddedOrUpdated can arrive for such a path here.
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Mutate the FS, sometimes flushing only part of the buffered event
        // batch to exercise partial-update handling.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A fresh worktree scanning the same FS must agree with the
        // incrementally-maintained snapshot.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored intermediate snapshot must be able to catch up to the
        // final state, with or without ignored entries included.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries so the baseline matches what a
                // non-ignored update stream would have produced.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
4475
    /// Applies one random mutation to the worktree through its public API:
    /// roughly a third deletions, a third renames, and a third
    /// creations/overwrites. Returns a task that resolves when the
    /// mutation's side effects have been applied.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // Delete (never the root entry).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // Rename to a random new name under a random parent (34..=66;
            // falls through to creation for the root entry).
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into its own subtree.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Create a child (for directories) or overwrite (for files).
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4535
    /// Applies one random mutation to the fake filesystem beneath `root_path`:
    /// creating a file or directory, writing a `.gitignore`, renaming an entry,
    /// or deleting an entry. `insertion_probability` biases how often new
    /// entries are created versus mutated/removed.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition every existing path under the root into files and dirs.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Insert a new entry. This branch is forced when the tree contains
        // nothing but the root dir, so the later branches always have
        // something to rename or delete.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Rarely: write a `.gitignore` into a random directory, ignoring a
            // random subset of the files and dirs beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // `starts_with` is reflexive, so `subdirs` includes
            // `ignore_dir_path` itself.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // NOTE(review): exclusive upper bound here (never ignores all
                // subdirs), unlike the inclusive `0..=len` used for files
                // above — confirm this asymmetry is intentional.
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Write each chosen path into the ignore file, relative to the
            // directory containing the `.gitignore`.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Otherwise: pick an existing entry — any file, or any dir from
            // `dirs[1..]` (presumably skipping the root itself; depends on
            // `paths()` yielding the root dir first — TODO confirm) — and
            // either rename or delete it.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Rename target parent must not be inside the entry being moved.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory wholesale: remove it
                // first, then rename the entry onto the now-vacant path.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4694
4695 fn gen_name(rng: &mut impl Rng) -> String {
4696 (0..6)
4697 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4698 .map(char::from)
4699 .collect()
4700 }
4701
4702 impl LocalSnapshot {
4703 fn check_invariants(&self) {
4704 assert_eq!(
4705 self.entries_by_path
4706 .cursor::<()>()
4707 .map(|e| (&e.path, e.id))
4708 .collect::<Vec<_>>(),
4709 self.entries_by_id
4710 .cursor::<()>()
4711 .map(|e| (&e.path, e.id))
4712 .collect::<collections::BTreeSet<_>>()
4713 .into_iter()
4714 .collect::<Vec<_>>(),
4715 "entries_by_path and entries_by_id are inconsistent"
4716 );
4717
4718 let mut files = self.files(true, 0);
4719 let mut visible_files = self.files(false, 0);
4720 for entry in self.entries_by_path.cursor::<()>() {
4721 if entry.is_file() {
4722 assert_eq!(files.next().unwrap().inode, entry.inode);
4723 if !entry.is_ignored {
4724 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4725 }
4726 }
4727 }
4728
4729 assert!(files.next().is_none());
4730 assert!(visible_files.next().is_none());
4731
4732 let mut bfs_paths = Vec::new();
4733 let mut stack = vec![Path::new("")];
4734 while let Some(path) = stack.pop() {
4735 bfs_paths.push(path);
4736 let ix = stack.len();
4737 for child_entry in self.child_entries(path) {
4738 stack.insert(ix, &child_entry.path);
4739 }
4740 }
4741
4742 let dfs_paths_via_iter = self
4743 .entries_by_path
4744 .cursor::<()>()
4745 .map(|e| e.path.as_ref())
4746 .collect::<Vec<_>>();
4747 assert_eq!(bfs_paths, dfs_paths_via_iter);
4748
4749 let dfs_paths_via_traversal = self
4750 .entries(true)
4751 .map(|e| e.path.as_ref())
4752 .collect::<Vec<_>>();
4753 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4754
4755 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4756 let ignore_parent_path =
4757 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4758 assert!(self.entry_for_path(&ignore_parent_path).is_some());
4759 assert!(self
4760 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4761 .is_some());
4762 }
4763 }
4764
4765 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4766 let mut paths = Vec::new();
4767 for entry in self.entries_by_path.cursor::<()>() {
4768 if include_ignored || !entry.is_ignored {
4769 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4770 }
4771 }
4772 paths.sort_by(|a, b| a.0.cmp(b.0));
4773 paths
4774 }
4775 }
4776}