1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{
10 repository::{GitFileStatus, GitRepository, RepoPath},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use smol::channel::{self, Sender};
40use std::{
41 any::Any,
42 cmp::{self, Ordering},
43 convert::TryFrom,
44 ffi::OsStr,
45 fmt,
46 future::Future,
47 mem,
48 ops::{Deref, DerefMut},
49 path::{Path, PathBuf},
50 pin::Pin,
51 sync::{
52 atomic::{AtomicUsize, Ordering::SeqCst},
53 Arc,
54 },
55 time::{Duration, SystemTime},
56};
57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
58use util::{paths::HOME, ResultExt, TryFutureExt};
59
/// Stable identifier for a worktree within a project.
/// Derived from the gpui model id of the owning `Worktree` model
/// (see `WorktreeId::from_usize(cx.model_id())` in `Worktree::local`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A tree of files and directories tracked by the project.
/// `Local` scans the filesystem on this machine; `Remote` mirrors a
/// snapshot streamed from a collaborator over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    // Latest snapshot produced by the background scanner.
    snapshot: LocalSnapshot,
    // Used to ask the background scanner to rescan specific paths; the
    // barrier sender lets callers await completion of that rescan.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch pair; `true` while the background scanner is mid-scan.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present while this worktree is being shared with collaborators.
    share: Option<ShareState>,
    // Per-path diagnostics, stored as a Vec sorted by `LanguageServerId`
    // (see binary_search usage in `update_diagnostics`).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
86
/// A worktree replicated from a remote collaborator.
/// `background_snapshot` is updated on a background task as RPC updates
/// arrive, then copied into `snapshot` on the foreground (see `Worktree::remote`).
pub struct RemoteWorktree {
    snapshot: Snapshot,
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once updates are no longer being applied.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters to be notified once a given scan id has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
99
/// An immutable view of a worktree's contents at a point in time:
/// entries indexed both by path and by id, plus known git repositories.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    // Name of the root entry, used for fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Repositories keyed by their work directory, in path order.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
122
123impl Snapshot {
124 pub fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
125 let mut max_len = 0;
126 let mut current_candidate = None;
127 for (work_directory, repo) in (&self.repository_entries).iter() {
128 if repo.contains(self, path) {
129 if work_directory.0.as_os_str().len() >= max_len {
130 current_candidate = Some(repo);
131 max_len = work_directory.0.as_os_str().len();
132 } else {
133 break;
134 }
135 }
136 }
137
138 current_candidate.map(|entry| entry.to_owned())
139 }
140}
141
/// A git repository known to a worktree snapshot: its work directory,
/// checked-out branch (if known), and per-file statuses.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    pub(crate) branch: Option<Arc<str>>,
    // Statuses keyed by repository-relative path.
    pub(crate) worktree_statuses: TreeMap<RepoPath, GitFileStatus>,
}
148
149fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
150 proto::GitStatus::from_i32(git_status).map(|status| match status {
151 proto::GitStatus::Added => GitFileStatus::Added,
152 proto::GitStatus::Modified => GitFileStatus::Modified,
153 proto::GitStatus::Conflict => GitFileStatus::Conflict,
154 })
155}
156
impl RepositoryEntry {
    /// The checked-out branch name, if known.
    pub fn branch(&self) -> Option<Arc<str>> {
        self.branch.clone()
    }

    /// Id of the worktree entry for this repository's work directory.
    pub fn work_directory_id(&self) -> ProjectEntryId {
        *self.work_directory
    }

    /// Resolves the work directory to its path in `snapshot`, if the
    /// entry still exists there.
    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
        snapshot
            .entry_for_id(self.work_directory_id())
            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
    }

    /// Whether the worktree-relative `path` lies inside this repository's
    /// work directory.
    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
        self.work_directory.contains(snapshot, path)
    }

    /// Git status for the worktree-relative `path`, if it has one.
    pub fn status_for(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| self.worktree_statuses.get(&repo_path))
            .cloned()
    }

    /// Builds a protobuf update describing how to transform `other`
    /// (an older version of this repository entry) into `self`:
    /// statuses added or changed in `self` become `updated_worktree_statuses`,
    /// and statuses present only in `other` become `removed_worktree_repo_paths`.
    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
        let mut removed_statuses: Vec<String> = Vec::new();

        // Both maps iterate in sorted path order, so a single merge pass
        // suffices to diff them.
        let mut self_statuses = self.worktree_statuses.iter().peekable();
        let mut other_statuses = other.worktree_statuses.iter().peekable();
        loop {
            match (self_statuses.peek(), other_statuses.peek()) {
                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
                    match Ord::cmp(self_repo_path, other_repo_path) {
                        Ordering::Less => {
                            // Present only in `self`: newly-added status.
                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
                            self_statuses.next();
                        }
                        Ordering::Equal => {
                            // Present in both: emit only if the status changed.
                            if self_status != other_status {
                                updated_statuses
                                    .push(make_status_entry(self_repo_path, self_status));
                            }

                            self_statuses.next();
                            other_statuses.next();
                        }
                        Ordering::Greater => {
                            // Present only in `other`: status was removed.
                            removed_statuses.push(make_repo_path(other_repo_path));
                            other_statuses.next();
                        }
                    }
                }
                (Some((self_repo_path, self_status)), None) => {
                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
                    self_statuses.next();
                }
                (None, Some((other_repo_path, _))) => {
                    removed_statuses.push(make_repo_path(other_repo_path));
                    other_statuses.next();
                }
                (None, None) => break,
            }
        }

        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id().to_proto(),
            branch: self.branch.as_ref().map(|str| str.to_string()),
            removed_worktree_repo_paths: removed_statuses,
            updated_worktree_statuses: updated_statuses,
        }
    }
}
232
233fn make_repo_path(path: &RepoPath) -> String {
234 path.as_os_str().to_string_lossy().to_string()
235}
236
237fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
238 proto::StatusEntry {
239 repo_path: make_repo_path(path),
240 status: match status {
241 GitFileStatus::Added => proto::GitStatus::Added.into(),
242 GitFileStatus::Modified => proto::GitStatus::Modified.into(),
243 GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
244 },
245 }
246}
247
248impl From<&RepositoryEntry> for proto::RepositoryEntry {
249 fn from(value: &RepositoryEntry) -> Self {
250 proto::RepositoryEntry {
251 work_directory_id: value.work_directory.to_proto(),
252 branch: value.branch.as_ref().map(|str| str.to_string()),
253 updated_worktree_statuses: value
254 .worktree_statuses
255 .iter()
256 .map(|(repo_path, status)| make_status_entry(repo_path, status))
257 .collect(),
258 removed_worktree_repo_paths: Default::default(),
259 }
260 }
261}
262
/// This path corresponds to the 'content path' (the folder that contains the .git)
/// relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
266
267impl Default for RepositoryWorkDirectory {
268 fn default() -> Self {
269 RepositoryWorkDirectory(Arc::from(Path::new("")))
270 }
271}
272
273impl AsRef<Path> for RepositoryWorkDirectory {
274 fn as_ref(&self) -> &Path {
275 self.0.as_ref()
276 }
277}
278
// Newtype over the ProjectEntryId of a repository's work-directory entry.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
281
282impl WorkDirectoryEntry {
283 // Note that these paths should be relative to the worktree root.
284 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
285 snapshot
286 .entry_for_id(self.0)
287 .map(|entry| path.starts_with(&entry.path))
288 .unwrap_or(false)
289 }
290
291 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
292 worktree.entry_for_id(self.0).and_then(|entry| {
293 path.strip_prefix(&entry.path)
294 .ok()
295 .map(move |path| path.into())
296 })
297 }
298}
299
// Allow a WorkDirectoryEntry to be used wherever a ProjectEntryId is needed.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
307
308impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
309 fn from(value: ProjectEntryId) -> Self {
310 WorkDirectoryEntry(value)
311 }
312}
313
/// A `Snapshot` augmented with local-only scanner state: gitignore data,
/// open git repositories, and id bookkeeping for removed entries.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    // Parsed .gitignore per parent directory, with the scan_id at which
    // it was last loaded.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    // work_directory_id
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    // Maps a removed entry's inode-derived key to its former id so the id
    // can be reused if the entry reappears.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
324
/// An open git repository on the local machine, along with the scan ids
/// at which it was last (fully) refreshed.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    pub(crate) scan_id: usize,
    pub(crate) full_scan_id: usize,
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
334
impl LocalRepositoryEntry {
    // Note that this path should be relative to the worktree root.
    /// Whether `path` lies inside this repository's .git directory.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}
341
// Expose the inner Snapshot's API directly on LocalSnapshot.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
349
// Mutable counterpart of the Deref impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
355
/// Messages sent from the background scanner to the foreground worktree.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot; `scanning` is true if more
    /// updates for the same scan are still coming.
    Updated {
        snapshot: LocalSnapshot,
        changes: HashMap<Arc<Path>, PathChange>,
        // Dropped (thereby released) once the update has been applied.
        barrier: Option<barrier::Sender>,
        scanning: bool,
    },
}
365
/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot to the task maintaining the remote copy.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Poked to resume sending updates after a reconnect.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
372
/// Events emitted by a `Worktree` model.
pub enum Event {
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
}
377
// Register Worktree as a gpui model entity with the Event type above.
impl Entity for Worktree {
    type Event = Event;
}
381
impl Worktree {
    /// Creates a local worktree rooted at `path`, spawning a background
    /// scanner task that watches the filesystem and streams snapshots
    /// back to the model.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // Scan 1 is in progress; nothing has completed yet.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scanner states into the model: update `is_scanning`,
            // adopt new snapshots, and emit change events.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Releases any caller awaiting this rescan.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    // Debounce filesystem events by 100ms before scanning.
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a remote worktree that mirrors `worktree` metadata from a
    /// collaborator, applying streamed updates on a background task.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming protocol updates to the background snapshot.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // On each applied update, copy the background snapshot to the
            // foreground and wake any subscribers waiting on a scan id.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// Returns the local variant, if this is a local worktree.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the remote variant, if this is a remote worktree.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable access to the local variant, if this is a local worktree.
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable access to the remote variant, if this is a remote worktree.
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// The current snapshot, independent of local/remote variant.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// Replica id of this worktree's owner; local worktrees are replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over all diagnostic summaries, flattened to
    /// (path, language server id, summary) triples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// Absolute path of the worktree root on its host machine.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
676
677impl LocalWorktree {
    /// Whether the absolute path `path` lies inside this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
681
682 fn absolutize(&self, path: &Path) -> PathBuf {
683 if path.file_name().is_some() {
684 self.abs_path.join(path)
685 } else {
686 self.abs_path.to_path_buf()
687 }
688 }
689
    /// Loads the file at `path` into a new `Buffer` model with the given
    /// remote `id`, computing its initial git diff.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            // Read the file contents plus its git index text (diff base).
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer off the main thread.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
712
713 pub fn diagnostics_for_path(
714 &self,
715 path: &Path,
716 ) -> Vec<(
717 LanguageServerId,
718 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
719 )> {
720 self.diagnostics.get(path).cloned().unwrap_or_default()
721 }
722
    /// Replaces the diagnostics reported by `server_id` for `worktree_path`,
    /// updating the per-path summaries and notifying collaborators if the
    /// worktree is shared. Returns `Ok(true)` when the summary changed.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server; drop its entry (the
            // per-path Vec is kept sorted by server id).
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            // Insert or replace this server's diagnostics, preserving the
            // sort order by server id.
            summaries_by_server_id.insert(server_id, new_summary);
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            // Broadcast the new summary to collaborators when sharing.
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
782
    /// Adopts a snapshot from the background scanner, forwarding it to
    /// collaborators (if sharing) and emitting an event for any git
    /// repositories that changed between the old and new snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        // Diff repositories before overwriting the old snapshot.
        let updated_repos =
            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
796
    /// Computes the git repositories that differ between two snapshots'
    /// repository maps: added, removed, or rescanned (different `scan_id`)
    /// entries, keyed by their work-directory path.
    fn changed_repos(
        &self,
        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
        let mut diff = HashMap::default();
        // Both maps iterate in sorted entry-id order, so a single merge
        // pass finds every difference.
        let mut old_repos = old_repos.iter().peekable();
        let mut new_repos = new_repos.iter().peekable();
        loop {
            match (old_repos.peek(), new_repos.peek()) {
                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
                    match Ord::cmp(old_entry_id, new_entry_id) {
                        Ordering::Less => {
                            // Repository only in the old snapshot: removed.
                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
                                diff.insert(entry.path.clone(), (*old_repo).clone());
                            }
                            old_repos.next();
                        }
                        Ordering::Equal => {
                            // Present in both: changed only if rescanned.
                            if old_repo.scan_id != new_repo.scan_id {
                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                    diff.insert(entry.path.clone(), (*new_repo).clone());
                                }
                            }

                            old_repos.next();
                            new_repos.next();
                        }
                        Ordering::Greater => {
                            // Repository only in the new snapshot: added.
                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                diff.insert(entry.path.clone(), (*new_repo).clone());
                            }
                            new_repos.next();
                        }
                    }
                }
                (Some((old_entry_id, old_repo)), None) => {
                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
                        diff.insert(entry.path.clone(), (*old_repo).clone());
                    }
                    old_repos.next();
                }
                (None, Some((new_entry_id, new_repo))) => {
                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
                        diff.insert(entry.path.clone(), (*new_repo).clone());
                    }
                    new_repos.next();
                }
                (None, None) => break,
            }
        }
        diff
    }
850
851 pub fn scan_complete(&self) -> impl Future<Output = ()> {
852 let mut is_scanning_rx = self.is_scanning.1.clone();
853 async move {
854 let mut is_scanning = is_scanning_rx.borrow().clone();
855 while is_scanning {
856 if let Some(value) = is_scanning_rx.recv().await {
857 is_scanning = value;
858 } else {
859 break;
860 }
861 }
862 }
863 }
864
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
868
869 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
870 proto::WorktreeMetadata {
871 id: self.id().to_proto(),
872 root_name: self.root_name().to_string(),
873 visible: self.visible,
874 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
875 }
876 }
877
    /// Reads the file at `path`, returning its `File` metadata, contents,
    /// and git index text (diff base) if the file is in a repository.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // Kick off loading the git index text in the background, if this
        // path belongs to a known repository.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
932
933 pub fn save_buffer(
934 &self,
935 buffer_handle: ModelHandle<Buffer>,
936 path: Arc<Path>,
937 has_changed_file: bool,
938 cx: &mut ModelContext<Worktree>,
939 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
940 let handle = cx.handle();
941 let buffer = buffer_handle.read(cx);
942
943 let rpc = self.client.clone();
944 let buffer_id = buffer.remote_id();
945 let project_id = self.share.as_ref().map(|share| share.project_id);
946
947 let text = buffer.as_rope().clone();
948 let fingerprint = text.fingerprint();
949 let version = buffer.version();
950 let save = self.write_file(path, text, buffer.line_ending(), cx);
951
952 cx.as_mut().spawn(|mut cx| async move {
953 let entry = save.await?;
954
955 if has_changed_file {
956 let new_file = Arc::new(File {
957 entry_id: entry.id,
958 worktree: handle,
959 path: entry.path,
960 mtime: entry.mtime,
961 is_local: true,
962 is_deleted: false,
963 });
964
965 if let Some(project_id) = project_id {
966 rpc.send(proto::UpdateBufferFile {
967 project_id,
968 buffer_id,
969 file: Some(new_file.to_proto()),
970 })
971 .log_err();
972 }
973
974 buffer_handle.update(&mut cx, |buffer, cx| {
975 if has_changed_file {
976 buffer.file_updated(new_file, cx).detach();
977 }
978 });
979 }
980
981 if let Some(project_id) = project_id {
982 rpc.send(proto::BufferSaved {
983 project_id,
984 buffer_id,
985 version: serialize_version(&version),
986 mtime: Some(entry.mtime.into()),
987 fingerprint: serialize_fingerprint(fingerprint),
988 })?;
989 }
990
991 buffer_handle.update(&mut cx, |buffer, cx| {
992 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
993 });
994
995 Ok((version, fingerprint, entry.mtime))
996 })
997 }
998
999 pub fn create_entry(
1000 &self,
1001 path: impl Into<Arc<Path>>,
1002 is_dir: bool,
1003 cx: &mut ModelContext<Worktree>,
1004 ) -> Task<Result<Entry>> {
1005 let path = path.into();
1006 let abs_path = self.absolutize(&path);
1007 let fs = self.fs.clone();
1008 let write = cx.background().spawn(async move {
1009 if is_dir {
1010 fs.create_dir(&abs_path).await
1011 } else {
1012 fs.save(&abs_path, &Default::default(), Default::default())
1013 .await
1014 }
1015 });
1016
1017 cx.spawn(|this, mut cx| async move {
1018 write.await?;
1019 this.update(&mut cx, |this, cx| {
1020 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1021 })
1022 .await
1023 })
1024 }
1025
1026 pub fn write_file(
1027 &self,
1028 path: impl Into<Arc<Path>>,
1029 text: Rope,
1030 line_ending: LineEnding,
1031 cx: &mut ModelContext<Worktree>,
1032 ) -> Task<Result<Entry>> {
1033 let path = path.into();
1034 let abs_path = self.absolutize(&path);
1035 let fs = self.fs.clone();
1036 let write = cx
1037 .background()
1038 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1039
1040 cx.spawn(|this, mut cx| async move {
1041 write.await?;
1042 this.update(&mut cx, |this, cx| {
1043 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1044 })
1045 .await
1046 })
1047 }
1048
    /// Deletes the file or directory for `entry_id` from disk, then asks
    /// the background scanner to rescan the deleted path and waits for it.
    /// Returns `None` if the entry no longer exists in the snapshot.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root so the scanner sees the real path.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // The barrier resolves when the scanner finishes rescanning.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
1091
    /// Renames the entry for `entry_id` to `new_path` on disk, then
    /// rescans both old and new locations and returns the new entry.
    /// Returns `None` if the entry no longer exists in the snapshot.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
1118
    /// Recursively copies the entry for `entry_id` to `new_path` on disk,
    /// then rescans the destination and returns the new entry.
    /// Returns `None` if the entry no longer exists in the snapshot.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
1150
    /// Re-scans `path` (and `old_path`, if the entry was just moved) so the
    /// snapshot reflects a change that was made on disk, then returns the
    /// refreshed entry for `path`.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            // Canonicalize the root so the scanner receives resolved paths
            // even when the worktree root is behind a symlink.
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path without a file name refers to the worktree root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // Hand the paths to the background scanner and wait on the
            // barrier for it to signal that they have been processed.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            // `spawn_weak` means the worktree may have been dropped while we
            // were waiting; surface that as an error.
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1188
    /// Starts (or resumes) sharing this worktree with collaborators in the
    /// given project. The returned task resolves once the first full
    /// snapshot has been sent to the server.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and nudge the background
            // update loop so it retries any updates that previously failed.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send the current diagnostic summaries up front so guests see
            // existing diagnostics without waiting for new LSP events.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start diffing against an empty snapshot so the first
                    // update sent to the server contains every entry.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Small chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        // Diff against the previously sent snapshot and
                        // stream the changes in bounded chunks.
                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Drain any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            // Retry a failed chunk until it succeeds or
                            // sharing is dropped (resume channel closed).
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // Resolve the task returned by `share` once the
                        // initial snapshot has been fully transmitted.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1275
1276 pub fn unshare(&mut self) {
1277 self.share.take();
1278 }
1279
    /// Whether this worktree is currently being shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1283}
1284
impl RemoteWorktree {
    // Returns a clone of the foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks this worktree as disconnected: stop accepting remote updates
    /// and drop all pending snapshot subscriptions (their receivers resolve
    /// with an error once the senders are dropped).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the given buffer, then applies the returned
    /// version, fingerprint and mtime to the local buffer copy.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Queues a snapshot update received from the host. Updates arriving
    /// after disconnection are silently dropped (the channel is gone).
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    // Whether a snapshot with the given scan id has been fully applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once the snapshot for `scan_id` has
    /// been observed, or fails if the worktree disconnects first.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping `tx` makes `rx.await` below return an error.
            drop(tx);
        } else {
            // Keep the subscription list sorted by scan id; inserting at the
            // binary-search position preserves that invariant either way.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Replaces the diagnostic summary for `path` reported by the given
    /// language server, pruning map entries that become empty.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry received from the host once the snapshot for
    /// `scan_id` has been applied, keeping the foreground snapshot in sync
    /// with the background one.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes an entry once the snapshot for `scan_id` has been applied,
    /// keeping the foreground snapshot in sync with the background one.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1426
1427impl Snapshot {
1428 pub fn id(&self) -> WorktreeId {
1429 self.id
1430 }
1431
1432 pub fn abs_path(&self) -> &Arc<Path> {
1433 &self.abs_path
1434 }
1435
1436 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1437 self.entries_by_id.get(&entry_id, &()).is_some()
1438 }
1439
1440 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1441 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1442 let old_entry = self.entries_by_id.insert_or_replace(
1443 PathEntry {
1444 id: entry.id,
1445 path: entry.path.clone(),
1446 is_ignored: entry.is_ignored,
1447 scan_id: 0,
1448 },
1449 &(),
1450 );
1451 if let Some(old_entry) = old_entry {
1452 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1453 }
1454 self.entries_by_path.insert_or_replace(entry.clone(), &());
1455 Ok(entry)
1456 }
1457
1458 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1459 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1460 self.entries_by_path = {
1461 let mut cursor = self.entries_by_path.cursor();
1462 let mut new_entries_by_path =
1463 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1464 while let Some(entry) = cursor.item() {
1465 if entry.path.starts_with(&removed_entry.path) {
1466 self.entries_by_id.remove(&entry.id, &());
1467 cursor.next(&());
1468 } else {
1469 break;
1470 }
1471 }
1472 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1473 new_entries_by_path
1474 };
1475
1476 Some(removed_entry.path)
1477 }
1478
1479 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1480 let mut entries_by_path_edits = Vec::new();
1481 let mut entries_by_id_edits = Vec::new();
1482 for entry_id in update.removed_entries {
1483 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1484 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1485 entries_by_id_edits.push(Edit::Remove(entry.id));
1486 }
1487 }
1488
1489 for entry in update.updated_entries {
1490 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1491 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1492 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1493 }
1494 entries_by_id_edits.push(Edit::Insert(PathEntry {
1495 id: entry.id,
1496 path: entry.path.clone(),
1497 is_ignored: entry.is_ignored,
1498 scan_id: 0,
1499 }));
1500 entries_by_path_edits.push(Edit::Insert(entry));
1501 }
1502
1503 self.entries_by_path.edit(entries_by_path_edits, &());
1504 self.entries_by_id.edit(entries_by_id_edits, &());
1505
1506 update.removed_repositories.sort_unstable();
1507 self.repository_entries.retain(|_, entry| {
1508 if let Ok(_) = update
1509 .removed_repositories
1510 .binary_search(&entry.work_directory.to_proto())
1511 {
1512 false
1513 } else {
1514 true
1515 }
1516 });
1517
1518 for repository in update.updated_repositories {
1519 let work_directory_entry: WorkDirectoryEntry =
1520 ProjectEntryId::from_proto(repository.work_directory_id).into();
1521
1522 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1523 let mut statuses = TreeMap::default();
1524 for status_entry in repository.updated_worktree_statuses {
1525 let Some(git_file_status) = read_git_status(status_entry.status) else {
1526 continue;
1527 };
1528
1529 let repo_path = RepoPath::new(status_entry.repo_path.into());
1530 statuses.insert(repo_path, git_file_status);
1531 }
1532
1533 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1534 if self.repository_entries.get(&work_directory).is_some() {
1535 self.repository_entries.update(&work_directory, |repo| {
1536 repo.branch = repository.branch.map(Into::into);
1537 repo.worktree_statuses.insert_tree(statuses);
1538
1539 for repo_path in repository.removed_worktree_repo_paths {
1540 let repo_path = RepoPath::new(repo_path.into());
1541 repo.worktree_statuses.remove(&repo_path);
1542 }
1543 });
1544 } else {
1545 self.repository_entries.insert(
1546 work_directory,
1547 RepositoryEntry {
1548 work_directory: work_directory_entry,
1549 branch: repository.branch.map(Into::into),
1550 worktree_statuses: statuses,
1551 },
1552 )
1553 }
1554 } else {
1555 log::error!("no work directory entry for repository {:?}", repository)
1556 }
1557 }
1558
1559 self.scan_id = update.scan_id as usize;
1560 if update.is_last_update {
1561 self.completed_scan_id = update.scan_id as usize;
1562 }
1563
1564 Ok(())
1565 }
1566
1567 pub fn file_count(&self) -> usize {
1568 self.entries_by_path.summary().file_count
1569 }
1570
1571 pub fn visible_file_count(&self) -> usize {
1572 self.entries_by_path.summary().visible_file_count
1573 }
1574
1575 fn traverse_from_offset(
1576 &self,
1577 include_dirs: bool,
1578 include_ignored: bool,
1579 start_offset: usize,
1580 ) -> Traversal {
1581 let mut cursor = self.entries_by_path.cursor();
1582 cursor.seek(
1583 &TraversalTarget::Count {
1584 count: start_offset,
1585 include_dirs,
1586 include_ignored,
1587 },
1588 Bias::Right,
1589 &(),
1590 );
1591 Traversal {
1592 cursor,
1593 include_dirs,
1594 include_ignored,
1595 }
1596 }
1597
1598 fn traverse_from_path(
1599 &self,
1600 include_dirs: bool,
1601 include_ignored: bool,
1602 path: &Path,
1603 ) -> Traversal {
1604 let mut cursor = self.entries_by_path.cursor();
1605 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1606 Traversal {
1607 cursor,
1608 include_dirs,
1609 include_ignored,
1610 }
1611 }
1612
1613 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1614 self.traverse_from_offset(false, include_ignored, start)
1615 }
1616
1617 pub fn entries(&self, include_ignored: bool) -> Traversal {
1618 self.traverse_from_offset(true, include_ignored, 0)
1619 }
1620
1621 pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1622 self.repository_entries.values()
1623 }
1624
1625 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1626 let empty_path = Path::new("");
1627 self.entries_by_path
1628 .cursor::<()>()
1629 .filter(move |entry| entry.path.as_ref() != empty_path)
1630 .map(|entry| &entry.path)
1631 }
1632
1633 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1634 let mut cursor = self.entries_by_path.cursor();
1635 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1636 let traversal = Traversal {
1637 cursor,
1638 include_dirs: true,
1639 include_ignored: true,
1640 };
1641 ChildEntriesIter {
1642 traversal,
1643 parent_path,
1644 }
1645 }
1646
1647 pub fn root_entry(&self) -> Option<&Entry> {
1648 self.entry_for_path("")
1649 }
1650
1651 pub fn root_name(&self) -> &str {
1652 &self.root_name
1653 }
1654
1655 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1656 self.repository_entries
1657 .get(&RepositoryWorkDirectory(Path::new("").into()))
1658 .map(|entry| entry.to_owned())
1659 }
1660
1661 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1662 self.repository_entries.values()
1663 }
1664
1665 pub fn scan_id(&self) -> usize {
1666 self.scan_id
1667 }
1668
1669 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1670 let path = path.as_ref();
1671 self.traverse_from_path(true, true, path)
1672 .entry()
1673 .and_then(|entry| {
1674 if entry.path.as_ref() == path {
1675 Some(entry)
1676 } else {
1677 None
1678 }
1679 })
1680 }
1681
1682 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1683 let entry = self.entries_by_id.get(&id, &())?;
1684 self.entry_for_path(&entry.path)
1685 }
1686
1687 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1688 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1689 }
1690}
1691
1692impl LocalSnapshot {
1693 pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1694 self.git_repositories.get(&repo.work_directory.0)
1695 }
1696
1697 pub(crate) fn repo_for_metadata(
1698 &self,
1699 path: &Path,
1700 ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1701 self.git_repositories
1702 .iter()
1703 .find(|(_, repo)| repo.in_dot_git(path))
1704 }
1705
1706 #[cfg(test)]
1707 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1708 let root_name = self.root_name.clone();
1709 proto::UpdateWorktree {
1710 project_id,
1711 worktree_id: self.id().to_proto(),
1712 abs_path: self.abs_path().to_string_lossy().into(),
1713 root_name,
1714 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1715 removed_entries: Default::default(),
1716 scan_id: self.scan_id as u64,
1717 is_last_update: true,
1718 updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1719 removed_repositories: Default::default(),
1720 }
1721 }
1722
1723 pub(crate) fn build_update(
1724 &self,
1725 other: &Self,
1726 project_id: u64,
1727 worktree_id: u64,
1728 include_ignored: bool,
1729 ) -> proto::UpdateWorktree {
1730 let mut updated_entries = Vec::new();
1731 let mut removed_entries = Vec::new();
1732 let mut self_entries = self
1733 .entries_by_id
1734 .cursor::<()>()
1735 .filter(|e| include_ignored || !e.is_ignored)
1736 .peekable();
1737 let mut other_entries = other
1738 .entries_by_id
1739 .cursor::<()>()
1740 .filter(|e| include_ignored || !e.is_ignored)
1741 .peekable();
1742 loop {
1743 match (self_entries.peek(), other_entries.peek()) {
1744 (Some(self_entry), Some(other_entry)) => {
1745 match Ord::cmp(&self_entry.id, &other_entry.id) {
1746 Ordering::Less => {
1747 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1748 updated_entries.push(entry);
1749 self_entries.next();
1750 }
1751 Ordering::Equal => {
1752 if self_entry.scan_id != other_entry.scan_id {
1753 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1754 updated_entries.push(entry);
1755 }
1756
1757 self_entries.next();
1758 other_entries.next();
1759 }
1760 Ordering::Greater => {
1761 removed_entries.push(other_entry.id.to_proto());
1762 other_entries.next();
1763 }
1764 }
1765 }
1766 (Some(self_entry), None) => {
1767 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1768 updated_entries.push(entry);
1769 self_entries.next();
1770 }
1771 (None, Some(other_entry)) => {
1772 removed_entries.push(other_entry.id.to_proto());
1773 other_entries.next();
1774 }
1775 (None, None) => break,
1776 }
1777 }
1778
1779 let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1780 let mut removed_repositories = Vec::new();
1781 let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1782 let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1783 loop {
1784 match (self_repos.peek(), other_repos.peek()) {
1785 (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1786 match Ord::cmp(self_work_dir, other_work_dir) {
1787 Ordering::Less => {
1788 updated_repositories.push((*self_repo).into());
1789 self_repos.next();
1790 }
1791 Ordering::Equal => {
1792 if self_repo != other_repo {
1793 updated_repositories.push(self_repo.build_update(other_repo));
1794 }
1795
1796 self_repos.next();
1797 other_repos.next();
1798 }
1799 Ordering::Greater => {
1800 removed_repositories.push(other_repo.work_directory.to_proto());
1801 other_repos.next();
1802 }
1803 }
1804 }
1805 (Some((_, self_repo)), None) => {
1806 updated_repositories.push((*self_repo).into());
1807 self_repos.next();
1808 }
1809 (None, Some((_, other_repo))) => {
1810 removed_repositories.push(other_repo.work_directory.to_proto());
1811 other_repos.next();
1812 }
1813 (None, None) => break,
1814 }
1815 }
1816
1817 proto::UpdateWorktree {
1818 project_id,
1819 worktree_id,
1820 abs_path: self.abs_path().to_string_lossy().into(),
1821 root_name: self.root_name().to_string(),
1822 updated_entries,
1823 removed_entries,
1824 scan_id: self.scan_id as u64,
1825 is_last_update: self.completed_scan_id == self.scan_id,
1826 updated_repositories,
1827 removed_repositories,
1828 }
1829 }
1830
1831 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1832 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1833 let abs_path = self.abs_path.join(&entry.path);
1834 match smol::block_on(build_gitignore(&abs_path, fs)) {
1835 Ok(ignore) => {
1836 self.ignores_by_parent_abs_path.insert(
1837 abs_path.parent().unwrap().into(),
1838 (Arc::new(ignore), self.scan_id),
1839 );
1840 }
1841 Err(error) => {
1842 log::error!(
1843 "error loading .gitignore file {:?} - {:?}",
1844 &entry.path,
1845 error
1846 );
1847 }
1848 }
1849 }
1850
1851 self.reuse_entry_id(&mut entry);
1852
1853 if entry.kind == EntryKind::PendingDir {
1854 if let Some(existing_entry) =
1855 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1856 {
1857 entry.kind = existing_entry.kind;
1858 }
1859 }
1860
1861 let scan_id = self.scan_id;
1862 let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1863 if let Some(removed) = removed {
1864 if removed.id != entry.id {
1865 self.entries_by_id.remove(&removed.id, &());
1866 }
1867 }
1868 self.entries_by_id.insert_or_replace(
1869 PathEntry {
1870 id: entry.id,
1871 path: entry.path.clone(),
1872 is_ignored: entry.is_ignored,
1873 scan_id,
1874 },
1875 &(),
1876 );
1877
1878 entry
1879 }
1880
1881 fn populate_dir(
1882 &mut self,
1883 parent_path: Arc<Path>,
1884 entries: impl IntoIterator<Item = Entry>,
1885 ignore: Option<Arc<Gitignore>>,
1886 fs: &dyn Fs,
1887 ) {
1888 let mut parent_entry = if let Some(parent_entry) =
1889 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1890 {
1891 parent_entry.clone()
1892 } else {
1893 log::warn!(
1894 "populating a directory {:?} that has been removed",
1895 parent_path
1896 );
1897 return;
1898 };
1899
1900 match parent_entry.kind {
1901 EntryKind::PendingDir => {
1902 parent_entry.kind = EntryKind::Dir;
1903 }
1904 EntryKind::Dir => {}
1905 _ => return,
1906 }
1907
1908 if let Some(ignore) = ignore {
1909 self.ignores_by_parent_abs_path.insert(
1910 self.abs_path.join(&parent_path).into(),
1911 (ignore, self.scan_id),
1912 );
1913 }
1914
1915 if parent_path.file_name() == Some(&DOT_GIT) {
1916 self.build_repo(parent_path, fs);
1917 }
1918
1919 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1920 let mut entries_by_id_edits = Vec::new();
1921
1922 for mut entry in entries {
1923 self.reuse_entry_id(&mut entry);
1924 entries_by_id_edits.push(Edit::Insert(PathEntry {
1925 id: entry.id,
1926 path: entry.path.clone(),
1927 is_ignored: entry.is_ignored,
1928 scan_id: self.scan_id,
1929 }));
1930 entries_by_path_edits.push(Edit::Insert(entry));
1931 }
1932
1933 self.entries_by_path.edit(entries_by_path_edits, &());
1934 self.entries_by_id.edit(entries_by_id_edits, &());
1935 }
1936
1937 fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1938 let abs_path = self.abs_path.join(&parent_path);
1939 let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1940
1941 // Guard against repositories inside the repository metadata
1942 if work_dir
1943 .components()
1944 .find(|component| component.as_os_str() == *DOT_GIT)
1945 .is_some()
1946 {
1947 return None;
1948 };
1949
1950 let work_dir_id = self
1951 .entry_for_path(work_dir.clone())
1952 .map(|entry| entry.id)?;
1953
1954 if self.git_repositories.get(&work_dir_id).is_none() {
1955 let repo = fs.open_repo(abs_path.as_path())?;
1956 let work_directory = RepositoryWorkDirectory(work_dir.clone());
1957 let scan_id = self.scan_id;
1958
1959 let repo_lock = repo.lock();
1960
1961 self.repository_entries.insert(
1962 work_directory,
1963 RepositoryEntry {
1964 work_directory: work_dir_id.into(),
1965 branch: repo_lock.branch_name().map(Into::into),
1966 worktree_statuses: repo_lock.worktree_statuses().unwrap_or_default(),
1967 },
1968 );
1969 drop(repo_lock);
1970
1971 self.git_repositories.insert(
1972 work_dir_id,
1973 LocalRepositoryEntry {
1974 scan_id,
1975 full_scan_id: scan_id,
1976 repo_ptr: repo,
1977 git_dir_path: parent_path.clone(),
1978 },
1979 )
1980 }
1981
1982 Some(())
1983 }
1984 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1985 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1986 entry.id = removed_entry_id;
1987 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1988 entry.id = existing_entry.id;
1989 }
1990 }
1991
1992 fn remove_path(&mut self, path: &Path) {
1993 let mut new_entries;
1994 let removed_entries;
1995 {
1996 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1997 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1998 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1999 new_entries.push_tree(cursor.suffix(&()), &());
2000 }
2001 self.entries_by_path = new_entries;
2002
2003 let mut entries_by_id_edits = Vec::new();
2004 for entry in removed_entries.cursor::<()>() {
2005 let removed_entry_id = self
2006 .removed_entry_ids
2007 .entry(entry.inode)
2008 .or_insert(entry.id);
2009 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2010 entries_by_id_edits.push(Edit::Remove(entry.id));
2011 }
2012 self.entries_by_id.edit(entries_by_id_edits, &());
2013
2014 if path.file_name() == Some(&GITIGNORE) {
2015 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
2016 if let Some((_, scan_id)) = self
2017 .ignores_by_parent_abs_path
2018 .get_mut(abs_parent_path.as_path())
2019 {
2020 *scan_id = self.snapshot.scan_id;
2021 }
2022 }
2023 }
2024
2025 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2026 let mut inodes = TreeSet::default();
2027 for ancestor in path.ancestors().skip(1) {
2028 if let Some(entry) = self.entry_for_path(ancestor) {
2029 inodes.insert(entry.inode);
2030 }
2031 }
2032 inodes
2033 }
2034
2035 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2036 let mut new_ignores = Vec::new();
2037 for ancestor in abs_path.ancestors().skip(1) {
2038 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2039 new_ignores.push((ancestor, Some(ignore.clone())));
2040 } else {
2041 new_ignores.push((ancestor, None));
2042 }
2043 }
2044
2045 let mut ignore_stack = IgnoreStack::none();
2046 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2047 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2048 ignore_stack = IgnoreStack::all();
2049 break;
2050 } else if let Some(ignore) = ignore {
2051 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2052 }
2053 }
2054
2055 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2056 ignore_stack = IgnoreStack::all();
2057 }
2058
2059 ignore_stack
2060 }
2061}
2062
2063async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2064 let contents = fs.load(abs_path).await?;
2065 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2066 let mut builder = GitignoreBuilder::new(parent);
2067 for line in contents.lines() {
2068 builder.add_line(Some(abs_path.into()), line)?;
2069 }
2070 Ok(builder.build()?)
2071}
2072
2073impl WorktreeId {
2074 pub fn from_usize(handle_id: usize) -> Self {
2075 Self(handle_id)
2076 }
2077
2078 pub(crate) fn from_proto(id: u64) -> Self {
2079 Self(id as usize)
2080 }
2081
2082 pub fn to_proto(&self) -> u64 {
2083 self.0 as u64
2084 }
2085
2086 pub fn to_usize(&self) -> usize {
2087 self.0
2088 }
2089}
2090
2091impl fmt::Display for WorktreeId {
2092 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2093 self.0.fmt(f)
2094 }
2095}
2096
2097impl Deref for Worktree {
2098 type Target = Snapshot;
2099
2100 fn deref(&self) -> &Self::Target {
2101 match self {
2102 Worktree::Local(worktree) => &worktree.snapshot,
2103 Worktree::Remote(worktree) => &worktree.snapshot,
2104 }
2105 }
2106}
2107
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    /// Lets a `LocalWorktree` be used anywhere a `LocalSnapshot` is expected.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2115
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    /// Lets a `RemoteWorktree` be used anywhere a `Snapshot` is expected.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2123
2124impl fmt::Debug for LocalWorktree {
2125 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2126 self.snapshot.fmt(f)
2127 }
2128}
2129
2130impl fmt::Debug for Snapshot {
2131 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2132 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2133 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2134
2135 impl<'a> fmt::Debug for EntriesByPath<'a> {
2136 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2137 f.debug_map()
2138 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2139 .finish()
2140 }
2141 }
2142
2143 impl<'a> fmt::Debug for EntriesById<'a> {
2144 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2145 f.debug_list().entries(self.0.iter()).finish()
2146 }
2147 }
2148
2149 f.debug_struct("Snapshot")
2150 .field("id", &self.id)
2151 .field("root_name", &self.root_name)
2152 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2153 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2154 .finish()
2155 }
2156}
2157
/// A file that belongs to a worktree (either local or remote).
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// The file's last-known modification time.
    pub mtime: SystemTime,
    // Id of the worktree entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    // Whether the file lives in a local (as opposed to remote) worktree.
    pub(crate) is_local: bool,
    // Whether the underlying entry has been deleted from the worktree.
    pub(crate) is_deleted: bool,
}
2167
impl language::File for File {
    // Only files in local worktrees support local-file operations.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a display path: visible worktrees are shown by their root
    /// name; hidden ones by their absolute path, with the home directory
    /// abbreviated to `~` for local worktrees.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // The worktree root itself has an empty relative path; only append
        // the relative path when it is non-empty.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    // Serializes this file for transmission over the wire.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
2235
impl language::LocalFile for File {
    // The file's absolute path: worktree root joined with the relative path.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on the background executor.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that a buffer for this file was reloaded from
    /// disk. Only sent when the worktree is currently shared; send failures
    /// are logged and otherwise ignored (best effort).
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2279
2280impl File {
2281 pub fn from_proto(
2282 proto: rpc::proto::File,
2283 worktree: ModelHandle<Worktree>,
2284 cx: &AppContext,
2285 ) -> Result<Self> {
2286 let worktree_id = worktree
2287 .read(cx)
2288 .as_remote()
2289 .ok_or_else(|| anyhow!("not remote"))?
2290 .id();
2291
2292 if worktree_id.to_proto() != proto.worktree_id {
2293 return Err(anyhow!("worktree id does not match file"));
2294 }
2295
2296 Ok(Self {
2297 worktree,
2298 path: Path::new(&proto.path).into(),
2299 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2300 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2301 is_local: false,
2302 is_deleted: proto.is_deleted,
2303 })
2304 }
2305
2306 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2307 file.and_then(|f| f.as_any().downcast_ref())
2308 }
2309
2310 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2311 self.worktree.read(cx).id()
2312 }
2313
2314 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2315 if self.is_deleted {
2316 None
2317 } else {
2318 Some(self.entry_id)
2319 }
2320 }
2321}
2322
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // Whether this entry is excluded by a gitignore rule in scope.
    pub is_ignored: bool,
}
2333
/// The kind of filesystem object an `Entry` represents.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    // A directory that has been discovered but whose children haven't been scanned yet.
    PendingDir,
    Dir,
    // A file, carrying the precomputed character bag used for fuzzy matching.
    File(CharBag),
}
2340
/// How a path changed between two consecutive snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    // Used when the scanner can't distinguish an addition from an update,
    // e.g. for events received before the initial scan completed.
    AddedOrUpdated,
}
2348
2349impl Entry {
2350 fn new(
2351 path: Arc<Path>,
2352 metadata: &fs::Metadata,
2353 next_entry_id: &AtomicUsize,
2354 root_char_bag: CharBag,
2355 ) -> Self {
2356 Self {
2357 id: ProjectEntryId::new(next_entry_id),
2358 kind: if metadata.is_dir {
2359 EntryKind::PendingDir
2360 } else {
2361 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2362 },
2363 path,
2364 inode: metadata.inode,
2365 mtime: metadata.mtime,
2366 is_symlink: metadata.is_symlink,
2367 is_ignored: false,
2368 }
2369 }
2370
2371 pub fn is_dir(&self) -> bool {
2372 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2373 }
2374
2375 pub fn is_file(&self) -> bool {
2376 matches!(self.kind, EntryKind::File(_))
2377 }
2378}
2379
2380impl sum_tree::Item for Entry {
2381 type Summary = EntrySummary;
2382
2383 fn summary(&self) -> Self::Summary {
2384 let visible_count = if self.is_ignored { 0 } else { 1 };
2385 let file_count;
2386 let visible_file_count;
2387 if self.is_file() {
2388 file_count = 1;
2389 visible_file_count = visible_count;
2390 } else {
2391 file_count = 0;
2392 visible_file_count = 0;
2393 }
2394
2395 EntrySummary {
2396 max_path: self.path.clone(),
2397 count: 1,
2398 visible_count,
2399 file_count,
2400 visible_file_count,
2401 }
2402 }
2403}
2404
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and thus ordered) by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2412
/// Aggregated statistics for a range of entries ordered by path.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest path in the summarized range.
    max_path: Arc<Path>,
    count: usize,
    // Count of entries not excluded by gitignore.
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
2421
2422impl Default for EntrySummary {
2423 fn default() -> Self {
2424 Self {
2425 max_path: Arc::from(Path::new("")),
2426 count: 0,
2427 visible_count: 0,
2428 file_count: 0,
2429 visible_file_count: 0,
2430 }
2431 }
2432}
2433
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // `rhs` summarizes entries to the right, so its max path wins.
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2445
/// An id-keyed index record pointing back at an entry's path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this record was last updated.
    scan_id: usize,
}
2453
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// A record's summary is just its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2461
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Records are keyed (and thus ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2469
/// Summary for the id-keyed tree: the greatest entry id in the range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2474
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // Items are ordered by id, so the right-hand summary holds the maximum.
        self.max_id = summary.max_id;
    }
}
2482
/// Allows seeking the id-keyed tree directly by `ProjectEntryId`.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2488
/// Ordering key for entries: their worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2491
2492impl Default for PathKey {
2493 fn default() -> Self {
2494 Self(Path::new("").into())
2495 }
2496}
2497
/// Allows seeking the path-keyed tree directly by `PathKey`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2503
/// Watches the filesystem and keeps a `LocalSnapshot` up to date, emitting
/// `ScanState` updates as it goes.
struct BackgroundScanner {
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    // Requests from the foreground to rescan specific paths; the barrier is
    // released once the refresh has been processed and reported.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    // The previously-reported snapshot plus paths changed since then, used to
    // build change sets for status updates.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2513
2514impl BackgroundScanner {
2515 fn new(
2516 snapshot: LocalSnapshot,
2517 fs: Arc<dyn Fs>,
2518 status_updates_tx: UnboundedSender<ScanState>,
2519 executor: Arc<executor::Background>,
2520 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2521 ) -> Self {
2522 Self {
2523 fs,
2524 status_updates_tx,
2525 executor,
2526 refresh_requests_rx,
2527 prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2528 snapshot: Mutex::new(snapshot),
2529 finished_initial_scan: false,
2530 }
2531 }
2532
    /// Main loop of the scanner: performs the initial recursive scan, then
    /// processes FS events and refresh requests until the worktree is dropped.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            if ignore_stack.is_all() {
                // The root itself is ignored by an ancestor's gitignore.
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Drain any further batches that are already ready, so one
                    // `process_events` call covers them all.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2624
    /// Rescans the given paths and reports the result, passing `barrier` along
    /// so the requester can observe completion. Returns `false` if the status
    /// channel has been closed.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2629
    /// Applies a batch of FS event paths to the snapshot: reloads the affected
    /// entries, rescans new directories, refreshes ignore statuses, and prunes
    /// git repository state for paths that no longer exist.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            paths.sort_unstable();
            // Accumulate changed paths so the next status update reports them.
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop git repositories whose `.git` directory no longer exists.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|work_directory_id, _| {
            snapshot
                .entry_for_id(*work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                })
        });
        snapshot.git_repositories = git_repositories;

        // Keep repository entries only for repositories that still exist.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            snapshot
                .git_repositories
                .get(&entry.work_directory.0)
                .is_some()
        });
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }
2672
    /// Drains the scan-job queue using one worker per CPU, emitting periodic
    /// progress updates and servicing refresh requests while scanning.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker won the race and already
                                            // sent this update; catch up to its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2745
    /// Sends the current snapshot plus a change set (relative to the
    /// previously-reported snapshot) on the status channel. Returns `false` if
    /// the receiver has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        let mut old_snapshot = snapshot.snapshot.clone();
        // Swap so `prev_state.0` becomes the baseline for the next update.
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2762
    /// Reads one directory's children, building entries and enqueuing scan
    /// jobs for subdirectories. A `.gitignore` found in the directory causes
    /// the ignore status of already-processed siblings to be re-evaluated.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per child directory; `None` marks a directory that won't be
        // scanned because it would form a symlink cycle.
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // sorts near the beginning of the listing, so such entries should rarely be
                // numerous. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue child-directory jobs only after this directory's entries are
        // in the snapshot.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2888
    /// Refreshes the snapshot's entries for the given absolute paths. When
    /// `scan_queue_tx` is provided, directories are additionally enqueued for
    /// a recursive rescan. Returns the affected worktree-relative paths, or
    /// `None` if the worktree root could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Drop any path whose ancestor is also in the list; the ancestor's
        // refresh covers it.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                // The path exists: (re-)insert its entry and refresh git state.
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    self.reload_repo_for_path(&path, &mut snapshot);

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Guard against symlink cycles before enqueuing a rescan.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                // The path was deleted: clear its git status state.
                Ok(None) => {
                    self.remove_repo_path(&path, &mut snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2980
    /// Clears git status entries for a deleted path (and everything beneath
    /// it) within the repository containing it. Paths inside `.git` are left
    /// alone. Returns `None` early when no containing repository is found.
    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        if !path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let scan_id = snapshot.scan_id;
            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            // Remove statuses for the path itself and all of its descendants.
            snapshot.repository_entries.update(&work_dir, |entry| {
                entry
                    .worktree_statuses
                    .remove_from_while(&repo_path, |stored_path, _| {
                        stored_path.starts_with(&repo_path)
                    })
            });
        }

        Some(())
    }
3009
    /// Refreshes git state for a changed path. A change inside `.git` triggers
    /// a full rescan of that repository's branch and statuses; any other
    /// change refreshes the status of just that one path. Returns `None` early
    /// when no repository applies or it was already scanned this pass.
    fn reload_repo_for_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let (entry_id, repo_ptr) = {
                let (entry_id, repo) = snapshot.repo_for_metadata(&path)?;
                // Short circuit if this repository was already fully scanned
                // during the current scan.
                if repo.full_scan_id == scan_id {
                    return None;
                }
                (*entry_id, repo.repo_ptr.to_owned())
            };

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            let repo = repo_ptr.lock();
            repo.reload_index();
            let branch = repo.branch_name();
            let statuses = repo.worktree_statuses().unwrap_or_default();

            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.scan_id = scan_id;
                entry.full_scan_id = scan_id;
            });

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.branch = branch.map(Into::into);
                entry.worktree_statuses = statuses;
            });
        } else {
            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let status = {
                let local_repo = snapshot.get_local_repo(&repo)?;

                // Short circuit if we've already scanned everything
                if local_repo.full_scan_id == scan_id {
                    return None;
                }

                let git_ptr = local_repo.repo_ptr.lock();
                git_ptr.worktree_status(&repo_path)?
            };

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.worktree_statuses.insert(repo_path, status)
            });
        }

        Some(())
    }
3074
    /// Re-evaluates ignore status for directories whose `.gitignore` files
    /// changed during the last scan, and drops cached gitignores whose files
    /// were deleted.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        // Remove stale gitignores from both the local clone and the shared snapshot.
        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants; the ancestor's recursive update covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
3152
    /// Recomputes `is_ignored` for the children of one directory, enqueuing
    /// jobs for subdirectories, and writes any changes back to the snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only stage edits for entries whose ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
3195
    /// Diffs two snapshots at the given paths (and their descendants) by
    /// walking both path-ordered trees in lockstep, producing a map of
    /// per-path changes.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path and all of its descendants.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
3262
    /// Waits for the progress-update interval to elapse, or forever when
    /// progress updates are disabled.
    async fn progress_timer(&self, running: bool) {
        if !running {
            // Never resolve, so the progress branch of `select_biased!` is inert.
            return futures::future::pending().await;
        }

        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            // Use a random delay in tests to exercise different interleavings.
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
3275}
3276
3277fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3278 let mut result = root_char_bag;
3279 result.extend(
3280 path.to_string_lossy()
3281 .chars()
3282 .map(|c| c.to_ascii_lowercase()),
3283 );
3284 result
3285}
3286
/// A unit of work for the recursive directory scan.
struct ScanJob {
    abs_path: Arc<Path>,
    // Path relative to the worktree root.
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for child directories are enqueued.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
3294
/// A unit of work for recursively re-evaluating ignore statuses.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3300
/// Test-support extensions for worktree model handles.
pub trait WorktreeHandle {
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3308
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the scanner to notice it.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // Then delete it and wait for the deletion to be observed too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3349
/// A `sum_tree` dimension tracking, for a position in the entry tree, the
/// greatest path seen so far plus running entry counts. "visible" counts
/// exclude ignored entries; "file" counts exclude directories.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
3358
3359impl<'a> TraversalProgress<'a> {
3360 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3361 match (include_ignored, include_dirs) {
3362 (true, true) => self.count,
3363 (true, false) => self.file_count,
3364 (false, true) => self.visible_count,
3365 (false, false) => self.visible_file_count,
3366 }
3367 }
3368}
3369
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Folds an `EntrySummary` into this progress: all counts are additive,
    // and `max_path` is replaced by the summary's max path (summaries are
    // accumulated left-to-right, so the incoming max supersedes ours).
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3379
3380impl<'a> Default for TraversalProgress<'a> {
3381 fn default() -> Self {
3382 Self {
3383 max_path: Path::new(""),
3384 count: 0,
3385 visible_count: 0,
3386 file_count: 0,
3387 visible_file_count: 0,
3388 }
3389 }
3390}
3391
/// A filtered, ordered walk over a snapshot's entries, backed by a
/// `sum_tree` cursor dimensioned by `TraversalProgress`.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // When false, ignored entries are skipped.
    include_ignored: bool,
    // When false, directory entries are skipped.
    include_dirs: bool,
}
3397
impl<'a> Traversal<'a> {
    /// Advances past the current entry to the next entry matching this
    /// traversal's filters. Returns the underlying seek's result.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` filtered entries precede the cursor.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips over the current entry and all of its descendants, stopping at
    /// the next entry that matches the filters. If the entry landed on is
    /// filtered out, its subtree is skipped as well and the search repeats.
    /// Returns false if no matching entry remains.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the cursor's current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of filtered entries preceding the cursor.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3443
3444impl<'a> Iterator for Traversal<'a> {
3445 type Item = &'a Entry;
3446
3447 fn next(&mut self) -> Option<Self::Item> {
3448 if let Some(item) = self.entry() {
3449 self.advance();
3450 Some(item)
3451 } else {
3452 None
3453 }
3454 }
3455}
3456
/// A seek target within the entry tree, compared against the cursor's
/// `TraversalProgress` (see the `SeekTarget` impl below).
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path.
    Path(&'a Path),
    // Seek to the first entry after the given path's subtree.
    PathSuccessor(&'a Path),
    // Seek to the position where the filtered-entry count reaches `count`.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3467
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Ordinary lexicographic path comparison.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            // Successor of a subtree: while the cursor is still inside
            // `path`'s subtree we report Greater (keep seeking); the first
            // position outside it reports Equal (stop there).
            TraversalTarget::PathSuccessor(path) => {
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            // Compare the requested count against the filtered count
            // accumulated at the cursor position.
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3490
/// Iterator over the direct children of `parent_path`, implemented by
/// seeking the inner traversal from one sibling to the next.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3495
3496impl<'a> Iterator for ChildEntriesIter<'a> {
3497 type Item = &'a Entry;
3498
3499 fn next(&mut self) -> Option<Self::Item> {
3500 if let Some(item) = self.traversal.entry() {
3501 if item.path.starts_with(&self.parent_path) {
3502 self.traversal.advance_to_sibling();
3503 return Some(item);
3504 }
3505 }
3506 None
3507 }
3508}
3509
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a local worktree entry into its wire representation. The
    /// path is transmitted as a (lossy) UTF-8 string; the fuzzy-match char
    /// bag is not sent and is rebuilt on the receiving side.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3523
3524impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3525 type Error = anyhow::Error;
3526
3527 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3528 if let Some(mtime) = entry.mtime {
3529 let kind = if entry.is_dir {
3530 EntryKind::Dir
3531 } else {
3532 let mut char_bag = *root_char_bag;
3533 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3534 EntryKind::File(char_bag)
3535 };
3536 let path: Arc<Path> = PathBuf::from(entry.path).into();
3537 Ok(Entry {
3538 id: ProjectEntryId::from_proto(entry.id),
3539 kind,
3540 path,
3541 inode: entry.inode,
3542 mtime: mtime.into(),
3543 is_symlink: entry.is_symlink,
3544 is_ignored: entry.is_ignored,
3545 })
3546 } else {
3547 Err(anyhow!(
3548 "missing mtime in remote worktree entry {:?}",
3549 entry.path
3550 ))
3551 }
3552 }
3553}
3554
3555#[cfg(test)]
3556mod tests {
3557 use super::*;
3558 use fs::{FakeFs, RealFs};
3559 use git2::Signature;
3560 use gpui::{executor::Deterministic, TestAppContext};
3561 use pretty_assertions::assert_eq;
3562 use rand::prelude::*;
3563 use serde_json::json;
3564 use std::{env, fmt::Write};
3565 use util::{http::FakeHttpClient, test::temp_tree};
3566
    // Verifies basic traversal of a scanned worktree, with and without
    // ignored entries included.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        // "a/b" is ignored via the root .gitignore.
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // With include_ignored == false, the ignored "a/b" is skipped.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // With include_ignored == true, "a/b" appears as well.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3624
    // Verifies that symlinks forming a directory cycle ("lib/a/lib" and
    // "lib/b/lib" both point to ".." i.e. back to "lib") show up as entries
    // but are not descended into, and that renaming one of them is picked up.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // The symlink entries are listed, but no paths beneath them:
            // the scan did not recurse into the cycle.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks should be reflected after the FS
        // events settle.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3704
    // Verifies (against the real filesystem) that gitignore rules — including
    // a .gitignore in an ancestor of the worktree root — are applied both at
    // initial scan and when new files appear later.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        // The worktree root is "tree"; the outer .gitignore sits above it.
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create new files and confirm their ignored status is computed on
        // rescan, for all three cases.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            // The .git directory itself is always treated as ignored.
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3783
    // Verifies that entries resolve to the innermost enclosing git repository
    // (including a nested repo under "deps"), that changes inside a ".git"
    // directory emit `UpdatedGitRepositories`, and that deleting ".git"
    // removes the repository association.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // "c.txt" is outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1").to_owned())
            );

            // Files inside the nested repo resolve to it, not to "dir1".
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1/deps/dep1").to_owned())
            );
        });

        // Collect `UpdatedGitRepositories` events emitted by the worktree.
        let repo_update_events = Arc::new(Mutex::new(vec![]));
        tree.update(cx, |_, cx| {
            let repo_update_events = repo_update_events.clone();
            cx.subscribe(&tree, move |_, _, event, _| {
                if let Event::UpdatedGitRepositories(update) = event {
                    repo_update_events.lock().push(update.clone());
                }
            })
            .detach();
        });

        // Touching a file inside ".git" should report the containing
        // repository as updated.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        assert_eq!(
            repo_update_events.lock()[0]
                .keys()
                .cloned()
                .collect::<Vec<Arc<Path>>>(),
            vec![Path::new("dir1").into()]
        );

        // Deleting ".git" should drop the repository association entirely.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3874
3875 #[gpui::test]
3876 async fn test_git_status(cx: &mut TestAppContext) {
3877 #[track_caller]
3878 fn git_init(path: &Path) -> git2::Repository {
3879 git2::Repository::init(path).expect("Failed to initialize git repository")
3880 }
3881
3882 #[track_caller]
3883 fn git_add(path: &Path, repo: &git2::Repository) {
3884 let mut index = repo.index().expect("Failed to get index");
3885 index.add_path(path).expect("Failed to add a.txt");
3886 index.write().expect("Failed to write index");
3887 }
3888
3889 #[track_caller]
3890 fn git_remove_index(path: &Path, repo: &git2::Repository) {
3891 let mut index = repo.index().expect("Failed to get index");
3892 index.remove_path(path).expect("Failed to add a.txt");
3893 index.write().expect("Failed to write index");
3894 }
3895
3896 #[track_caller]
3897 fn git_commit(msg: &'static str, repo: &git2::Repository) {
3898 let signature = Signature::now("test", "test@zed.dev").unwrap();
3899 let oid = repo.index().unwrap().write_tree().unwrap();
3900 let tree = repo.find_tree(oid).unwrap();
3901 if let Some(head) = repo.head().ok() {
3902 let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
3903
3904 let parent_commit = parent_obj.as_commit().unwrap();
3905
3906 repo.commit(
3907 Some("HEAD"),
3908 &signature,
3909 &signature,
3910 msg,
3911 &tree,
3912 &[parent_commit],
3913 )
3914 .expect("Failed to commit with parent");
3915 } else {
3916 repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
3917 .expect("Failed to commit");
3918 }
3919 }
3920
3921 #[track_caller]
3922 fn git_stash(repo: &mut git2::Repository) {
3923 let signature = repo.signature().unwrap();
3924 repo.stash_save(&signature, "N/A", None)
3925 .expect("Failed to stash");
3926 }
3927
3928 #[track_caller]
3929 fn git_reset(offset: usize, repo: &git2::Repository) {
3930 let head = repo.head().expect("Couldn't get repo head");
3931 let object = head.peel(git2::ObjectType::Commit).unwrap();
3932 let commit = object.as_commit().unwrap();
3933 let new_head = commit
3934 .parents()
3935 .inspect(|parnet| {
3936 parnet.message();
3937 })
3938 .skip(offset)
3939 .next()
3940 .expect("Not enough history");
3941 repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
3942 .expect("Could not reset");
3943 }
3944
3945 #[allow(dead_code)]
3946 #[track_caller]
3947 fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
3948 repo.statuses(None)
3949 .unwrap()
3950 .iter()
3951 .map(|status| (status.path().unwrap().to_string(), status.status()))
3952 .collect()
3953 }
3954
3955 let root = temp_tree(json!({
3956 "project": {
3957 "a.txt": "a",
3958 "b.txt": "bb",
3959 "c": {
3960 "d": {
3961 "e.txt": "eee"
3962 }
3963 }
3964 },
3965
3966 }));
3967
3968 let http_client = FakeHttpClient::with_404_response();
3969 let client = cx.read(|cx| Client::new(http_client, cx));
3970 let tree = Worktree::local(
3971 client,
3972 root.path(),
3973 true,
3974 Arc::new(RealFs),
3975 Default::default(),
3976 &mut cx.to_async(),
3977 )
3978 .await
3979 .unwrap();
3980
3981 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3982 .await;
3983
3984 const A_TXT: &'static str = "a.txt";
3985 const B_TXT: &'static str = "b.txt";
3986 const E_TXT: &'static str = "c/d/e.txt";
3987
3988 let work_dir = root.path().join("project");
3989
3990 let mut repo = git_init(work_dir.as_path());
3991 git_add(Path::new(A_TXT), &repo);
3992 git_add(Path::new(E_TXT), &repo);
3993 git_commit("Initial commit", &repo);
3994
3995 std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
3996
3997 tree.flush_fs_events(cx).await;
3998
3999 // Check that the right git state is observed on startup
4000 tree.read_with(cx, |tree, _cx| {
4001 let snapshot = tree.snapshot();
4002 assert_eq!(snapshot.repository_entries.iter().count(), 1);
4003 let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
4004 assert_eq!(dir.0.as_ref(), Path::new("project"));
4005
4006 assert_eq!(repo.worktree_statuses.iter().count(), 2);
4007 assert_eq!(
4008 repo.worktree_statuses.get(&Path::new(A_TXT).into()),
4009 Some(&GitFileStatus::Modified)
4010 );
4011 assert_eq!(
4012 repo.worktree_statuses.get(&Path::new(B_TXT).into()),
4013 Some(&GitFileStatus::Added)
4014 );
4015 });
4016
4017 git_add(Path::new(A_TXT), &repo);
4018 git_add(Path::new(B_TXT), &repo);
4019 git_commit("Committing modified and added", &repo);
4020 tree.flush_fs_events(cx).await;
4021
4022 // Check that repo only changes are tracked
4023 tree.read_with(cx, |tree, _cx| {
4024 let snapshot = tree.snapshot();
4025 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4026
4027 assert_eq!(repo.worktree_statuses.iter().count(), 0);
4028 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4029 assert_eq!(repo.worktree_statuses.get(&Path::new(B_TXT).into()), None);
4030 });
4031
4032 git_reset(0, &repo);
4033 git_remove_index(Path::new(B_TXT), &repo);
4034 git_stash(&mut repo);
4035 std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
4036 tree.flush_fs_events(cx).await;
4037
4038 // Check that more complex repo changes are tracked
4039 tree.read_with(cx, |tree, _cx| {
4040 let snapshot = tree.snapshot();
4041 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4042
4043 assert_eq!(repo.worktree_statuses.iter().count(), 2);
4044 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4045 assert_eq!(
4046 repo.worktree_statuses.get(&Path::new(B_TXT).into()),
4047 Some(&GitFileStatus::Added)
4048 );
4049 assert_eq!(
4050 repo.worktree_statuses.get(&Path::new(E_TXT).into()),
4051 Some(&GitFileStatus::Modified)
4052 );
4053 });
4054
4055 std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
4056 std::fs::remove_dir_all(work_dir.join("c")).unwrap();
4057
4058 tree.flush_fs_events(cx).await;
4059
4060 // Check that non-repo behavior is tracked
4061 tree.read_with(cx, |tree, _cx| {
4062 let snapshot = tree.snapshot();
4063 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4064
4065 assert_eq!(repo.worktree_statuses.iter().count(), 0);
4066 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4067 assert_eq!(repo.worktree_statuses.get(&Path::new(B_TXT).into()), None);
4068 assert_eq!(repo.worktree_statuses.get(&Path::new(E_TXT).into()), None);
4069 });
4070 }
4071
    // Verifies that files written through the worktree API get correct
    // ignored status, whether they land in a tracked or an ignored directory.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        // Write one file into each directory via the worktree itself.
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4125
    // Verifies that a directory created through the worktree API while the
    // initial scan is still running ends up in the snapshot, and that a
    // remote snapshot can be brought up to date via `build_update`.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Snapshot taken before the entry is created; updated at the end.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Note: the initial scan has not been awaited, so this races with it.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Applying the delta update must reproduce the current snapshot.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
4174
    // Randomized test: performs worktree mutations while the initial scan is
    // still in progress, repeatedly checking snapshot invariants and that
    // remote-update deltas reproduce the local snapshot exactly.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Both knobs are overridable from the environment for stress runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot to apply delta updates onto.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally verify the delta-update path mid-scan.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        // Final convergence check once the scan has completed.
        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
4251
    // Randomized test: mutates the fake filesystem (with event batching and
    // delayed flushes), and checks that (1) `UpdatedEntries` events are a
    // faithful log of path changes, (2) a fresh scan agrees with the
    // incrementally-maintained snapshot, and (3) delta updates replay old
    // snapshots to the current state.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            // `paths` mirrors the worktree's sorted path list and is kept in
            // sync purely by replaying the change events.
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                // NOTE(review): `paths[ix]` can panic if `ix ==
                                // paths.len()` — presumably never hit because a
                                // trailing path would arrive as `Added`; confirm.
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Interleave FS mutations with partial flushes of buffered events,
        // saving snapshots along the way for the delta-update check below.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A brand-new worktree scanning the same FS must agree exactly with
        // the incrementally-maintained snapshot.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Each stored snapshot must replay to the final state via an update,
        // both with and without ignored entries included.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the old snapshot so it matches
                // what a remote peer without ignored entries would hold.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
4406
    /// Performs one random mutation on the worktree via its public API:
    /// delete an entry (~1/3), rename an entry (~1/3), or create/overwrite
    /// a file or directory (~1/3). Returns a task resolving when the
    /// operation completes.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // Delete — never the root entry (path "").
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // Rename — move under a random entry's directory, avoiding a
            // destination inside the entry's own subtree.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Create a child (if the chosen entry is a directory) or
            // overwrite the chosen file with empty contents.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4466
    /// Applies one random mutation to the fake filesystem rooted at `root_path`:
    /// creates a new file or directory, writes a random `.gitignore`, renames an
    /// entry (optionally overwriting an existing directory), or deletes an entry.
    ///
    /// Used by the randomized worktree tests; every `rng` call here is part of the
    /// deterministic, seed-reproducible mutation sequence, so the order and number
    /// of sampling calls must not change.
    ///
    /// * `insertion_probability` - chance of taking the "create new entry" branch
    ///   (forced when the tree is empty apart from the root directory).
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition every existing path under the root into files and dirs.
        // `dirs` always contains at least `root_path` itself.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Branch 1: create a new entry. Forced when only the bare root exists,
        // otherwise taken with `insertion_probability`.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        // Branch 2 (5%): write a random `.gitignore` into some directory,
        // listing a random subset of that directory's descendants.
        } else if rng.gen_bool(0.05) {
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // `subdirs` includes `ignore_dir_path` itself (a path starts_with
            // itself), so it is never empty and the gen_range below can't panic.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            // NOTE(review): this range is half-open (`0..`) while the file range
            // above is inclusive (`0..=`) — confirm the asymmetry is intentional
            // (it prevents selecting *all* subdirs, including `ignore_dir_path`
            // itself, which would produce an empty "" ignore line).
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Build the gitignore body: one path-relative line per chosen entry.
            // (`writeln!` into a String requires `std::fmt::Write` in scope.)
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        // Branch 3: rename or delete an existing entry.
        } else {
            // Pick any file or any non-root dir (`dirs[1..]` skips the root, so
            // the root itself is never renamed or deleted). This chain is
            // non-empty here: the first branch guarantees we only get here when
            // there is at least one file or more than one dir.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // New parent must not be inside the entry being moved.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally (30%) replace the chosen parent dir wholesale:
                // remove it, then rename the entry to the parent's own path.
                // Guarded so we never delete an ancestor of `old_path`.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4625
4626 fn gen_name(rng: &mut impl Rng) -> String {
4627 (0..6)
4628 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4629 .map(char::from)
4630 .collect()
4631 }
4632
    impl LocalSnapshot {
        /// Test-only sanity check: panics if the snapshot's redundant indexes or
        /// traversal APIs disagree with one another.
        fn check_invariants(&self) {
            // `entries_by_path` (ordered by path) and `entries_by_id` (ordered by
            // id) must hold exactly the same (path, id) pairs. The id-ordered
            // side is re-sorted by path via a BTreeSet before comparing.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // The file traversals must yield files in the same order as a raw
            // cursor over `entries_by_path`.
            // NOTE(review): the first argument looks like `include_ignored`
            // (true → all files, false → visible only) — `files` is defined
            // outside this chunk; confirm there.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            // Both traversals must be exhausted — no extra files beyond what the
            // cursor produced.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries`. Despite the variable name, this
            // produces a *preorder depth-first* ordering: each batch of children
            // is inserted at the pre-pop stack top (reversing them), and `pop`
            // takes from the end, so the lexicographically first child is
            // visited next.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            // That walk must match the path-sorted cursor order exactly.
            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // ...and so must the public `entries` traversal (with ignored
            // entries included).
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked .gitignore's parent directory — and the .gitignore
            // file itself — must still exist as entries in the snapshot.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Returns `(path, inode, is_ignored)` for every entry, sorted by path.
        /// Ignored entries are skipped unless `include_ignored` is true.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            // NOTE(review): the cursor already yields path order (asserted in
            // check_invariants), so this sort looks redundant but harmless.
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4707}