1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{
10 repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use smol::channel::{self, Sender};
40use std::{
41 any::Any,
42 cmp::{self, Ordering},
43 convert::TryFrom,
44 ffi::OsStr,
45 fmt,
46 future::Future,
47 mem,
48 ops::{Deref, DerefMut},
49 path::{Path, PathBuf},
50 pin::Pin,
51 sync::{
52 atomic::{AtomicUsize, Ordering::SeqCst},
53 Arc,
54 },
55 time::{Duration, SystemTime},
56};
57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
58use util::{paths::HOME, ResultExt, TryFutureExt};
59
/// Identifier for a worktree within a project. Constructed from the local
/// worktree model's id, or from the id carried in remote worktree metadata.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A directory tree belonging to a project: either backed by the local
/// filesystem, or a replica of a remote collaborator's worktree.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Sends batches of absolute paths for the background scanner to rescan;
    /// the accompanying barrier sender is dropped once the batch is processed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    /// Broadcasts whether a scan is currently in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    // Held only to keep the scanner alive for the worktree's lifetime.
    _background_scanner_task: Task<()>,
    /// Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    /// Diagnostics per file path, one vector entry per language server,
    /// kept sorted by server id (see `update_diagnostics`).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Error/warning counts per file path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    /// Reported by `Worktree::is_visible`.
    visible: bool,
}
86
/// A read-only replica of a collaborator's worktree, kept in sync by
/// applying `proto::UpdateWorktree` messages.
pub struct RemoteWorktree {
    /// Foreground copy of the snapshot, refreshed from `background_snapshot`.
    snapshot: Snapshot,
    /// Updates are applied to this snapshot on a background task before
    /// being copied into `snapshot` on the foreground.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Feeds updates to the background task; `None` once updates stop.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Waiters resolved once the given scan id has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    /// Reported by `Worktree::is_visible`.
    visible: bool,
    disconnected: bool,
}
99
/// An immutable view of a worktree's entries and git repositories at a point
/// in time. Shared between local and remote worktrees.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root on the host filesystem.
    abs_path: Arc<Path>,
    /// File name of the root, used for display and fuzzy matching.
    root_name: String,
    /// Lowercased character bag of `root_name`, for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    /// Git repositories in the worktree, keyed by their work directory.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
122
impl Snapshot {
    /// Returns the repository whose work directory contains `path`,
    /// preferring the repository with the longest (deepest) work-directory
    /// path when several contain it.
    pub fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
        let mut max_len = 0;
        let mut current_candidate = None;
        for (work_directory, repo) in (&self.repository_entries).iter() {
            if repo.contains(self, path) {
                if work_directory.0.as_os_str().len() >= max_len {
                    current_candidate = Some(repo);
                    max_len = work_directory.0.as_os_str().len();
                } else {
                    // NOTE(review): this early break assumes that once a
                    // containing repo with a shorter work directory appears,
                    // no deeper containing repo can follow in iteration
                    // order — confirm against `repository_entries` ordering.
                    break;
                }
            }
        }

        current_candidate.map(|entry| entry.to_owned())
    }
}
141
/// A git repository discovered in the worktree: its work directory entry,
/// current branch (if read), and per-file statuses.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    pub(crate) branch: Option<Arc<str>>,
    /// Statuses keyed by repository-relative path.
    pub(crate) statuses: TreeMap<RepoPath, GitFileStatus>,
}
148
149fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
150 proto::GitStatus::from_i32(git_status).map(|status| match status {
151 proto::GitStatus::Added => GitFileStatus::Added,
152 proto::GitStatus::Modified => GitFileStatus::Modified,
153 proto::GitStatus::Conflict => GitFileStatus::Conflict,
154 })
155}
156
157impl RepositoryEntry {
158 pub fn branch(&self) -> Option<Arc<str>> {
159 self.branch.clone()
160 }
161
162 pub fn work_directory_id(&self) -> ProjectEntryId {
163 *self.work_directory
164 }
165
166 pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
167 snapshot
168 .entry_for_id(self.work_directory_id())
169 .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
170 }
171
172 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
173 self.work_directory.contains(snapshot, path)
174 }
175
176 pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
177 self.work_directory
178 .relativize(snapshot, path)
179 .and_then(|repo_path| self.statuses.get(&repo_path))
180 .cloned()
181 }
182
183 pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
184 self.work_directory
185 .relativize(snapshot, path)
186 .and_then(|repo_path| {
187 self.statuses
188 .iter_from(&repo_path)
189 .take_while(|(key, _)| key.starts_with(&repo_path))
190 .map(|(path, status)| {
191 if path == &repo_path {
192 status
193 } else {
194 &GitFileStatus::Modified
195 }
196 })
197 .next()
198 .copied()
199 })
200 }
201
202 pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
203 let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
204 let mut removed_statuses: Vec<String> = Vec::new();
205
206 let mut self_statuses = self.statuses.iter().peekable();
207 let mut other_statuses = other.statuses.iter().peekable();
208 loop {
209 match (self_statuses.peek(), other_statuses.peek()) {
210 (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
211 match Ord::cmp(self_repo_path, other_repo_path) {
212 Ordering::Less => {
213 updated_statuses.push(make_status_entry(self_repo_path, self_status));
214 self_statuses.next();
215 }
216 Ordering::Equal => {
217 if self_status != other_status {
218 updated_statuses
219 .push(make_status_entry(self_repo_path, self_status));
220 }
221
222 self_statuses.next();
223 other_statuses.next();
224 }
225 Ordering::Greater => {
226 removed_statuses.push(make_repo_path(other_repo_path));
227 other_statuses.next();
228 }
229 }
230 }
231 (Some((self_repo_path, self_status)), None) => {
232 updated_statuses.push(make_status_entry(self_repo_path, self_status));
233 self_statuses.next();
234 }
235 (None, Some((other_repo_path, _))) => {
236 removed_statuses.push(make_repo_path(other_repo_path));
237 other_statuses.next();
238 }
239 (None, None) => break,
240 }
241 }
242
243 proto::RepositoryEntry {
244 work_directory_id: self.work_directory_id().to_proto(),
245 branch: self.branch.as_ref().map(|str| str.to_string()),
246 removed_repo_paths: removed_statuses,
247 updated_statuses: updated_statuses,
248 }
249 }
250}
251
252fn make_repo_path(path: &RepoPath) -> String {
253 path.as_os_str().to_string_lossy().to_string()
254}
255
256fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
257 proto::StatusEntry {
258 repo_path: make_repo_path(path),
259 status: match status {
260 GitFileStatus::Added => proto::GitStatus::Added.into(),
261 GitFileStatus::Modified => proto::GitStatus::Modified.into(),
262 GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
263 },
264 }
265}
266
267impl From<&RepositoryEntry> for proto::RepositoryEntry {
268 fn from(value: &RepositoryEntry) -> Self {
269 proto::RepositoryEntry {
270 work_directory_id: value.work_directory.to_proto(),
271 branch: value.branch.as_ref().map(|str| str.to_string()),
272 updated_statuses: value
273 .statuses
274 .iter()
275 .map(|(repo_path, status)| make_status_entry(repo_path, status))
276 .collect(),
277 removed_repo_paths: Default::default(),
278 }
279 }
280}
281
/// This path corresponds to the 'content path' (the folder that contains the .git).
/// Wraps a worktree-relative path (see `RepositoryEntry::work_directory`).
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
285
286impl Default for RepositoryWorkDirectory {
287 fn default() -> Self {
288 RepositoryWorkDirectory(Arc::from(Path::new("")))
289 }
290}
291
292impl AsRef<Path> for RepositoryWorkDirectory {
293 fn as_ref(&self) -> &Path {
294 self.0.as_ref()
295 }
296}
297
/// The worktree entry (by `ProjectEntryId`) that serves as a git
/// repository's work directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
300
301impl WorkDirectoryEntry {
302 // Note that these paths should be relative to the worktree root.
303 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
304 snapshot
305 .entry_for_id(self.0)
306 .map(|entry| path.starts_with(&entry.path))
307 .unwrap_or(false)
308 }
309
310 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
311 worktree.entry_for_id(self.0).and_then(|entry| {
312 path.strip_prefix(&entry.path)
313 .ok()
314 .map(move |path| path.into())
315 })
316 }
317}
318
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    // Expose the wrapped ProjectEntryId directly (e.g. `*work_directory`).
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
326
327impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
328 fn from(value: ProjectEntryId) -> Self {
329 WorkDirectoryEntry(value)
330 }
331}
332
/// Snapshot state that only exists for local worktrees: gitignore caches,
/// open git repositories, and scanner bookkeeping.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    /// Parsed `.gitignore` files keyed by the absolute path of the directory
    /// containing them, along with the scan id at which each was loaded.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    // work_directory_id
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    /// Remembers ids of removed entries so re-created paths keep their id.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    /// Shared counter used to mint ids for newly-discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
343
/// A git repository opened on the local filesystem, with the scan ids at
/// which it was last (fully) refreshed.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    pub(crate) scan_id: usize,
    pub(crate) full_scan_id: usize,
    /// Handle to the underlying libgit-style repository object.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
353
354impl LocalRepositoryEntry {
355 // Note that this path should be relative to the worktree root.
356 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
357 path.starts_with(self.git_dir_path.as_ref())
358 }
359}
360
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    // LocalSnapshot transparently exposes the shared Snapshot fields.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
368
impl DerefMut for LocalSnapshot {
    // Mutable counterpart of the Deref impl above.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
374
/// Messages sent from the background scanner to the worktree model.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced an updated snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// Entries that changed, keyed by (path, entry id).
        changes: HashMap<(Arc<Path>, ProjectEntryId), PathChange>,
        /// Dropped after the update is applied, releasing any task waiting
        /// on this batch of path changes.
        barrier: Option<barrier::Sender>,
        /// Whether the scanner is still scanning after this update.
        scanning: bool,
    },
}
384
/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    /// Receives each new snapshot so it can be relayed to collaborators.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    /// Signalled to resume sending updates after a reconnect.
    resume_updates: watch::Sender<()>,
    // Held to keep the remote-snapshot maintenance task alive.
    _maintain_remote_snapshot: Task<Option<()>>,
}
391
/// Events emitted by `Worktree` models.
pub enum Event {
    /// Entries changed; maps (path, entry id) to the kind of change.
    UpdatedEntries(HashMap<(Arc<Path>, ProjectEntryId), PathChange>),
    /// Git repositories changed; maps work-directory path to its entry.
    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
}
396
impl Entity for Worktree {
    // Worktree models emit `Event`s (entry and git-repository updates).
    type Event = Event;
}
400
impl Worktree {
    /// Creates a local worktree rooted at `path`, spawning a background
    /// scanner that keeps its snapshot in sync with the filesystem.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // The initial scan (id 1) has begun but not completed.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner into this
            // model: update the scanning flag, install new snapshots, and
            // emit entry-update events.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier wakes any task waiting
                                // on this batch of path changes.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Run the filesystem scanner on the background thread pool,
            // fed by a debounced filesystem watcher.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a worktree that mirrors a remote collaborator's worktree.
    /// `proto::UpdateWorktree` messages are applied to a background snapshot
    /// and then copied into the foreground snapshot.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming updates to the background snapshot off the main
            // thread, signalling after each one.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // On each signal, copy the background snapshot into the model
            // and resolve any subscriptions whose scan id is now observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        // The worktree model was dropped; stop listening.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// Downcasts to a local worktree, if this is one.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Downcasts to a remote worktree, if this is one.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable variant of `as_local`.
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable variant of `as_remote`.
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Whether this worktree is backed by the local filesystem.
    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    /// Whether this worktree mirrors a remote collaborator's worktree.
    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// A clone of the current shared (non-local-specific) snapshot.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The id of the most recent fully-completed scan.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    /// Whether this worktree is visible to the user.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// This collaborator's replica id (always 0 for local worktrees).
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over all diagnostic summaries, flattened into
    /// (path, language server id, summary) triples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
695
696impl LocalWorktree {
697 pub fn contains_abs_path(&self, path: &Path) -> bool {
698 path.starts_with(&self.abs_path)
699 }
700
701 fn absolutize(&self, path: &Path) -> PathBuf {
702 if path.file_name().is_some() {
703 self.abs_path.join(path)
704 } else {
705 self.abs_path.to_path_buf()
706 }
707 }
708
    /// Loads the file at `path` into a new `Buffer` model with the given
    /// remote `id`, using its git index text as the diff base.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Construct the text buffer off the main thread.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
731
732 pub fn diagnostics_for_path(
733 &self,
734 path: &Path,
735 ) -> Vec<(
736 LanguageServerId,
737 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
738 )> {
739 self.diagnostics.get(path).cloned().unwrap_or_default()
740 }
741
    /// Replaces the diagnostics produced by `server_id` for `worktree_path`,
    /// updating the per-file summaries, notifying collaborators when shared,
    /// and returning whether anything observable changed.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No remaining diagnostics from this server: drop its slot, and
            // the whole path entry once no server has diagnostics left.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            // Record the new summary and insert/replace the diagnostics,
            // keeping the per-path vector sorted by server id.
            summaries_by_server_id.insert(server_id, new_summary);
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        // Broadcast the new summary when it changed observably and the
        // worktree is currently shared.
        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
801
    /// Installs a snapshot produced by the background scanner, forwarding it
    /// to collaborators (when shared) and emitting an event for any git
    /// repositories that changed.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos =
            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
815
    /// Diffs two sorted repository maps with a single merge pass, returning
    /// every repository that was added, removed, or rescanned (differing
    /// `scan_id`), keyed by its work-directory path.
    fn changed_repos(
        &self,
        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
        let mut diff = HashMap::default();
        let mut old_repos = old_repos.iter().peekable();
        let mut new_repos = new_repos.iter().peekable();
        loop {
            match (old_repos.peek(), new_repos.peek()) {
                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
                    match Ord::cmp(old_entry_id, new_entry_id) {
                        // Only in the old map: repository was removed.
                        Ordering::Less => {
                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
                                diff.insert(entry.path.clone(), (*old_repo).clone());
                            }
                            old_repos.next();
                        }
                        // In both maps: changed only if rescanned since.
                        Ordering::Equal => {
                            if old_repo.scan_id != new_repo.scan_id {
                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                    diff.insert(entry.path.clone(), (*new_repo).clone());
                                }
                            }

                            old_repos.next();
                            new_repos.next();
                        }
                        // Only in the new map: repository was added.
                        Ordering::Greater => {
                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                diff.insert(entry.path.clone(), (*new_repo).clone());
                            }
                            new_repos.next();
                        }
                    }
                }
                (Some((old_entry_id, old_repo)), None) => {
                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
                        diff.insert(entry.path.clone(), (*old_repo).clone());
                    }
                    old_repos.next();
                }
                (None, Some((new_entry_id, new_repo))) => {
                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
                        diff.insert(entry.path.clone(), (*new_repo).clone());
                    }
                    new_repos.next();
                }
                (None, None) => break,
            }
        }
        diff
    }
869
870 pub fn scan_complete(&self) -> impl Future<Output = ()> {
871 let mut is_scanning_rx = self.is_scanning.1.clone();
872 async move {
873 let mut is_scanning = is_scanning_rx.borrow().clone();
874 while is_scanning {
875 if let Some(value) = is_scanning_rx.recv().await {
876 is_scanning = value;
877 } else {
878 break;
879 }
880 }
881 }
882 }
883
    /// A clone of the worktree's current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
887
    /// This worktree's identity, as shared with collaborators.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
896
    /// Reads the file at worktree-relative `path` from disk, returning its
    /// `File` metadata, contents, and — when the file belongs to a git
    /// repository — the index text used as the diff base.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // If the file lives inside a git repository, read its index text on
        // the background thread pool in parallel with the file load.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
951
952 pub fn save_buffer(
953 &self,
954 buffer_handle: ModelHandle<Buffer>,
955 path: Arc<Path>,
956 has_changed_file: bool,
957 cx: &mut ModelContext<Worktree>,
958 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
959 let handle = cx.handle();
960 let buffer = buffer_handle.read(cx);
961
962 let rpc = self.client.clone();
963 let buffer_id = buffer.remote_id();
964 let project_id = self.share.as_ref().map(|share| share.project_id);
965
966 let text = buffer.as_rope().clone();
967 let fingerprint = text.fingerprint();
968 let version = buffer.version();
969 let save = self.write_file(path, text, buffer.line_ending(), cx);
970
971 cx.as_mut().spawn(|mut cx| async move {
972 let entry = save.await?;
973
974 if has_changed_file {
975 let new_file = Arc::new(File {
976 entry_id: entry.id,
977 worktree: handle,
978 path: entry.path,
979 mtime: entry.mtime,
980 is_local: true,
981 is_deleted: false,
982 });
983
984 if let Some(project_id) = project_id {
985 rpc.send(proto::UpdateBufferFile {
986 project_id,
987 buffer_id,
988 file: Some(new_file.to_proto()),
989 })
990 .log_err();
991 }
992
993 buffer_handle.update(&mut cx, |buffer, cx| {
994 if has_changed_file {
995 buffer.file_updated(new_file, cx).detach();
996 }
997 });
998 }
999
1000 if let Some(project_id) = project_id {
1001 rpc.send(proto::BufferSaved {
1002 project_id,
1003 buffer_id,
1004 version: serialize_version(&version),
1005 mtime: Some(entry.mtime.into()),
1006 fingerprint: serialize_fingerprint(fingerprint),
1007 })?;
1008 }
1009
1010 buffer_handle.update(&mut cx, |buffer, cx| {
1011 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1012 });
1013
1014 Ok((version, fingerprint, entry.mtime))
1015 })
1016 }
1017
1018 pub fn create_entry(
1019 &self,
1020 path: impl Into<Arc<Path>>,
1021 is_dir: bool,
1022 cx: &mut ModelContext<Worktree>,
1023 ) -> Task<Result<Entry>> {
1024 let path = path.into();
1025 let abs_path = self.absolutize(&path);
1026 let fs = self.fs.clone();
1027 let write = cx.background().spawn(async move {
1028 if is_dir {
1029 fs.create_dir(&abs_path).await
1030 } else {
1031 fs.save(&abs_path, &Default::default(), Default::default())
1032 .await
1033 }
1034 });
1035
1036 cx.spawn(|this, mut cx| async move {
1037 write.await?;
1038 this.update(&mut cx, |this, cx| {
1039 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1040 })
1041 .await
1042 })
1043 }
1044
1045 pub fn write_file(
1046 &self,
1047 path: impl Into<Arc<Path>>,
1048 text: Rope,
1049 line_ending: LineEnding,
1050 cx: &mut ModelContext<Worktree>,
1051 ) -> Task<Result<Entry>> {
1052 let path = path.into();
1053 let abs_path = self.absolutize(&path);
1054 let fs = self.fs.clone();
1055 let write = cx
1056 .background()
1057 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1058
1059 cx.spawn(|this, mut cx| async move {
1060 write.await?;
1061 this.update(&mut cx, |this, cx| {
1062 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1063 })
1064 .await
1065 })
1066 }
1067
    /// Removes the entry with `entry_id` from disk — files directly,
    /// directories recursively — then waits for the background scanner to
    /// process the deletion. Returns `None` if the entry no longer exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root before joining the entry's path; an
            // empty entry path (no file name) denotes the root itself.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path, then wait until
            // the barrier is dropped (snapshot updated).
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
1110
1111 pub fn rename_entry(
1112 &self,
1113 entry_id: ProjectEntryId,
1114 new_path: impl Into<Arc<Path>>,
1115 cx: &mut ModelContext<Worktree>,
1116 ) -> Option<Task<Result<Entry>>> {
1117 let old_path = self.entry_for_id(entry_id)?.path.clone();
1118 let new_path = new_path.into();
1119 let abs_old_path = self.absolutize(&old_path);
1120 let abs_new_path = self.absolutize(&new_path);
1121 let fs = self.fs.clone();
1122 let rename = cx.background().spawn(async move {
1123 fs.rename(&abs_old_path, &abs_new_path, Default::default())
1124 .await
1125 });
1126
1127 Some(cx.spawn(|this, mut cx| async move {
1128 rename.await?;
1129 this.update(&mut cx, |this, cx| {
1130 this.as_local_mut()
1131 .unwrap()
1132 .refresh_entry(new_path.clone(), Some(old_path), cx)
1133 })
1134 .await
1135 }))
1136 }
1137
1138 pub fn copy_entry(
1139 &self,
1140 entry_id: ProjectEntryId,
1141 new_path: impl Into<Arc<Path>>,
1142 cx: &mut ModelContext<Worktree>,
1143 ) -> Option<Task<Result<Entry>>> {
1144 let old_path = self.entry_for_id(entry_id)?.path.clone();
1145 let new_path = new_path.into();
1146 let abs_old_path = self.absolutize(&old_path);
1147 let abs_new_path = self.absolutize(&new_path);
1148 let fs = self.fs.clone();
1149 let copy = cx.background().spawn(async move {
1150 copy_recursive(
1151 fs.as_ref(),
1152 &abs_old_path,
1153 &abs_new_path,
1154 Default::default(),
1155 )
1156 .await
1157 });
1158
1159 Some(cx.spawn(|this, mut cx| async move {
1160 copy.await?;
1161 this.update(&mut cx, |this, cx| {
1162 this.as_local_mut()
1163 .unwrap()
1164 .refresh_entry(new_path.clone(), None, cx)
1165 })
1166 .await
1167 }))
1168 }
1169
    /// Asks the background scanner to rescan `path` (and `old_path`, when this
    /// refresh follows a rename), waits for that scan to complete, and returns
    /// the up-to-date entry at `path`.
    ///
    /// Fails if the worktree is dropped while waiting, or if no entry exists
    /// at `path` once the scan has finished.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            // Canonicalize the root so the scanner is handed absolute paths.
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path with no file name refers to the worktree root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed these paths.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1207
    /// Starts (or re-confirms) sharing this worktree with remote collaborators
    /// under the given project id.
    ///
    /// The first call spawns a background task that streams snapshot diffs to
    /// the server; subsequent calls just nudge that task to resume sending.
    /// The returned task resolves once the first update batch has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve the returned task immediately and poke
            // the background task in case it's waiting to retry a failed send.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send all known diagnostic summaries up-front, before streaming
            // snapshot updates.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Diff against an empty snapshot first, so the initial
                    // update contains the entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Tiny chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            // On failure, park until `share` is called again
                            // (which signals `resume_updates`); if the channel
                            // closes, sharing has ended.
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // First batch delivered: resolve the task returned by
                        // the original `share` call.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1294
1295 pub fn unshare(&mut self) {
1296 self.share.take();
1297 }
1298
1299 pub fn is_shared(&self) -> bool {
1300 self.share.is_some()
1301 }
1302}
1303
impl RemoteWorktree {
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Called when the connection to the host is lost. Stops accepting further
    /// remote updates, drops all pending snapshot waiters (their receivers
    /// will resolve with an error), and marks the worktree as disconnected.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Saves a buffer by asking the host to save it via RPC, then applies the
    /// version, fingerprint, and mtime reported by the host to the buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Enqueues a worktree update received from the host. Updates are silently
    /// dropped after disconnection (`updates_tx` has been taken).
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot with at least the given scan id has been fully applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once a snapshot with the given scan id
    /// has been observed. Resolves immediately if it already has, and errors
    /// if the worktree is (or becomes) disconnected, because the sender side
    /// of the channel is dropped in that case.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            // Keep the subscription list sorted by scan id; both `Ok` and
            // `Err` yield a valid insertion index.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Applies a diagnostic summary received from the host. An empty summary
    /// removes the record for that server (and the whole per-path map, if it
    /// becomes empty); otherwise the summary is inserted or replaced.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry sent by the host, after waiting until the snapshot for
    /// `scan_id` has been observed. The background snapshot is updated first,
    /// then mirrored into the foreground snapshot.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes an entry by id, after waiting until the snapshot for `scan_id`
    /// has been observed. As with `insert_entry`, the background snapshot is
    /// mutated and then mirrored into the foreground snapshot.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1445
1446impl Snapshot {
1447 pub fn id(&self) -> WorktreeId {
1448 self.id
1449 }
1450
1451 pub fn abs_path(&self) -> &Arc<Path> {
1452 &self.abs_path
1453 }
1454
1455 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1456 self.entries_by_id.get(&entry_id, &()).is_some()
1457 }
1458
1459 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1460 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1461 let old_entry = self.entries_by_id.insert_or_replace(
1462 PathEntry {
1463 id: entry.id,
1464 path: entry.path.clone(),
1465 is_ignored: entry.is_ignored,
1466 scan_id: 0,
1467 },
1468 &(),
1469 );
1470 if let Some(old_entry) = old_entry {
1471 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1472 }
1473 self.entries_by_path.insert_or_replace(entry.clone(), &());
1474 Ok(entry)
1475 }
1476
1477 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1478 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1479 self.entries_by_path = {
1480 let mut cursor = self.entries_by_path.cursor();
1481 let mut new_entries_by_path =
1482 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1483 while let Some(entry) = cursor.item() {
1484 if entry.path.starts_with(&removed_entry.path) {
1485 self.entries_by_id.remove(&entry.id, &());
1486 cursor.next(&());
1487 } else {
1488 break;
1489 }
1490 }
1491 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1492 new_entries_by_path
1493 };
1494
1495 Some(removed_entry.path)
1496 }
1497
1498 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1499 let mut entries_by_path_edits = Vec::new();
1500 let mut entries_by_id_edits = Vec::new();
1501 for entry_id in update.removed_entries {
1502 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1503 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1504 entries_by_id_edits.push(Edit::Remove(entry.id));
1505 }
1506 }
1507
1508 for entry in update.updated_entries {
1509 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1510 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1511 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1512 }
1513 entries_by_id_edits.push(Edit::Insert(PathEntry {
1514 id: entry.id,
1515 path: entry.path.clone(),
1516 is_ignored: entry.is_ignored,
1517 scan_id: 0,
1518 }));
1519 entries_by_path_edits.push(Edit::Insert(entry));
1520 }
1521
1522 self.entries_by_path.edit(entries_by_path_edits, &());
1523 self.entries_by_id.edit(entries_by_id_edits, &());
1524
1525 update.removed_repositories.sort_unstable();
1526 self.repository_entries.retain(|_, entry| {
1527 if let Ok(_) = update
1528 .removed_repositories
1529 .binary_search(&entry.work_directory.to_proto())
1530 {
1531 false
1532 } else {
1533 true
1534 }
1535 });
1536
1537 for repository in update.updated_repositories {
1538 let work_directory_entry: WorkDirectoryEntry =
1539 ProjectEntryId::from_proto(repository.work_directory_id).into();
1540
1541 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1542 let mut statuses = TreeMap::default();
1543 for status_entry in repository.updated_statuses {
1544 let Some(git_file_status) = read_git_status(status_entry.status) else {
1545 continue;
1546 };
1547
1548 let repo_path = RepoPath::new(status_entry.repo_path.into());
1549 statuses.insert(repo_path, git_file_status);
1550 }
1551
1552 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1553 if self.repository_entries.get(&work_directory).is_some() {
1554 self.repository_entries.update(&work_directory, |repo| {
1555 repo.branch = repository.branch.map(Into::into);
1556 repo.statuses.insert_tree(statuses);
1557
1558 for repo_path in repository.removed_repo_paths {
1559 let repo_path = RepoPath::new(repo_path.into());
1560 repo.statuses.remove(&repo_path);
1561 }
1562 });
1563 } else {
1564 self.repository_entries.insert(
1565 work_directory,
1566 RepositoryEntry {
1567 work_directory: work_directory_entry,
1568 branch: repository.branch.map(Into::into),
1569 statuses,
1570 },
1571 )
1572 }
1573 } else {
1574 log::error!("no work directory entry for repository {:?}", repository)
1575 }
1576 }
1577
1578 self.scan_id = update.scan_id as usize;
1579 if update.is_last_update {
1580 self.completed_scan_id = update.scan_id as usize;
1581 }
1582
1583 Ok(())
1584 }
1585
1586 pub fn file_count(&self) -> usize {
1587 self.entries_by_path.summary().file_count
1588 }
1589
1590 pub fn visible_file_count(&self) -> usize {
1591 self.entries_by_path.summary().visible_file_count
1592 }
1593
1594 fn traverse_from_offset(
1595 &self,
1596 include_dirs: bool,
1597 include_ignored: bool,
1598 start_offset: usize,
1599 ) -> Traversal {
1600 let mut cursor = self.entries_by_path.cursor();
1601 cursor.seek(
1602 &TraversalTarget::Count {
1603 count: start_offset,
1604 include_dirs,
1605 include_ignored,
1606 },
1607 Bias::Right,
1608 &(),
1609 );
1610 Traversal {
1611 cursor,
1612 include_dirs,
1613 include_ignored,
1614 }
1615 }
1616
1617 fn traverse_from_path(
1618 &self,
1619 include_dirs: bool,
1620 include_ignored: bool,
1621 path: &Path,
1622 ) -> Traversal {
1623 let mut cursor = self.entries_by_path.cursor();
1624 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1625 Traversal {
1626 cursor,
1627 include_dirs,
1628 include_ignored,
1629 }
1630 }
1631
1632 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1633 self.traverse_from_offset(false, include_ignored, start)
1634 }
1635
1636 pub fn entries(&self, include_ignored: bool) -> Traversal {
1637 self.traverse_from_offset(true, include_ignored, 0)
1638 }
1639
1640 pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1641 self.repository_entries.values()
1642 }
1643
1644 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1645 let empty_path = Path::new("");
1646 self.entries_by_path
1647 .cursor::<()>()
1648 .filter(move |entry| entry.path.as_ref() != empty_path)
1649 .map(|entry| &entry.path)
1650 }
1651
1652 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1653 let mut cursor = self.entries_by_path.cursor();
1654 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1655 let traversal = Traversal {
1656 cursor,
1657 include_dirs: true,
1658 include_ignored: true,
1659 };
1660 ChildEntriesIter {
1661 traversal,
1662 parent_path,
1663 }
1664 }
1665
1666 pub fn root_entry(&self) -> Option<&Entry> {
1667 self.entry_for_path("")
1668 }
1669
1670 pub fn root_name(&self) -> &str {
1671 &self.root_name
1672 }
1673
1674 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1675 self.repository_entries
1676 .get(&RepositoryWorkDirectory(Path::new("").into()))
1677 .map(|entry| entry.to_owned())
1678 }
1679
1680 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1681 self.repository_entries.values()
1682 }
1683
1684 pub fn scan_id(&self) -> usize {
1685 self.scan_id
1686 }
1687
1688 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1689 let path = path.as_ref();
1690 self.traverse_from_path(true, true, path)
1691 .entry()
1692 .and_then(|entry| {
1693 if entry.path.as_ref() == path {
1694 Some(entry)
1695 } else {
1696 None
1697 }
1698 })
1699 }
1700
1701 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1702 let entry = self.entries_by_id.get(&id, &())?;
1703 self.entry_for_path(&entry.path)
1704 }
1705
1706 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1707 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1708 }
1709}
1710
1711impl LocalSnapshot {
1712 pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1713 self.git_repositories.get(&repo.work_directory.0)
1714 }
1715
1716 pub(crate) fn repo_for_metadata(
1717 &self,
1718 path: &Path,
1719 ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1720 self.git_repositories
1721 .iter()
1722 .find(|(_, repo)| repo.in_dot_git(path))
1723 }
1724
1725 #[cfg(test)]
1726 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1727 let root_name = self.root_name.clone();
1728 proto::UpdateWorktree {
1729 project_id,
1730 worktree_id: self.id().to_proto(),
1731 abs_path: self.abs_path().to_string_lossy().into(),
1732 root_name,
1733 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1734 removed_entries: Default::default(),
1735 scan_id: self.scan_id as u64,
1736 is_last_update: true,
1737 updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1738 removed_repositories: Default::default(),
1739 }
1740 }
1741
1742 pub(crate) fn build_update(
1743 &self,
1744 other: &Self,
1745 project_id: u64,
1746 worktree_id: u64,
1747 include_ignored: bool,
1748 ) -> proto::UpdateWorktree {
1749 let mut updated_entries = Vec::new();
1750 let mut removed_entries = Vec::new();
1751 let mut self_entries = self
1752 .entries_by_id
1753 .cursor::<()>()
1754 .filter(|e| include_ignored || !e.is_ignored)
1755 .peekable();
1756 let mut other_entries = other
1757 .entries_by_id
1758 .cursor::<()>()
1759 .filter(|e| include_ignored || !e.is_ignored)
1760 .peekable();
1761 loop {
1762 match (self_entries.peek(), other_entries.peek()) {
1763 (Some(self_entry), Some(other_entry)) => {
1764 match Ord::cmp(&self_entry.id, &other_entry.id) {
1765 Ordering::Less => {
1766 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1767 updated_entries.push(entry);
1768 self_entries.next();
1769 }
1770 Ordering::Equal => {
1771 if self_entry.scan_id != other_entry.scan_id {
1772 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1773 updated_entries.push(entry);
1774 }
1775
1776 self_entries.next();
1777 other_entries.next();
1778 }
1779 Ordering::Greater => {
1780 removed_entries.push(other_entry.id.to_proto());
1781 other_entries.next();
1782 }
1783 }
1784 }
1785 (Some(self_entry), None) => {
1786 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1787 updated_entries.push(entry);
1788 self_entries.next();
1789 }
1790 (None, Some(other_entry)) => {
1791 removed_entries.push(other_entry.id.to_proto());
1792 other_entries.next();
1793 }
1794 (None, None) => break,
1795 }
1796 }
1797
1798 let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1799 let mut removed_repositories = Vec::new();
1800 let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1801 let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1802 loop {
1803 match (self_repos.peek(), other_repos.peek()) {
1804 (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1805 match Ord::cmp(self_work_dir, other_work_dir) {
1806 Ordering::Less => {
1807 updated_repositories.push((*self_repo).into());
1808 self_repos.next();
1809 }
1810 Ordering::Equal => {
1811 if self_repo != other_repo {
1812 updated_repositories.push(self_repo.build_update(other_repo));
1813 }
1814
1815 self_repos.next();
1816 other_repos.next();
1817 }
1818 Ordering::Greater => {
1819 removed_repositories.push(other_repo.work_directory.to_proto());
1820 other_repos.next();
1821 }
1822 }
1823 }
1824 (Some((_, self_repo)), None) => {
1825 updated_repositories.push((*self_repo).into());
1826 self_repos.next();
1827 }
1828 (None, Some((_, other_repo))) => {
1829 removed_repositories.push(other_repo.work_directory.to_proto());
1830 other_repos.next();
1831 }
1832 (None, None) => break,
1833 }
1834 }
1835
1836 proto::UpdateWorktree {
1837 project_id,
1838 worktree_id,
1839 abs_path: self.abs_path().to_string_lossy().into(),
1840 root_name: self.root_name().to_string(),
1841 updated_entries,
1842 removed_entries,
1843 scan_id: self.scan_id as u64,
1844 is_last_update: self.completed_scan_id == self.scan_id,
1845 updated_repositories,
1846 removed_repositories,
1847 }
1848 }
1849
1850 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1851 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1852 let abs_path = self.abs_path.join(&entry.path);
1853 match smol::block_on(build_gitignore(&abs_path, fs)) {
1854 Ok(ignore) => {
1855 self.ignores_by_parent_abs_path.insert(
1856 abs_path.parent().unwrap().into(),
1857 (Arc::new(ignore), self.scan_id),
1858 );
1859 }
1860 Err(error) => {
1861 log::error!(
1862 "error loading .gitignore file {:?} - {:?}",
1863 &entry.path,
1864 error
1865 );
1866 }
1867 }
1868 }
1869
1870 self.reuse_entry_id(&mut entry);
1871
1872 if entry.kind == EntryKind::PendingDir {
1873 if let Some(existing_entry) =
1874 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1875 {
1876 entry.kind = existing_entry.kind;
1877 }
1878 }
1879
1880 let scan_id = self.scan_id;
1881 let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1882 if let Some(removed) = removed {
1883 if removed.id != entry.id {
1884 self.entries_by_id.remove(&removed.id, &());
1885 }
1886 }
1887 self.entries_by_id.insert_or_replace(
1888 PathEntry {
1889 id: entry.id,
1890 path: entry.path.clone(),
1891 is_ignored: entry.is_ignored,
1892 scan_id,
1893 },
1894 &(),
1895 );
1896
1897 entry
1898 }
1899
1900 fn populate_dir(
1901 &mut self,
1902 parent_path: Arc<Path>,
1903 entries: impl IntoIterator<Item = Entry>,
1904 ignore: Option<Arc<Gitignore>>,
1905 fs: &dyn Fs,
1906 ) {
1907 let mut parent_entry = if let Some(parent_entry) =
1908 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1909 {
1910 parent_entry.clone()
1911 } else {
1912 log::warn!(
1913 "populating a directory {:?} that has been removed",
1914 parent_path
1915 );
1916 return;
1917 };
1918
1919 match parent_entry.kind {
1920 EntryKind::PendingDir => {
1921 parent_entry.kind = EntryKind::Dir;
1922 }
1923 EntryKind::Dir => {}
1924 _ => return,
1925 }
1926
1927 if let Some(ignore) = ignore {
1928 self.ignores_by_parent_abs_path.insert(
1929 self.abs_path.join(&parent_path).into(),
1930 (ignore, self.scan_id),
1931 );
1932 }
1933
1934 if parent_path.file_name() == Some(&DOT_GIT) {
1935 self.build_repo(parent_path, fs);
1936 }
1937
1938 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1939 let mut entries_by_id_edits = Vec::new();
1940
1941 for mut entry in entries {
1942 self.reuse_entry_id(&mut entry);
1943 entries_by_id_edits.push(Edit::Insert(PathEntry {
1944 id: entry.id,
1945 path: entry.path.clone(),
1946 is_ignored: entry.is_ignored,
1947 scan_id: self.scan_id,
1948 }));
1949 entries_by_path_edits.push(Edit::Insert(entry));
1950 }
1951
1952 self.entries_by_path.edit(entries_by_path_edits, &());
1953 self.entries_by_id.edit(entries_by_id_edits, &());
1954 }
1955
1956 fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1957 let abs_path = self.abs_path.join(&parent_path);
1958 let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1959
1960 // Guard against repositories inside the repository metadata
1961 if work_dir
1962 .components()
1963 .find(|component| component.as_os_str() == *DOT_GIT)
1964 .is_some()
1965 {
1966 return None;
1967 };
1968
1969 let work_dir_id = self
1970 .entry_for_path(work_dir.clone())
1971 .map(|entry| entry.id)?;
1972
1973 if self.git_repositories.get(&work_dir_id).is_none() {
1974 let repo = fs.open_repo(abs_path.as_path())?;
1975 let work_directory = RepositoryWorkDirectory(work_dir.clone());
1976 let scan_id = self.scan_id;
1977
1978 let repo_lock = repo.lock();
1979
1980 self.repository_entries.insert(
1981 work_directory,
1982 RepositoryEntry {
1983 work_directory: work_dir_id.into(),
1984 branch: repo_lock.branch_name().map(Into::into),
1985 statuses: repo_lock.statuses().unwrap_or_default(),
1986 },
1987 );
1988 drop(repo_lock);
1989
1990 self.git_repositories.insert(
1991 work_dir_id,
1992 LocalRepositoryEntry {
1993 scan_id,
1994 full_scan_id: scan_id,
1995 repo_ptr: repo,
1996 git_dir_path: parent_path.clone(),
1997 },
1998 )
1999 }
2000
2001 Some(())
2002 }
2003 fn reuse_entry_id(&mut self, entry: &mut Entry) {
2004 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2005 entry.id = removed_entry_id;
2006 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
2007 entry.id = existing_entry.id;
2008 }
2009 }
2010
2011 fn remove_path(&mut self, path: &Path) {
2012 let mut new_entries;
2013 let removed_entries;
2014 {
2015 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2016 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2017 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2018 new_entries.push_tree(cursor.suffix(&()), &());
2019 }
2020 self.entries_by_path = new_entries;
2021
2022 let mut entries_by_id_edits = Vec::new();
2023 for entry in removed_entries.cursor::<()>() {
2024 let removed_entry_id = self
2025 .removed_entry_ids
2026 .entry(entry.inode)
2027 .or_insert(entry.id);
2028 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2029 entries_by_id_edits.push(Edit::Remove(entry.id));
2030 }
2031 self.entries_by_id.edit(entries_by_id_edits, &());
2032
2033 if path.file_name() == Some(&GITIGNORE) {
2034 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
2035 if let Some((_, scan_id)) = self
2036 .ignores_by_parent_abs_path
2037 .get_mut(abs_parent_path.as_path())
2038 {
2039 *scan_id = self.snapshot.scan_id;
2040 }
2041 }
2042 }
2043
2044 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2045 let mut inodes = TreeSet::default();
2046 for ancestor in path.ancestors().skip(1) {
2047 if let Some(entry) = self.entry_for_path(ancestor) {
2048 inodes.insert(entry.inode);
2049 }
2050 }
2051 inodes
2052 }
2053
2054 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2055 let mut new_ignores = Vec::new();
2056 for ancestor in abs_path.ancestors().skip(1) {
2057 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2058 new_ignores.push((ancestor, Some(ignore.clone())));
2059 } else {
2060 new_ignores.push((ancestor, None));
2061 }
2062 }
2063
2064 let mut ignore_stack = IgnoreStack::none();
2065 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2066 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2067 ignore_stack = IgnoreStack::all();
2068 break;
2069 } else if let Some(ignore) = ignore {
2070 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2071 }
2072 }
2073
2074 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2075 ignore_stack = IgnoreStack::all();
2076 }
2077
2078 ignore_stack
2079 }
2080}
2081
2082async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2083 let contents = fs.load(abs_path).await?;
2084 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2085 let mut builder = GitignoreBuilder::new(parent);
2086 for line in contents.lines() {
2087 builder.add_line(Some(abs_path.into()), line)?;
2088 }
2089 Ok(builder.build()?)
2090}
2091
impl WorktreeId {
    /// Builds a worktree id from the worktree's model handle id.
    pub fn from_usize(handle_id: usize) -> Self {
        Self(handle_id)
    }

    /// Deserializes an id received over the wire.
    pub(crate) fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    /// Serializes this id for transmission over the wire.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    pub fn to_usize(&self) -> usize {
        self.0
    }
}
2109
2110impl fmt::Display for WorktreeId {
2111 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2112 self.0.fmt(f)
2113 }
2114}
2115
2116impl Deref for Worktree {
2117 type Target = Snapshot;
2118
2119 fn deref(&self) -> &Self::Target {
2120 match self {
2121 Worktree::Local(worktree) => &worktree.snapshot,
2122 Worktree::Remote(worktree) => &worktree.snapshot,
2123 }
2124 }
2125}
2126
// Reads on a local worktree go straight to its local snapshot.
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2134
// Reads on a remote worktree go to its foreground snapshot.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2142
2143impl fmt::Debug for LocalWorktree {
2144 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2145 self.snapshot.fmt(f)
2146 }
2147}
2148
2149impl fmt::Debug for Snapshot {
2150 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2151 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2152 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2153
2154 impl<'a> fmt::Debug for EntriesByPath<'a> {
2155 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2156 f.debug_map()
2157 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2158 .finish()
2159 }
2160 }
2161
2162 impl<'a> fmt::Debug for EntriesById<'a> {
2163 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2164 f.debug_list().entries(self.0.iter()).finish()
2165 }
2166 }
2167
2168 f.debug_struct("Snapshot")
2169 .field("id", &self.id)
2170 .field("root_name", &self.root_name)
2171 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2172 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2173 .finish()
2174 }
2175}
2176
/// A file handle that ties a path to the worktree containing it.
#[derive(Clone, PartialEq)]
pub struct File {
    // The worktree this file belongs to.
    pub worktree: ModelHandle<Worktree>,
    // The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    // The file's last-observed modification time.
    pub mtime: SystemTime,
    // Id of the worktree entry corresponding to this file.
    pub(crate) entry_id: ProjectEntryId,
    // Whether the file lives in a local (as opposed to remote) worktree.
    pub(crate) is_local: bool,
    // Whether the file has been deleted on disk.
    pub(crate) is_deleted: bool,
}
2186
impl language::File for File {
    /// Returns `self` as a `LocalFile` only when the file belongs to a local
    /// worktree.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path, relative to the worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a display path: visible worktrees are shown by their root name,
    /// others by their absolute path (with the home directory abbreviated to
    /// `~` for local worktrees).
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means this file *is* the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission over the wire.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
2254
impl language::LocalFile for File {
    /// The file's absolute path: the worktree's absolute root joined with the
    /// file's relative path.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Reads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies remote collaborators that the buffer for this file was
    /// reloaded from disk. A no-op when the worktree isn't shared.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2298
2299impl File {
2300 pub fn from_proto(
2301 proto: rpc::proto::File,
2302 worktree: ModelHandle<Worktree>,
2303 cx: &AppContext,
2304 ) -> Result<Self> {
2305 let worktree_id = worktree
2306 .read(cx)
2307 .as_remote()
2308 .ok_or_else(|| anyhow!("not remote"))?
2309 .id();
2310
2311 if worktree_id.to_proto() != proto.worktree_id {
2312 return Err(anyhow!("worktree id does not match file"));
2313 }
2314
2315 Ok(Self {
2316 worktree,
2317 path: Path::new(&proto.path).into(),
2318 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2319 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2320 is_local: false,
2321 is_deleted: proto.is_deleted,
2322 })
2323 }
2324
2325 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2326 file.and_then(|f| f.as_any().downcast_ref())
2327 }
2328
2329 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2330 self.worktree.read(cx).id()
2331 }
2332
2333 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2334 if self.is_deleted {
2335 None
2336 } else {
2337 Some(self.entry_id)
2338 }
2339 }
2340}
2341
/// A single file or directory within a worktree.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    /// Path of the entry, relative to the worktree's root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether this path is excluded by a gitignore rule.
    pub is_ignored: bool,
}
2352
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose contents have not yet
    /// been scanned.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, along with the char bag used to fuzzy-match its path.
    File(CharBag),
}
2359
/// Describes how a path changed between two snapshots of the worktree.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    /// Used when the scanner cannot distinguish an addition from an update,
    /// e.g. for events received before the initial scan completed.
    AddedOrUpdated,
}
2367
2368impl Entry {
2369 fn new(
2370 path: Arc<Path>,
2371 metadata: &fs::Metadata,
2372 next_entry_id: &AtomicUsize,
2373 root_char_bag: CharBag,
2374 ) -> Self {
2375 Self {
2376 id: ProjectEntryId::new(next_entry_id),
2377 kind: if metadata.is_dir {
2378 EntryKind::PendingDir
2379 } else {
2380 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2381 },
2382 path,
2383 inode: metadata.inode,
2384 mtime: metadata.mtime,
2385 is_symlink: metadata.is_symlink,
2386 is_ignored: false,
2387 }
2388 }
2389
2390 pub fn is_dir(&self) -> bool {
2391 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2392 }
2393
2394 pub fn is_file(&self) -> bool {
2395 matches!(self.kind, EntryKind::File(_))
2396 }
2397}
2398
2399impl sum_tree::Item for Entry {
2400 type Summary = EntrySummary;
2401
2402 fn summary(&self) -> Self::Summary {
2403 let visible_count = if self.is_ignored { 0 } else { 1 };
2404 let file_count;
2405 let visible_file_count;
2406 if self.is_file() {
2407 file_count = 1;
2408 visible_file_count = visible_count;
2409 } else {
2410 file_count = 0;
2411 visible_file_count = 0;
2412 }
2413
2414 EntrySummary {
2415 max_path: self.path.clone(),
2416 count: 1,
2417 visible_count,
2418 file_count,
2419 visible_file_count,
2420 }
2421 }
2422}
2423
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and therefore ordered) by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2431
/// Aggregated statistics over a range of `Entry` items in the entries tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest path in the summarized range (paths are the tree's sort key).
    max_path: Arc<Path>,
    count: usize,
    /// Number of entries that are not gitignored.
    visible_count: usize,
    file_count: usize,
    /// Number of file entries that are not gitignored.
    visible_file_count: usize,
}
2440
2441impl Default for EntrySummary {
2442 fn default() -> Self {
2443 Self {
2444 max_path: Arc::from(Path::new("")),
2445 count: 0,
2446 visible_count: 0,
2447 file_count: 0,
2448 visible_file_count: 0,
2449 }
2450 }
2451}
2452
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // `rhs` summarizes entries that sort after ours, so its max path wins.
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2464
/// An item in the id-keyed tree, used to look up an entry's path by its id.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last updated.
    scan_id: usize,
}
2472
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// Path entries are summarized by their greatest entry id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2480
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Path entries are keyed (and therefore ordered) by their entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2488
/// Summary for the id-keyed `PathEntry` tree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2493
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // Items are ordered by id, so the right-hand summary holds the maximum.
        self.max_id = summary.max_id;
    }
}
2501
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    /// Allows seeking the id-keyed tree by `ProjectEntryId`.
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2507
/// Sort key for entries in the path-keyed entries tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2510
2511impl Default for PathKey {
2512 fn default() -> Self {
2513 Self(Path::new("").into())
2514 }
2515}
2516
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    /// Allows seeking the entries tree by path.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2522
/// Scans a local worktree's directory tree in the background, keeping its
/// snapshot up to date as file-system events arrive.
struct BackgroundScanner {
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel used to report scan progress and updated snapshots to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the worktree to rescan specific paths ahead of FS events.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The most recently reported state, used to compute change sets.
    prev_state: Mutex<BackgroundScannerState>,
    finished_initial_scan: bool,
}
2532
/// The snapshot most recently reported to the worktree, along with the paths
/// whose events produced it.
struct BackgroundScannerState {
    snapshot: Snapshot,
    event_paths: Vec<Arc<Path>>,
}
2537
impl BackgroundScanner {
    /// Creates a scanner for the given snapshot. The previously-reported state
    /// is initialized to the same snapshot, so the first status update reports
    /// changes relative to it.
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            prev_state: Mutex::new(BackgroundScannerState {
                snapshot: snapshot.snapshot.clone(),
                event_paths: Default::default(),
            }),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }

    /// Drives the scanner: performs the initial recursive scan, then loops,
    /// handling path refresh requests and file-system events until the event
    /// stream ends or the worktree is dropped.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore excludes the entire worktree, mark the
            // root entry itself as ignored.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    // Batch up any further events that are already ready.
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }

    /// Reloads entries for the given paths, records them as event paths, and
    /// sends a status update, releasing `barrier` once the update has been
    /// sent. Returns false if the status update channel has been closed.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        if let Some(mut paths) = self.reload_entries_for_paths(paths, None).await {
            paths.sort_unstable();
            util::extend_sorted(
                &mut self.prev_state.lock().event_paths,
                paths,
                usize::MAX,
                Ord::cmp,
            );
        }
        self.send_status_update(false, Some(barrier))
    }

    /// Handles a batch of file-system events: reloads the affected entries,
    /// recursively rescans changed directories, refreshes ignore statuses, and
    /// prunes state for git repositories that no longer exist.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            paths.sort_unstable();
            util::extend_sorted(
                &mut self.prev_state.lock().event_paths,
                paths,
                usize::MAX,
                Ord::cmp,
            );
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        // Drop state for git repositories whose `.git` directories no longer exist.

        let mut snapshot = self.snapshot.lock();

        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|work_directory_id, _| {
            snapshot
                .entry_for_id(*work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                })
        });
        snapshot.git_repositories = git_repositories;

        // Likewise drop repository entries whose backing repository was removed.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            snapshot
                .git_repositories
                .get(&entry.work_directory.0)
                .is_some()
        });
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
        self.prev_state.lock().event_paths.clear();
    }

    /// Processes the queue of directory scan jobs on all available CPUs,
    /// periodically sending progress updates when `enable_progress_updates`
    /// is true.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }

    /// Diffs the current snapshot against the previously-reported one and
    /// sends the resulting change set to the worktree. Returns false if the
    /// receiving end has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let new_snapshot = self.snapshot.lock().clone();
        let old_snapshot = mem::replace(&mut prev_state.snapshot, new_snapshot.snapshot.clone());

        let changes = self.build_change_set(
            &old_snapshot,
            &new_snapshot.snapshot,
            &prev_state.event_paths,
        );

        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot: new_snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }

    /// Reads one directory's children from the file system, creating entries
    /// for them and enqueuing scan jobs for any subdirectories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // it sorts near the beginning of the listing, so there should rarely be many
                // earlier entries. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // Directory entries and scan jobs are pushed in lockstep,
                        // so each directory entry pairs with the next queued job.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue subdirectory jobs only after the directory's own entries are
        // in the snapshot.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }

    /// Refreshes the entries for the given absolute paths, removing entries
    /// for paths that no longer exist on disk. When `scan_queue_tx` is
    /// provided, changed directories are enqueued to be recursively rescanned.
    /// Returns the affected worktree-relative paths, or `None` if the root
    /// path could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sorting puts descendants right after their ancestors, so dedup keeps
        // only the topmost path of each affected subtree.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    self.reload_repo_for_file_path(&path, &mut snapshot, self.fs.as_ref());

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Guard against symlink cycles, as in `scan_dir`.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {
                    self.remove_repo_path(&path, &mut snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }

    /// Removes the git status entries for a deleted path and its descendants.
    /// Paths inside `.git` directories are left untouched.
    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        if !path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let scan_id = snapshot.scan_id;
            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            // Remove the statuses of the path along with all of its descendants.
            snapshot.repository_entries.update(&work_dir, |entry| {
                entry
                    .statuses
                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
            });
        }

        Some(())
    }

    /// Refreshes git repository state for a change to `path`. A change inside
    /// a `.git` directory triggers a full reload of that repository's index
    /// and statuses; a change to a work-tree file refreshes just that file's
    /// status.
    fn reload_repo_for_file_path(
        &self,
        path: &Path,
        snapshot: &mut LocalSnapshot,
        fs: &dyn Fs,
    ) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let (entry_id, repo_ptr) = {
                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
                    // No repository is tracked for this `.git` directory yet,
                    // so build one now.
                    let dot_git_dir = path.ancestors()
                        .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
                        .next()?;

                    snapshot.build_repo(dot_git_dir.into(), fs);
                    return None;
                };
                // Short circuit if this repository was already fully scanned
                // during the current scan.
                if repo.full_scan_id == scan_id {
                    return None;
                }
                (*entry_id, repo.repo_ptr.to_owned())
            };

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            let repo = repo_ptr.lock();
            repo.reload_index();
            let branch = repo.branch_name();
            let statuses = repo.statuses().unwrap_or_default();

            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.scan_id = scan_id;
                entry.full_scan_id = scan_id;
            });

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.branch = branch.map(Into::into);
                entry.statuses = statuses;
            });
        } else {
            // Ignored files don't get git statuses; drop any stale ones.
            if snapshot
                .entry_for_path(&path)
                .map(|entry| entry.is_ignored)
                .unwrap_or(false)
            {
                self.remove_repo_path(&path, snapshot);
                return None;
            }

            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let status = {
                let local_repo = snapshot.get_local_repo(&repo)?;

                // Short circuit if we've already scanned everything
                if local_repo.full_scan_id == scan_id {
                    return None;
                }

                let git_ptr = local_repo.repo_ptr.lock();
                git_ptr.status(&repo_path)
            };

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            snapshot.repository_entries.update(&work_dir, |entry| {
                if let Some(status) = status {
                    entry.statuses.insert(repo_path, status);
                } else {
                    entry.statuses.remove(&repo_path);
                }
            });
        }

        Some(())
    }

    /// Recomputes entries' ignore statuses after `.gitignore` files change:
    /// drops ignores whose files were deleted, then recursively re-evaluates
    /// the directory trees beneath any updated ignore files.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of this path; the recursive update covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }

    /// Applies an updated ignore stack to one directory's children, enqueuing
    /// jobs for subdirectories and writing edits for any entries whose ignore
    /// status changed.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Record an edit only when the entry's ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Computes the set of path changes between two snapshots, restricted to
    /// the given event paths and their descendants.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> HashMap<(Arc<Path>, ProjectEntryId), PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path.clone());
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            // Walk both snapshots in parallel over the subtree rooted at `path`.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event path
                        // and its descendants.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert((old_entry.path.clone(), old_entry.id), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(
                                        (new_entry.path.clone(), new_entry.id),
                                        AddedOrUpdated,
                                    );
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert((new_entry.path.clone(), new_entry.id), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert((new_entry.path.clone(), new_entry.id), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert((old_entry.path.clone(), old_entry.id), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert((new_entry.path.clone(), new_entry.id), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes
    }

    /// Waits for the progress-update interval. Pends forever when `running` is
    /// false, so the corresponding `select` arm never fires.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        // Under a fake file system in tests, use a randomized delay instead of
        // real wall-clock time.
        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
}
3350
3351fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3352 let mut result = root_char_bag;
3353 result.extend(
3354 path.to_string_lossy()
3355 .chars()
3356 .map(|c| c.to_ascii_lowercase()),
3357 );
3358 result
3359}
3360
/// A unit of work for the recursive directory scan: one directory to load.
struct ScanJob {
    abs_path: Arc<Path>,
    /// The directory's path relative to the worktree root.
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
3368
/// A unit of work for recomputing ignore statuses beneath a directory,
/// e.g. after a `.gitignore` change.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose contents need re-evaluation.
    abs_path: Arc<Path>,
    // Gitignore rules in effect for this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Queue used to enqueue follow-up jobs for subdirectories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3374
/// Test-only helpers available on a worktree's model handle.
pub trait WorktreeHandle {
    /// Waits until all file-system events that predate the call have been
    /// processed by the worktree (available in test builds only).
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3382
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for it to appear in the
            // snapshot; by then, all earlier events must have been handled.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // Remove the sentinel again so the directory is left unchanged.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3423
/// Summary dimension accumulated while traversing the entry tree: counts
/// of entries preceding the cursor, bucketed by visibility (gitignored or
/// not) and by file-vs-any-entry. See `count` for the bucket semantics.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest entry path seen so far.
    max_path: &'a Path,
    // All entries, including directories and ignored entries.
    count: usize,
    // Entries (files and dirs) that are not gitignored.
    visible_count: usize,
    // File entries, including ignored ones.
    file_count: usize,
    // File entries that are not gitignored.
    visible_file_count: usize,
}
3432
3433impl<'a> TraversalProgress<'a> {
3434 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3435 match (include_ignored, include_dirs) {
3436 (true, true) => self.count,
3437 (true, false) => self.file_count,
3438 (false, true) => self.visible_count,
3439 (false, false) => self.visible_file_count,
3440 }
3441 }
3442}
3443
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Folds one subtree's `EntrySummary` into this running dimension as
    /// the cursor advances rightward through the tree.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // The summary's max path supersedes ours, since it lies to the right.
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3453
3454impl<'a> Default for TraversalProgress<'a> {
3455 fn default() -> Self {
3456 Self {
3457 max_path: Path::new(""),
3458 count: 0,
3459 visible_count: 0,
3460 file_count: 0,
3461 visible_file_count: 0,
3462 }
3463 }
3464}
3465
/// A cursor-based iterator over a snapshot's entries, optionally
/// filtering out directories and/or gitignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3471
impl<'a> Traversal<'a> {
    /// Advances past the current entry to the next one matching this
    /// traversal's filters. Returns false at the end of the tree.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` filtered entries precede the cursor.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips the current entry's entire subtree, stopping at the next
    /// entry that is not a descendant and matches the filters. Loops in
    /// case that entry is itself filtered out.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the cursor's current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of filtered entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3517
3518impl<'a> Iterator for Traversal<'a> {
3519 type Item = &'a Entry;
3520
3521 fn next(&mut self) -> Option<Self::Item> {
3522 if let Some(item) = self.entry() {
3523 self.advance();
3524 Some(item)
3525 } else {
3526 None
3527 }
3528 }
3529}
3530
/// Seek targets for positioning a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the entry with the given path.
    Path(&'a Path),
    /// Seek to the first entry that is not within the given path's subtree.
    PathSuccessor(&'a Path),
    /// Seek until `count` entries (under the given filters) precede the cursor.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3541
3542impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3543 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3544 match self {
3545 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3546 TraversalTarget::PathSuccessor(path) => {
3547 if !cursor_location.max_path.starts_with(path) {
3548 Ordering::Equal
3549 } else {
3550 Ordering::Greater
3551 }
3552 }
3553 TraversalTarget::Count {
3554 count,
3555 include_dirs,
3556 include_ignored,
3557 } => Ord::cmp(
3558 count,
3559 &cursor_location.count(*include_dirs, *include_ignored),
3560 ),
3561 }
3562 }
3563}
3564
/// Iterator over the direct children of `parent_path`, implemented by
/// skipping each child's own subtree via `advance_to_sibling`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3569
3570impl<'a> Iterator for ChildEntriesIter<'a> {
3571 type Item = &'a Entry;
3572
3573 fn next(&mut self) -> Option<Self::Item> {
3574 if let Some(item) = self.traversal.entry() {
3575 if item.path.starts_with(&self.parent_path) {
3576 self.traversal.advance_to_sibling();
3577 return Some(item);
3578 }
3579 }
3580 None
3581 }
3582}
3583
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes a worktree entry into its protobuf representation.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths are transmitted as (lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3597
3598impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3599 type Error = anyhow::Error;
3600
3601 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3602 if let Some(mtime) = entry.mtime {
3603 let kind = if entry.is_dir {
3604 EntryKind::Dir
3605 } else {
3606 let mut char_bag = *root_char_bag;
3607 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3608 EntryKind::File(char_bag)
3609 };
3610 let path: Arc<Path> = PathBuf::from(entry.path).into();
3611 Ok(Entry {
3612 id: ProjectEntryId::from_proto(entry.id),
3613 kind,
3614 path,
3615 inode: entry.inode,
3616 mtime: mtime.into(),
3617 is_symlink: entry.is_symlink,
3618 is_ignored: entry.is_ignored,
3619 })
3620 } else {
3621 Err(anyhow!(
3622 "missing mtime in remote worktree entry {:?}",
3623 entry.path
3624 ))
3625 }
3626 }
3627}
3628
3629#[cfg(test)]
3630mod tests {
3631 use super::*;
3632 use fs::{FakeFs, RealFs};
3633 use gpui::{executor::Deterministic, TestAppContext};
3634 use pretty_assertions::assert_eq;
3635 use rand::prelude::*;
3636 use serde_json::json;
3637 use std::{env, fmt::Write};
3638 use util::{http::FakeHttpClient, test::temp_tree};
3639
    // Verifies that `entries(false)` omits gitignored entries while
    // `entries(true)` includes them.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // `a/b` is gitignored, so it is hidden here...
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // ...but present when ignored entries are requested.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3697
    // Verifies that symlinks forming a cycle (each dir links back to its
    // parent) are listed as entries but not descended into, and that
    // renaming one of them is picked up by a rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each of these symlinks points back up the tree, creating cycles.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // The symlinked dirs appear, but their contents are not
            // expanded — the scan terminates despite the cycle.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3777
    // Verifies gitignore handling on the real filesystem: ignore rules from
    // an ancestor directory outside the worktree apply, and files created
    // after the initial scan get correct ignore statuses on rescan.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Statuses established by the initial scan.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create new files after the scan; a rescan must classify them too.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3856
    // Verifies that paths are mapped to the nearest enclosing git repository
    // (including nested repos), that `.git` changes emit
    // `UpdatedGitRepositories` events, and that deleting `.git` removes the
    // repository association.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // `c.txt` sits outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1").to_owned())
            );

            // The nested repo takes precedence for paths inside it.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1/deps/dep1").to_owned())
            );
        });

        let repo_update_events = Arc::new(Mutex::new(vec![]));
        tree.update(cx, |_, cx| {
            let repo_update_events = repo_update_events.clone();
            cx.subscribe(&tree, move |_, _, event, _| {
                if let Event::UpdatedGitRepositories(update) = event {
                    repo_update_events.lock().push(update.clone());
                }
            })
            .detach();
        });

        // Touching a file inside `.git` should fire a repository update.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        assert_eq!(
            repo_update_events.lock()[0]
                .keys()
                .cloned()
                .collect::<Vec<Arc<Path>>>(),
            vec![Path::new("dir1").into()]
        );

        // Removing `.git` should dissolve the repository association.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3947
3948 #[gpui::test]
3949 async fn test_git_status(cx: &mut TestAppContext) {
3950 #[track_caller]
3951 fn git_init(path: &Path) -> git2::Repository {
3952 git2::Repository::init(path).expect("Failed to initialize git repository")
3953 }
3954
3955 #[track_caller]
3956 fn git_add(path: &Path, repo: &git2::Repository) {
3957 let mut index = repo.index().expect("Failed to get index");
3958 index.add_path(path).expect("Failed to add a.txt");
3959 index.write().expect("Failed to write index");
3960 }
3961
3962 #[track_caller]
3963 fn git_remove_index(path: &Path, repo: &git2::Repository) {
3964 let mut index = repo.index().expect("Failed to get index");
3965 index.remove_path(path).expect("Failed to add a.txt");
3966 index.write().expect("Failed to write index");
3967 }
3968
3969 #[track_caller]
3970 fn git_commit(msg: &'static str, repo: &git2::Repository) {
3971 use git2::Signature;
3972
3973 let signature = Signature::now("test", "test@zed.dev").unwrap();
3974 let oid = repo.index().unwrap().write_tree().unwrap();
3975 let tree = repo.find_tree(oid).unwrap();
3976 if let Some(head) = repo.head().ok() {
3977 let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
3978
3979 let parent_commit = parent_obj.as_commit().unwrap();
3980
3981 repo.commit(
3982 Some("HEAD"),
3983 &signature,
3984 &signature,
3985 msg,
3986 &tree,
3987 &[parent_commit],
3988 )
3989 .expect("Failed to commit with parent");
3990 } else {
3991 repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
3992 .expect("Failed to commit");
3993 }
3994 }
3995
3996 #[track_caller]
3997 fn git_stash(repo: &mut git2::Repository) {
3998 use git2::Signature;
3999
4000 let signature = Signature::now("test", "test@zed.dev").unwrap();
4001 repo.stash_save(&signature, "N/A", None)
4002 .expect("Failed to stash");
4003 }
4004
4005 #[track_caller]
4006 fn git_reset(offset: usize, repo: &git2::Repository) {
4007 let head = repo.head().expect("Couldn't get repo head");
4008 let object = head.peel(git2::ObjectType::Commit).unwrap();
4009 let commit = object.as_commit().unwrap();
4010 let new_head = commit
4011 .parents()
4012 .inspect(|parnet| {
4013 parnet.message();
4014 })
4015 .skip(offset)
4016 .next()
4017 .expect("Not enough history");
4018 repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
4019 .expect("Could not reset");
4020 }
4021
4022 #[allow(dead_code)]
4023 #[track_caller]
4024 fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
4025 repo.statuses(None)
4026 .unwrap()
4027 .iter()
4028 .map(|status| (status.path().unwrap().to_string(), status.status()))
4029 .collect()
4030 }
4031
4032 const IGNORE_RULE: &'static str = "**/target";
4033
4034 let root = temp_tree(json!({
4035 "project": {
4036 "a.txt": "a",
4037 "b.txt": "bb",
4038 "c": {
4039 "d": {
4040 "e.txt": "eee"
4041 }
4042 },
4043 "f.txt": "ffff",
4044 "target": {
4045 "build_file": "???"
4046 },
4047 ".gitignore": IGNORE_RULE
4048 },
4049
4050 }));
4051
4052 let http_client = FakeHttpClient::with_404_response();
4053 let client = cx.read(|cx| Client::new(http_client, cx));
4054 let tree = Worktree::local(
4055 client,
4056 root.path(),
4057 true,
4058 Arc::new(RealFs),
4059 Default::default(),
4060 &mut cx.to_async(),
4061 )
4062 .await
4063 .unwrap();
4064
4065 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4066 .await;
4067
4068 const A_TXT: &'static str = "a.txt";
4069 const B_TXT: &'static str = "b.txt";
4070 const E_TXT: &'static str = "c/d/e.txt";
4071 const F_TXT: &'static str = "f.txt";
4072 const DOTGITIGNORE: &'static str = ".gitignore";
4073 const BUILD_FILE: &'static str = "target/build_file";
4074
4075 let work_dir = root.path().join("project");
4076 let mut repo = git_init(work_dir.as_path());
4077 repo.add_ignore_rule(IGNORE_RULE).unwrap();
4078 git_add(Path::new(A_TXT), &repo);
4079 git_add(Path::new(E_TXT), &repo);
4080 git_add(Path::new(DOTGITIGNORE), &repo);
4081 git_commit("Initial commit", &repo);
4082
4083 std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
4084
4085 tree.flush_fs_events(cx).await;
4086
4087 // Check that the right git state is observed on startup
4088 tree.read_with(cx, |tree, _cx| {
4089 let snapshot = tree.snapshot();
4090 assert_eq!(snapshot.repository_entries.iter().count(), 1);
4091 let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
4092 assert_eq!(dir.0.as_ref(), Path::new("project"));
4093
4094 assert_eq!(repo.statuses.iter().count(), 3);
4095 assert_eq!(
4096 repo.statuses.get(&Path::new(A_TXT).into()),
4097 Some(&GitFileStatus::Modified)
4098 );
4099 assert_eq!(
4100 repo.statuses.get(&Path::new(B_TXT).into()),
4101 Some(&GitFileStatus::Added)
4102 );
4103 assert_eq!(
4104 repo.statuses.get(&Path::new(F_TXT).into()),
4105 Some(&GitFileStatus::Added)
4106 );
4107 });
4108
4109 git_add(Path::new(A_TXT), &repo);
4110 git_add(Path::new(B_TXT), &repo);
4111 git_commit("Committing modified and added", &repo);
4112 tree.flush_fs_events(cx).await;
4113
4114 // Check that repo only changes are tracked
4115 tree.read_with(cx, |tree, _cx| {
4116 let snapshot = tree.snapshot();
4117 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4118
4119 assert_eq!(repo.statuses.iter().count(), 1);
4120 assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
4121 assert_eq!(repo.statuses.get(&Path::new(B_TXT).into()), None);
4122 assert_eq!(
4123 repo.statuses.get(&Path::new(F_TXT).into()),
4124 Some(&GitFileStatus::Added)
4125 );
4126 });
4127
4128 git_reset(0, &repo);
4129 git_remove_index(Path::new(B_TXT), &repo);
4130 git_stash(&mut repo);
4131 std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
4132 std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
4133 tree.flush_fs_events(cx).await;
4134
4135 // Check that more complex repo changes are tracked
4136 tree.read_with(cx, |tree, _cx| {
4137 let snapshot = tree.snapshot();
4138 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4139
4140 assert_eq!(repo.statuses.iter().count(), 3);
4141 assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
4142 assert_eq!(
4143 repo.statuses.get(&Path::new(B_TXT).into()),
4144 Some(&GitFileStatus::Added)
4145 );
4146 assert_eq!(
4147 repo.statuses.get(&Path::new(E_TXT).into()),
4148 Some(&GitFileStatus::Modified)
4149 );
4150 assert_eq!(
4151 repo.statuses.get(&Path::new(F_TXT).into()),
4152 Some(&GitFileStatus::Added)
4153 );
4154 });
4155
4156 std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
4157 std::fs::remove_dir_all(work_dir.join("c")).unwrap();
4158 std::fs::write(
4159 work_dir.join(DOTGITIGNORE),
4160 [IGNORE_RULE, "f.txt"].join("\n"),
4161 )
4162 .unwrap();
4163
4164 git_add(Path::new(DOTGITIGNORE), &repo);
4165 git_commit("Committing modified git ignore", &repo);
4166
4167 tree.flush_fs_events(cx).await;
4168
4169 // Check that non-repo behavior is tracked
4170 tree.read_with(cx, |tree, _cx| {
4171 let snapshot = tree.snapshot();
4172 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4173
4174 assert_eq!(repo.statuses.iter().count(), 0);
4175 assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
4176 assert_eq!(repo.statuses.get(&Path::new(B_TXT).into()), None);
4177 assert_eq!(repo.statuses.get(&Path::new(E_TXT).into()), None);
4178 assert_eq!(repo.statuses.get(&Path::new(F_TXT).into()), None);
4179 });
4180 }
4181
    // Verifies that `write_file` creates entries with correct ignore
    // statuses in both tracked and gitignored directories.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4235
    // Verifies that a directory created while the initial scan is still
    // running appears in the snapshot, and that remote snapshot updates
    // built from this state apply cleanly.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Note: the initial scan has not been awaited, so this entry is
        // created concurrently with scanning.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // A remote update built against the pre-creation snapshot must
        // reproduce the new state exactly.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
4284
    // Randomized test: mutates the worktree while its initial scan is in
    // progress, repeatedly checking snapshot invariants and that remote
    // updates built from intermediate states apply correctly.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Both knobs are overridable from the environment for soak testing.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally sync the tracking snapshot via a remote update.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        // Final sync after the scan settles.
        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
4361
4362 #[gpui::test(iterations = 100)]
4363 async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
4364 let operations = env::var("OPERATIONS")
4365 .map(|o| o.parse().unwrap())
4366 .unwrap_or(40);
4367 let initial_entries = env::var("INITIAL_ENTRIES")
4368 .map(|o| o.parse().unwrap())
4369 .unwrap_or(20);
4370
4371 let root_dir = Path::new("/test");
4372 let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4373 fs.as_fake().insert_tree(root_dir, json!({})).await;
4374 for _ in 0..initial_entries {
4375 randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4376 }
4377 log::info!("generated initial tree");
4378
4379 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4380 let worktree = Worktree::local(
4381 client.clone(),
4382 root_dir,
4383 true,
4384 fs.clone(),
4385 Default::default(),
4386 &mut cx.to_async(),
4387 )
4388 .await
4389 .unwrap();
4390
4391 worktree
4392 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4393 .await;
4394
4395 // After the initial scan is complete, the `UpdatedEntries` event can
4396 // be used to follow along with all changes to the worktree's snapshot.
4397 worktree.update(cx, |tree, cx| {
4398 let mut paths = tree
4399 .as_local()
4400 .unwrap()
4401 .paths()
4402 .cloned()
4403 .collect::<Vec<_>>();
4404
4405 cx.subscribe(&worktree, move |tree, _, event, _| {
4406 if let Event::UpdatedEntries(changes) = event {
4407 for ((path, _), change_type) in changes.iter() {
4408 let path = path.clone();
4409 let ix = match paths.binary_search(&path) {
4410 Ok(ix) | Err(ix) => ix,
4411 };
4412 match change_type {
4413 PathChange::Added => {
4414 assert_ne!(paths.get(ix), Some(&path));
4415 paths.insert(ix, path);
4416 }
4417
4418 PathChange::Removed => {
4419 assert_eq!(paths.get(ix), Some(&path));
4420 paths.remove(ix);
4421 }
4422
4423 PathChange::Updated => {
4424 assert_eq!(paths.get(ix), Some(&path));
4425 }
4426
4427 PathChange::AddedOrUpdated => {
4428 if paths[ix] != path {
4429 paths.insert(ix, path);
4430 }
4431 }
4432 }
4433 }
4434
4435 let new_paths = tree.paths().cloned().collect::<Vec<_>>();
4436 assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
4437 }
4438 })
4439 .detach();
4440 });
4441
4442 let mut snapshots = Vec::new();
4443 let mut mutations_len = operations;
4444 while mutations_len > 1 {
4445 if rng.gen_bool(0.2) {
4446 worktree
4447 .update(cx, |worktree, cx| {
4448 randomly_mutate_worktree(worktree, &mut rng, cx)
4449 })
4450 .await
4451 .unwrap();
4452 } else {
4453 randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4454 }
4455
4456 let buffered_event_count = fs.as_fake().buffered_event_count().await;
4457 if buffered_event_count > 0 && rng.gen_bool(0.3) {
4458 let len = rng.gen_range(0..=buffered_event_count);
4459 log::info!("flushing {} events", len);
4460 fs.as_fake().flush_events(len).await;
4461 } else {
4462 randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
4463 mutations_len -= 1;
4464 }
4465
4466 cx.foreground().run_until_parked();
4467 if rng.gen_bool(0.2) {
4468 log::info!("storing snapshot {}", snapshots.len());
4469 let snapshot =
4470 worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4471 snapshots.push(snapshot);
4472 }
4473 }
4474
4475 log::info!("quiescing");
4476 fs.as_fake().flush_events(usize::MAX).await;
4477 cx.foreground().run_until_parked();
4478 let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4479 snapshot.check_invariants();
4480
4481 {
4482 let new_worktree = Worktree::local(
4483 client.clone(),
4484 root_dir,
4485 true,
4486 fs.clone(),
4487 Default::default(),
4488 &mut cx.to_async(),
4489 )
4490 .await
4491 .unwrap();
4492 new_worktree
4493 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4494 .await;
4495 let new_snapshot =
4496 new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4497 assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
4498 }
4499
4500 for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
4501 let include_ignored = rng.gen::<bool>();
4502 if !include_ignored {
4503 let mut entries_by_path_edits = Vec::new();
4504 let mut entries_by_id_edits = Vec::new();
4505 for entry in prev_snapshot
4506 .entries_by_id
4507 .cursor::<()>()
4508 .filter(|e| e.is_ignored)
4509 {
4510 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4511 entries_by_id_edits.push(Edit::Remove(entry.id));
4512 }
4513
4514 prev_snapshot
4515 .entries_by_path
4516 .edit(entries_by_path_edits, &());
4517 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4518 }
4519
4520 let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
4521 prev_snapshot.apply_remote_update(update.clone()).unwrap();
4522 assert_eq!(
4523 prev_snapshot.to_vec(include_ignored),
4524 snapshot.to_vec(include_ignored),
4525 "wrong update for snapshot {i}. update: {:?}",
4526 update
4527 );
4528 }
4529 }
4530
4531 fn randomly_mutate_worktree(
4532 worktree: &mut Worktree,
4533 rng: &mut impl Rng,
4534 cx: &mut ModelContext<Worktree>,
4535 ) -> Task<Result<()>> {
4536 let worktree = worktree.as_local_mut().unwrap();
4537 let snapshot = worktree.snapshot();
4538 let entry = snapshot.entries(false).choose(rng).unwrap();
4539
4540 match rng.gen_range(0_u32..100) {
4541 0..=33 if entry.path.as_ref() != Path::new("") => {
4542 log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
4543 worktree.delete_entry(entry.id, cx).unwrap()
4544 }
4545 ..=66 if entry.path.as_ref() != Path::new("") => {
4546 let other_entry = snapshot.entries(false).choose(rng).unwrap();
4547 let new_parent_path = if other_entry.is_dir() {
4548 other_entry.path.clone()
4549 } else {
4550 other_entry.path.parent().unwrap().into()
4551 };
4552 let mut new_path = new_parent_path.join(gen_name(rng));
4553 if new_path.starts_with(&entry.path) {
4554 new_path = gen_name(rng).into();
4555 }
4556
4557 log::info!(
4558 "renaming entry {:?} ({}) to {:?}",
4559 entry.path,
4560 entry.id.0,
4561 new_path
4562 );
4563 let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
4564 cx.foreground().spawn(async move {
4565 task.await?;
4566 Ok(())
4567 })
4568 }
4569 _ => {
4570 let task = if entry.is_dir() {
4571 let child_path = entry.path.join(gen_name(rng));
4572 let is_dir = rng.gen_bool(0.3);
4573 log::info!(
4574 "creating {} at {:?}",
4575 if is_dir { "dir" } else { "file" },
4576 child_path,
4577 );
4578 worktree.create_entry(child_path, is_dir, cx)
4579 } else {
4580 log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
4581 worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
4582 };
4583 cx.foreground().spawn(async move {
4584 task.await?;
4585 Ok(())
4586 })
4587 }
4588 }
4589 }
4590
    /// Applies one random mutation to the fake filesystem beneath `root_path`:
    /// inserting a new file/dir (with probability `insertion_probability`),
    /// writing a random `.gitignore` (small fixed probability), or otherwise
    /// renaming/deleting an existing entry. The exact sequence of `rng` draws
    /// determines the mutation, so tests replaying a seed reproduce it exactly.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition every existing path under the root into files and dirs.
        // `root_path` itself lands in `dirs`, so `dirs` is never empty.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // If the tree contains nothing but the root directory, insertion is
        // the only possible mutation; otherwise insert with the given odds.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Write a `.gitignore` into a random directory, listing a random
            // subset of the files and directories beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // Both lists include `ignore_dir_path` descendants; `subdirs`
            // also contains `ignore_dir_path` itself (starts_with is true).
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // NOTE(review): exclusive upper bound here (vs. inclusive for
                // files above) — presumably so we never ignore every subdir,
                // including `ignore_dir_path` itself; confirm intent.
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Build the gitignore body: one path per line, relative to the
            // directory containing the ignore file.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Pick any file, or any directory other than the root
            // (`dirs[0]` is the root, so slice it off), to rename or delete.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Choose a destination directory that is not beneath the
                // entry being moved (moving into oneself is invalid).
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally replace the destination directory wholesale:
                // remove it first, then rename the entry to its exact path.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4749
4750 fn gen_name(rng: &mut impl Rng) -> String {
4751 (0..6)
4752 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4753 .map(char::from)
4754 .collect()
4755 }
4756
4757 impl LocalSnapshot {
4758 fn check_invariants(&self) {
4759 assert_eq!(
4760 self.entries_by_path
4761 .cursor::<()>()
4762 .map(|e| (&e.path, e.id))
4763 .collect::<Vec<_>>(),
4764 self.entries_by_id
4765 .cursor::<()>()
4766 .map(|e| (&e.path, e.id))
4767 .collect::<collections::BTreeSet<_>>()
4768 .into_iter()
4769 .collect::<Vec<_>>(),
4770 "entries_by_path and entries_by_id are inconsistent"
4771 );
4772
4773 let mut files = self.files(true, 0);
4774 let mut visible_files = self.files(false, 0);
4775 for entry in self.entries_by_path.cursor::<()>() {
4776 if entry.is_file() {
4777 assert_eq!(files.next().unwrap().inode, entry.inode);
4778 if !entry.is_ignored {
4779 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4780 }
4781 }
4782 }
4783
4784 assert!(files.next().is_none());
4785 assert!(visible_files.next().is_none());
4786
4787 let mut bfs_paths = Vec::new();
4788 let mut stack = vec![Path::new("")];
4789 while let Some(path) = stack.pop() {
4790 bfs_paths.push(path);
4791 let ix = stack.len();
4792 for child_entry in self.child_entries(path) {
4793 stack.insert(ix, &child_entry.path);
4794 }
4795 }
4796
4797 let dfs_paths_via_iter = self
4798 .entries_by_path
4799 .cursor::<()>()
4800 .map(|e| e.path.as_ref())
4801 .collect::<Vec<_>>();
4802 assert_eq!(bfs_paths, dfs_paths_via_iter);
4803
4804 let dfs_paths_via_traversal = self
4805 .entries(true)
4806 .map(|e| e.path.as_ref())
4807 .collect::<Vec<_>>();
4808 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4809
4810 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4811 let ignore_parent_path =
4812 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4813 assert!(self.entry_for_path(&ignore_parent_path).is_some());
4814 assert!(self
4815 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4816 .is_some());
4817 }
4818 }
4819
4820 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4821 let mut paths = Vec::new();
4822 for entry in self.entries_by_path.cursor::<()>() {
4823 if include_ignored || !entry.is_ignored {
4824 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4825 }
4826 }
4827 paths.sort_by(|a, b| a.0.cmp(b.0));
4828 paths
4829 }
4830 }
4831}