1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{
10 repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use smol::channel::{self, Sender};
40use std::{
41 any::Any,
42 cmp::{self, Ordering},
43 convert::TryFrom,
44 ffi::OsStr,
45 fmt,
46 future::Future,
47 mem,
48 ops::{Deref, DerefMut},
49 path::{Path, PathBuf},
50 pin::Pin,
51 sync::{
52 atomic::{AtomicUsize, Ordering::SeqCst},
53 Arc,
54 },
55 time::{Duration, SystemTime},
56};
57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
58use util::{paths::HOME, ResultExt, TryFutureExt};
59
/// Identifies a worktree within a project.
///
/// Wraps the id of the worktree's model handle (see `WorktreeId::from_usize`
/// at the construction sites below), so it is only unique within a single
/// running instance.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A directory tree tracked by a project: either rooted on the local
/// filesystem, or mirrored from a remote collaborator.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// A worktree whose files live on this machine and are scanned/watched by a
/// background task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Asks the background scanner to (re)scan a set of absolute paths; the
    /// paired barrier sender is dropped once the resulting update has been
    /// processed, releasing any caller awaiting it (see `refresh_entry`).
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    /// Watch pair tracking whether the background scanner is currently busy
    /// (see `scan_complete`).
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    /// Keeps the background scanner running for the lifetime of the worktree.
    _background_scanner_task: Task<()>,
    /// Present while this worktree is shared with a remote project.
    share: Option<ShareState>,
    /// Full diagnostic entries per worktree-relative path, sorted by language
    /// server id (see `update_diagnostics`).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Error/warning counts per path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    /// Whether this worktree is shown to the user (see `Worktree::is_visible`).
    visible: bool,
}
86
/// A worktree mirrored from a collaborator's machine; its state is kept in
/// sync by applying `proto::UpdateWorktree` messages.
pub struct RemoteWorktree {
    /// Foreground copy of the snapshot, refreshed from `background_snapshot`
    /// whenever an update has been applied.
    snapshot: Snapshot,
    /// Updated off the main thread as remote updates arrive.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Feeds incoming updates to the background task that applies them.
    /// NOTE(review): presumably set to `None` on disconnect — confirm at the
    /// use sites outside this chunk.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Waiters keyed by scan id, resolved once that scan has been observed
    /// (see the drain loop in `Worktree::remote`).
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    /// Error/warning counts per path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    /// Whether this worktree is shown to the user.
    visible: bool,
    disconnected: bool,
}
99
/// A point-in-time view of a worktree's entries and git repositories.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root.
    abs_path: Arc<Path>,
    /// File name of the root, used for display and fuzzy matching.
    root_name: String,
    /// Lowercased character bag of `root_name`, for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    /// Git repositories keyed by their work directory (the folder containing
    /// `.git`), relative to the worktree root.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
122
123impl Snapshot {
124 pub fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
125 let mut max_len = 0;
126 let mut current_candidate = None;
127 for (work_directory, repo) in (&self.repository_entries).iter() {
128 if repo.contains(self, path) {
129 if work_directory.0.as_os_str().len() >= max_len {
130 current_candidate = Some(repo);
131 max_len = work_directory.0.as_os_str().len();
132 } else {
133 break;
134 }
135 }
136 }
137
138 current_candidate.map(|entry| entry.to_owned())
139 }
140}
141
/// Serializable state of a git repository within a worktree, shared with
/// collaborators (see the `proto::RepositoryEntry` conversions below).
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    /// Entry id of the repository's work directory (the folder containing `.git`).
    pub(crate) work_directory: WorkDirectoryEntry,
    /// Current branch name, if known.
    pub(crate) branch: Option<Arc<str>>,
    /// Git file statuses keyed by path relative to the work directory.
    pub(crate) worktree_statuses: TreeMap<RepoPath, GitFileStatus>,
}
148
149fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
150 proto::GitStatus::from_i32(git_status).map(|status| match status {
151 proto::GitStatus::Added => GitFileStatus::Added,
152 proto::GitStatus::Modified => GitFileStatus::Modified,
153 proto::GitStatus::Conflict => GitFileStatus::Conflict,
154 })
155}
156
impl RepositoryEntry {
    /// Returns the current branch name, if one is known.
    pub fn branch(&self) -> Option<Arc<str>> {
        self.branch.clone()
    }

    /// Returns the project entry id of the repository's work directory.
    pub fn work_directory_id(&self) -> ProjectEntryId {
        *self.work_directory
    }

    /// Resolves the work directory to its worktree-relative path, if the
    /// corresponding entry still exists in `snapshot`.
    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
        snapshot
            .entry_for_id(self.work_directory_id())
            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
    }

    /// Whether the worktree-relative `path` lies inside this repository's
    /// work directory.
    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
        self.work_directory.contains(snapshot, path)
    }

    /// Returns the git status recorded for exactly the file at `path`
    /// (worktree-relative), if any.
    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| self.worktree_statuses.get(&repo_path))
            .cloned()
    }

    /// Returns the git status for `path`, which may be a directory.
    ///
    /// Scans statuses whose keys start with `path`: an exact match yields its
    /// own status, while any status'd descendant causes the directory itself
    /// to report `Modified`.
    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| {
                self.worktree_statuses
                    .iter_from(&repo_path)
                    .take_while(|(key, _)| key.starts_with(&repo_path))
                    .map(|(path, status)| {
                        if path == &repo_path {
                            status
                        } else {
                            &GitFileStatus::Modified
                        }
                    })
                    .next()
                    .copied()
            })
    }

    /// Builds a wire-format update that transforms `other`'s statuses into
    /// `self`'s: statuses present only in `self` (or changed) are emitted as
    /// updates, and statuses present only in `other` as removals.
    ///
    /// Performs a sorted merge-walk over the two status maps; relies on
    /// `TreeMap::iter` yielding entries in ascending `RepoPath` order.
    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
        let mut removed_statuses: Vec<String> = Vec::new();

        let mut self_statuses = self.worktree_statuses.iter().peekable();
        let mut other_statuses = other.worktree_statuses.iter().peekable();
        loop {
            match (self_statuses.peek(), other_statuses.peek()) {
                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
                    match Ord::cmp(self_repo_path, other_repo_path) {
                        Ordering::Less => {
                            // Present only in the new state: an addition/update.
                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
                            self_statuses.next();
                        }
                        Ordering::Equal => {
                            // Present in both: only emit when the status changed.
                            if self_status != other_status {
                                updated_statuses
                                    .push(make_status_entry(self_repo_path, self_status));
                            }

                            self_statuses.next();
                            other_statuses.next();
                        }
                        Ordering::Greater => {
                            // Present only in the old state: a removal.
                            removed_statuses.push(make_repo_path(other_repo_path));
                            other_statuses.next();
                        }
                    }
                }
                (Some((self_repo_path, self_status)), None) => {
                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
                    self_statuses.next();
                }
                (None, Some((other_repo_path, _))) => {
                    removed_statuses.push(make_repo_path(other_repo_path));
                    other_statuses.next();
                }
                (None, None) => break,
            }
        }

        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id().to_proto(),
            branch: self.branch.as_ref().map(|str| str.to_string()),
            removed_worktree_repo_paths: removed_statuses,
            updated_worktree_statuses: updated_statuses,
        }
    }
}
251
252fn make_repo_path(path: &RepoPath) -> String {
253 path.as_os_str().to_string_lossy().to_string()
254}
255
256fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
257 proto::StatusEntry {
258 repo_path: make_repo_path(path),
259 status: match status {
260 GitFileStatus::Added => proto::GitStatus::Added.into(),
261 GitFileStatus::Modified => proto::GitStatus::Modified.into(),
262 GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
263 },
264 }
265}
266
267impl From<&RepositoryEntry> for proto::RepositoryEntry {
268 fn from(value: &RepositoryEntry) -> Self {
269 proto::RepositoryEntry {
270 work_directory_id: value.work_directory.to_proto(),
271 branch: value.branch.as_ref().map(|str| str.to_string()),
272 updated_worktree_statuses: value
273 .worktree_statuses
274 .iter()
275 .map(|(repo_path, status)| make_status_entry(repo_path, status))
276 .collect(),
277 removed_worktree_repo_paths: Default::default(),
278 }
279 }
280}
281
/// This path corresponds to the 'content path' (the folder that contains the
/// .git), relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
285
286impl Default for RepositoryWorkDirectory {
287 fn default() -> Self {
288 RepositoryWorkDirectory(Arc::from(Path::new("")))
289 }
290}
291
292impl AsRef<Path> for RepositoryWorkDirectory {
293 fn as_ref(&self) -> &Path {
294 self.0.as_ref()
295 }
296}
297
/// Newtype over the `ProjectEntryId` of a repository's work directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
300
301impl WorkDirectoryEntry {
302 // Note that these paths should be relative to the worktree root.
303 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
304 snapshot
305 .entry_for_id(self.0)
306 .map(|entry| path.starts_with(&entry.path))
307 .unwrap_or(false)
308 }
309
310 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
311 worktree.entry_for_id(self.0).and_then(|entry| {
312 path.strip_prefix(&entry.path)
313 .ok()
314 .map(move |path| path.into())
315 })
316 }
317}
318
/// Lets a `WorkDirectoryEntry` be used directly as its underlying
/// `ProjectEntryId`.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
326
327impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
328 fn from(value: ProjectEntryId) -> Self {
329 WorkDirectoryEntry(value)
330 }
331}
332
/// A [`Snapshot`] augmented with local-only state maintained by the
/// background scanner.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    /// Parsed `.gitignore` files keyed by the absolute path of the directory
    /// containing them. NOTE(review): the `usize` is presumably the scan id
    /// at which the ignore file was last seen — confirm at the maintenance
    /// sites outside this chunk.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    // work_directory_id
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    /// NOTE(review): the `u64` key is presumably a filesystem-assigned id
    /// (e.g. inode) used to re-identify entries that reappear — confirm at
    /// the use sites outside this chunk.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    /// Shared counter for allocating ids to newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
343
/// Local-only state for a git repository discovered within the worktree,
/// including a live handle to the repository itself.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    /// Scan id at which this repository was last touched (compared in
    /// `LocalWorktree::changed_repos` to detect changes).
    pub(crate) scan_id: usize,
    pub(crate) full_scan_id: usize,
    /// Handle to the underlying git repository, shared behind a mutex.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
353
impl LocalRepositoryEntry {
    // Note that this path should be relative to the worktree root.
    /// Whether `path` lies inside this repository's `.git` directory.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}
360
/// Lets a `LocalSnapshot` be used anywhere a `&Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
368
/// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
374
/// Messages sent from the background scanner to the foreground worktree
/// (consumed in `Worktree::local`).
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// Entries that changed relative to the previous snapshot.
        changes: HashMap<(Arc<Path>, ProjectEntryId), PathChange>,
        /// Dropped once the update has been handled, releasing any caller
        /// waiting on the corresponding path-change request.
        barrier: Option<barrier::Sender>,
        /// Whether the scanner is still busy after this update.
        scanning: bool,
    },
}
384
/// State held while a local worktree is shared with a remote project.
struct ShareState {
    project_id: u64,
    /// Receives each new local snapshot (written in `set_snapshot`) so it can
    /// be forwarded to collaborators.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    resume_updates: watch::Sender<()>,
    /// Keeps the task that mirrors snapshots to the remote side alive.
    _maintain_remote_snapshot: Task<Option<()>>,
}
391
/// Events emitted by a worktree model.
pub enum Event {
    /// Entries changed; maps (path, entry id) to the kind of change.
    UpdatedEntries(HashMap<(Arc<Path>, ProjectEntryId), PathChange>),
    /// Git repositories changed; maps work-directory path to repository state.
    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
}
396
/// Makes `Worktree` a gpui model entity that emits [`Event`]s.
impl Entity for Worktree {
    type Event = Event;
}
400
impl Worktree {
    /// Creates a local worktree rooted at `path`.
    ///
    /// Stats the root up front, builds an initial snapshot, and spawns two
    /// tasks: a foreground task that applies scan states coming from the
    /// background scanner to the model, and the background scanner itself,
    /// which watches the filesystem for changes.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            // `scan_id` starts at 1 with `completed_scan_id` 0: no scan has
            // completed yet.
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Foreground task: apply each scan state to the model, emitting
            // `UpdatedEntries` and releasing any barrier once the update has
            // been absorbed. Exits when the model is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Background task: watch the filesystem and run the scanner,
            // which reports back through `scan_states_tx`.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a worktree that mirrors a collaborator's worktree.
    ///
    /// Spawns a background task that applies incoming `UpdateWorktree`
    /// messages to a shared snapshot, and a foreground task that copies that
    /// snapshot into the model and wakes any scan-id subscribers.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply updates off the main thread; errors are logged, not fatal.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Copy each applied update into the foreground snapshot and
            // resolve subscriptions whose scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// Returns the local variant, if this worktree is local.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the remote variant, if this worktree is remote.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of [`Worktree::as_local`].
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of [`Worktree::as_remote`].
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Whether this worktree lives on the local machine.
    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    /// Whether this worktree mirrors a collaborator's machine.
    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// Returns a clone of the current snapshot, regardless of variant.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The id of the most recently started scan (see `Snapshot::scan_id`).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The latest fully completed scan id (see `Snapshot::completed_scan_id`).
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    /// Whether this worktree is shown to the user.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The replica id of this worktree's owner; local worktrees are always
    /// replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over all diagnostic summaries, flattened to
    /// `(path, language server id, summary)` triples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
695
696impl LocalWorktree {
697 pub fn contains_abs_path(&self, path: &Path) -> bool {
698 path.starts_with(&self.abs_path)
699 }
700
701 fn absolutize(&self, path: &Path) -> PathBuf {
702 if path.file_name().is_some() {
703 self.abs_path.join(path)
704 } else {
705 self.abs_path.to_path_buf()
706 }
707 }
708
    /// Loads the file at `path` into a new [`Buffer`] model with the given
    /// remote `id`, including its git diff base when the file belongs to a
    /// repository (see `load`).
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer off the main thread before wrapping it in
            // a model.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
731
732 pub fn diagnostics_for_path(
733 &self,
734 path: &Path,
735 ) -> Vec<(
736 LanguageServerId,
737 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
738 )> {
739 self.diagnostics.get(path).cloned().unwrap_or_default()
740 }
741
    /// Replaces the diagnostics produced by `server_id` for `worktree_path`.
    ///
    /// Updates both the stored entries and the per-server summary, and sends
    /// the new summary to collaborators when the worktree is shared. Returns
    /// `Ok(true)` when the change is observable (the summary was, or now is,
    /// non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: drop its entry, and the
            // whole path's record if no other server has entries either.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            // Insert or replace this server's diagnostics, keeping the vector
            // sorted by server id so the binary searches stay valid.
            summaries_by_server_id.insert(server_id, new_summary);
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        // Only notify collaborators when the summary actually changed in an
        // observable way; send failures are logged, not propagated.
        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
801
    /// Installs a new snapshot produced by the background scanner.
    ///
    /// Diffs the git repositories against the previous snapshot first, then
    /// forwards the new snapshot to collaborators (when shared) and emits
    /// `UpdatedGitRepositories` if any repositories changed.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos =
            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
815
    /// Computes which git repositories differ between two snapshots' maps,
    /// keyed by the repository's work-directory path.
    ///
    /// Performs a sorted merge-walk over the two maps (relying on `TreeMap`
    /// iteration order by entry id): repositories present in only one map, or
    /// present in both with different `scan_id`s, are included. Entries whose
    /// id can no longer be resolved to a path are skipped.
    fn changed_repos(
        &self,
        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
        let mut diff = HashMap::default();
        let mut old_repos = old_repos.iter().peekable();
        let mut new_repos = new_repos.iter().peekable();
        loop {
            match (old_repos.peek(), new_repos.peek()) {
                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
                    match Ord::cmp(old_entry_id, new_entry_id) {
                        Ordering::Less => {
                            // Repository removed in the new snapshot.
                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
                                diff.insert(entry.path.clone(), (*old_repo).clone());
                            }
                            old_repos.next();
                        }
                        Ordering::Equal => {
                            // Present in both: changed only if rescanned.
                            if old_repo.scan_id != new_repo.scan_id {
                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                    diff.insert(entry.path.clone(), (*new_repo).clone());
                                }
                            }

                            old_repos.next();
                            new_repos.next();
                        }
                        Ordering::Greater => {
                            // Repository added in the new snapshot.
                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                diff.insert(entry.path.clone(), (*new_repo).clone());
                            }
                            new_repos.next();
                        }
                    }
                }
                (Some((old_entry_id, old_repo)), None) => {
                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
                        diff.insert(entry.path.clone(), (*old_repo).clone());
                    }
                    old_repos.next();
                }
                (None, Some((new_entry_id, new_repo))) => {
                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
                        diff.insert(entry.path.clone(), (*new_repo).clone());
                    }
                    new_repos.next();
                }
                (None, None) => break,
            }
        }
        diff
    }
869
870 pub fn scan_complete(&self) -> impl Future<Output = ()> {
871 let mut is_scanning_rx = self.is_scanning.1.clone();
872 async move {
873 let mut is_scanning = is_scanning_rx.borrow().clone();
874 while is_scanning {
875 if let Some(value) = is_scanning_rx.recv().await {
876 is_scanning = value;
877 } else {
878 break;
879 }
880 }
881 }
882 }
883
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
887
888 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
889 proto::WorktreeMetadata {
890 id: self.id().to_proto(),
891 root_name: self.root_name().to_string(),
892 visible: self.visible,
893 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
894 }
895 }
896
    /// Loads the file at the worktree-relative `path`, returning its [`File`]
    /// handle, text contents, and — when the file belongs to a git
    /// repository — the corresponding git index text to diff against.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // Kick off loading the git index text concurrently with the file read.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
951
952 pub fn save_buffer(
953 &self,
954 buffer_handle: ModelHandle<Buffer>,
955 path: Arc<Path>,
956 has_changed_file: bool,
957 cx: &mut ModelContext<Worktree>,
958 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
959 let handle = cx.handle();
960 let buffer = buffer_handle.read(cx);
961
962 let rpc = self.client.clone();
963 let buffer_id = buffer.remote_id();
964 let project_id = self.share.as_ref().map(|share| share.project_id);
965
966 let text = buffer.as_rope().clone();
967 let fingerprint = text.fingerprint();
968 let version = buffer.version();
969 let save = self.write_file(path, text, buffer.line_ending(), cx);
970
971 cx.as_mut().spawn(|mut cx| async move {
972 let entry = save.await?;
973
974 if has_changed_file {
975 let new_file = Arc::new(File {
976 entry_id: entry.id,
977 worktree: handle,
978 path: entry.path,
979 mtime: entry.mtime,
980 is_local: true,
981 is_deleted: false,
982 });
983
984 if let Some(project_id) = project_id {
985 rpc.send(proto::UpdateBufferFile {
986 project_id,
987 buffer_id,
988 file: Some(new_file.to_proto()),
989 })
990 .log_err();
991 }
992
993 buffer_handle.update(&mut cx, |buffer, cx| {
994 if has_changed_file {
995 buffer.file_updated(new_file, cx).detach();
996 }
997 });
998 }
999
1000 if let Some(project_id) = project_id {
1001 rpc.send(proto::BufferSaved {
1002 project_id,
1003 buffer_id,
1004 version: serialize_version(&version),
1005 mtime: Some(entry.mtime.into()),
1006 fingerprint: serialize_fingerprint(fingerprint),
1007 })?;
1008 }
1009
1010 buffer_handle.update(&mut cx, |buffer, cx| {
1011 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1012 });
1013
1014 Ok((version, fingerprint, entry.mtime))
1015 })
1016 }
1017
1018 pub fn create_entry(
1019 &self,
1020 path: impl Into<Arc<Path>>,
1021 is_dir: bool,
1022 cx: &mut ModelContext<Worktree>,
1023 ) -> Task<Result<Entry>> {
1024 let path = path.into();
1025 let abs_path = self.absolutize(&path);
1026 let fs = self.fs.clone();
1027 let write = cx.background().spawn(async move {
1028 if is_dir {
1029 fs.create_dir(&abs_path).await
1030 } else {
1031 fs.save(&abs_path, &Default::default(), Default::default())
1032 .await
1033 }
1034 });
1035
1036 cx.spawn(|this, mut cx| async move {
1037 write.await?;
1038 this.update(&mut cx, |this, cx| {
1039 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1040 })
1041 .await
1042 })
1043 }
1044
1045 pub fn write_file(
1046 &self,
1047 path: impl Into<Arc<Path>>,
1048 text: Rope,
1049 line_ending: LineEnding,
1050 cx: &mut ModelContext<Worktree>,
1051 ) -> Task<Result<Entry>> {
1052 let path = path.into();
1053 let abs_path = self.absolutize(&path);
1054 let fs = self.fs.clone();
1055 let write = cx
1056 .background()
1057 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1058
1059 cx.spawn(|this, mut cx| async move {
1060 write.await?;
1061 this.update(&mut cx, |this, cx| {
1062 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1063 })
1064 .await
1065 })
1066 }
1067
    /// Deletes the file or directory for `entry_id` from disk, then asks the
    /// background scanner to rescan the deleted path and waits for it.
    ///
    /// Returns `None` when no entry with `entry_id` exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize so the path sent to the scanner matches the form
            // produced by the filesystem watcher.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // The barrier resolves once the scanner has processed the path.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
1110
1111 pub fn rename_entry(
1112 &self,
1113 entry_id: ProjectEntryId,
1114 new_path: impl Into<Arc<Path>>,
1115 cx: &mut ModelContext<Worktree>,
1116 ) -> Option<Task<Result<Entry>>> {
1117 let old_path = self.entry_for_id(entry_id)?.path.clone();
1118 let new_path = new_path.into();
1119 let abs_old_path = self.absolutize(&old_path);
1120 let abs_new_path = self.absolutize(&new_path);
1121 let fs = self.fs.clone();
1122 let rename = cx.background().spawn(async move {
1123 fs.rename(&abs_old_path, &abs_new_path, Default::default())
1124 .await
1125 });
1126
1127 Some(cx.spawn(|this, mut cx| async move {
1128 rename.await?;
1129 this.update(&mut cx, |this, cx| {
1130 this.as_local_mut()
1131 .unwrap()
1132 .refresh_entry(new_path.clone(), Some(old_path), cx)
1133 })
1134 .await
1135 }))
1136 }
1137
1138 pub fn copy_entry(
1139 &self,
1140 entry_id: ProjectEntryId,
1141 new_path: impl Into<Arc<Path>>,
1142 cx: &mut ModelContext<Worktree>,
1143 ) -> Option<Task<Result<Entry>>> {
1144 let old_path = self.entry_for_id(entry_id)?.path.clone();
1145 let new_path = new_path.into();
1146 let abs_old_path = self.absolutize(&old_path);
1147 let abs_new_path = self.absolutize(&new_path);
1148 let fs = self.fs.clone();
1149 let copy = cx.background().spawn(async move {
1150 copy_recursive(
1151 fs.as_ref(),
1152 &abs_old_path,
1153 &abs_new_path,
1154 Default::default(),
1155 )
1156 .await
1157 });
1158
1159 Some(cx.spawn(|this, mut cx| async move {
1160 copy.await?;
1161 this.update(&mut cx, |this, cx| {
1162 this.as_local_mut()
1163 .unwrap()
1164 .refresh_entry(new_path.clone(), None, cx)
1165 })
1166 .await
1167 }))
1168 }
1169
    /// Asks the background scanner to rescan `path` (and `old_path`, when the
    /// entry was moved from elsewhere), waits for that scan to be processed,
    /// and returns the refreshed entry at `path`.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        // Spawn with a weak handle so an in-flight refresh doesn't keep the
        // worktree alive.
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // An empty file name means the path refers to the worktree root.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed these paths.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1207
    /// Begins (or resumes) sharing this worktree with remote collaborators
    /// under the given `project_id`.
    ///
    /// The returned task resolves once the initial snapshot has been fully
    /// sent to the server. Calling this while already shared just nudges the
    /// update loop to resume sending.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and wake the update loop,
            // which may be parked waiting for permission to resume.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Push all currently-known diagnostic summaries to the server up
            // front; bail out early if the connection is already broken.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            // Background task that diffs each new snapshot against the
            // previously-sent one and streams the resulting updates.
            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first diff sends the
                    // entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Use a tiny chunk size in tests to exercise the
                        // update-splitting logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Drain any stale resume signal before sending so a
                            // failure below waits for a *new* resume request.
                            let _ = resume_updates_rx.try_recv();
                            // Retry each chunk until it succeeds, pausing until
                            // `share` is called again; stop entirely if the
                            // resume channel is closed (worktree unshared).
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // Resolve the share task after the first snapshot has
                        // been fully transmitted.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1294
1295 pub fn unshare(&mut self) {
1296 self.share.take();
1297 }
1298
    /// Whether this worktree is currently shared with remote collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
}
1303
impl RemoteWorktree {
    /// Returns a clone of the latest foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks this worktree as disconnected: stops accepting remote updates
    /// and drops all pending snapshot subscriptions (waking their waiters
    /// with an error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the given buffer, then applies the resulting
    /// version, fingerprint, and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Forwards a worktree update from the host to the background consumer,
    /// if we are still connected.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot with at least the given `scan_id` has been fully
    /// applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once a snapshot with the given
    /// `scan_id` has been observed, or fails if the host disconnects first.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes the `rx.await` below return an error.
            drop(tx);
        } else {
            // Keep subscriptions sorted by scan id so they can be drained in
            // order as snapshots arrive.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Applies a diagnostic summary received from the host, removing the
    /// per-path map entirely when it becomes empty.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry received from the host once the corresponding scan
    /// has been observed, keeping the foreground snapshot in sync with the
    /// background one.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes an entry once the corresponding scan has been observed,
    /// keeping the foreground snapshot in sync with the background one.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1445
1446impl Snapshot {
1447 pub fn id(&self) -> WorktreeId {
1448 self.id
1449 }
1450
1451 pub fn abs_path(&self) -> &Arc<Path> {
1452 &self.abs_path
1453 }
1454
1455 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1456 self.entries_by_id.get(&entry_id, &()).is_some()
1457 }
1458
1459 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1460 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1461 let old_entry = self.entries_by_id.insert_or_replace(
1462 PathEntry {
1463 id: entry.id,
1464 path: entry.path.clone(),
1465 is_ignored: entry.is_ignored,
1466 scan_id: 0,
1467 },
1468 &(),
1469 );
1470 if let Some(old_entry) = old_entry {
1471 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1472 }
1473 self.entries_by_path.insert_or_replace(entry.clone(), &());
1474 Ok(entry)
1475 }
1476
1477 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1478 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1479 self.entries_by_path = {
1480 let mut cursor = self.entries_by_path.cursor();
1481 let mut new_entries_by_path =
1482 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1483 while let Some(entry) = cursor.item() {
1484 if entry.path.starts_with(&removed_entry.path) {
1485 self.entries_by_id.remove(&entry.id, &());
1486 cursor.next(&());
1487 } else {
1488 break;
1489 }
1490 }
1491 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1492 new_entries_by_path
1493 };
1494
1495 Some(removed_entry.path)
1496 }
1497
1498 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1499 let mut entries_by_path_edits = Vec::new();
1500 let mut entries_by_id_edits = Vec::new();
1501 for entry_id in update.removed_entries {
1502 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1503 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1504 entries_by_id_edits.push(Edit::Remove(entry.id));
1505 }
1506 }
1507
1508 for entry in update.updated_entries {
1509 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1510 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1511 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1512 }
1513 entries_by_id_edits.push(Edit::Insert(PathEntry {
1514 id: entry.id,
1515 path: entry.path.clone(),
1516 is_ignored: entry.is_ignored,
1517 scan_id: 0,
1518 }));
1519 entries_by_path_edits.push(Edit::Insert(entry));
1520 }
1521
1522 self.entries_by_path.edit(entries_by_path_edits, &());
1523 self.entries_by_id.edit(entries_by_id_edits, &());
1524
1525 update.removed_repositories.sort_unstable();
1526 self.repository_entries.retain(|_, entry| {
1527 if let Ok(_) = update
1528 .removed_repositories
1529 .binary_search(&entry.work_directory.to_proto())
1530 {
1531 false
1532 } else {
1533 true
1534 }
1535 });
1536
1537 for repository in update.updated_repositories {
1538 let work_directory_entry: WorkDirectoryEntry =
1539 ProjectEntryId::from_proto(repository.work_directory_id).into();
1540
1541 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1542 let mut statuses = TreeMap::default();
1543 for status_entry in repository.updated_worktree_statuses {
1544 let Some(git_file_status) = read_git_status(status_entry.status) else {
1545 continue;
1546 };
1547
1548 let repo_path = RepoPath::new(status_entry.repo_path.into());
1549 statuses.insert(repo_path, git_file_status);
1550 }
1551
1552 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1553 if self.repository_entries.get(&work_directory).is_some() {
1554 self.repository_entries.update(&work_directory, |repo| {
1555 repo.branch = repository.branch.map(Into::into);
1556 repo.worktree_statuses.insert_tree(statuses);
1557
1558 for repo_path in repository.removed_worktree_repo_paths {
1559 let repo_path = RepoPath::new(repo_path.into());
1560 repo.worktree_statuses.remove(&repo_path);
1561 }
1562 });
1563 } else {
1564 self.repository_entries.insert(
1565 work_directory,
1566 RepositoryEntry {
1567 work_directory: work_directory_entry,
1568 branch: repository.branch.map(Into::into),
1569 worktree_statuses: statuses,
1570 },
1571 )
1572 }
1573 } else {
1574 log::error!("no work directory entry for repository {:?}", repository)
1575 }
1576 }
1577
1578 self.scan_id = update.scan_id as usize;
1579 if update.is_last_update {
1580 self.completed_scan_id = update.scan_id as usize;
1581 }
1582
1583 Ok(())
1584 }
1585
1586 pub fn file_count(&self) -> usize {
1587 self.entries_by_path.summary().file_count
1588 }
1589
1590 pub fn visible_file_count(&self) -> usize {
1591 self.entries_by_path.summary().visible_file_count
1592 }
1593
1594 fn traverse_from_offset(
1595 &self,
1596 include_dirs: bool,
1597 include_ignored: bool,
1598 start_offset: usize,
1599 ) -> Traversal {
1600 let mut cursor = self.entries_by_path.cursor();
1601 cursor.seek(
1602 &TraversalTarget::Count {
1603 count: start_offset,
1604 include_dirs,
1605 include_ignored,
1606 },
1607 Bias::Right,
1608 &(),
1609 );
1610 Traversal {
1611 cursor,
1612 include_dirs,
1613 include_ignored,
1614 }
1615 }
1616
1617 fn traverse_from_path(
1618 &self,
1619 include_dirs: bool,
1620 include_ignored: bool,
1621 path: &Path,
1622 ) -> Traversal {
1623 let mut cursor = self.entries_by_path.cursor();
1624 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1625 Traversal {
1626 cursor,
1627 include_dirs,
1628 include_ignored,
1629 }
1630 }
1631
1632 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1633 self.traverse_from_offset(false, include_ignored, start)
1634 }
1635
1636 pub fn entries(&self, include_ignored: bool) -> Traversal {
1637 self.traverse_from_offset(true, include_ignored, 0)
1638 }
1639
1640 pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1641 self.repository_entries.values()
1642 }
1643
1644 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1645 let empty_path = Path::new("");
1646 self.entries_by_path
1647 .cursor::<()>()
1648 .filter(move |entry| entry.path.as_ref() != empty_path)
1649 .map(|entry| &entry.path)
1650 }
1651
1652 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1653 let mut cursor = self.entries_by_path.cursor();
1654 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1655 let traversal = Traversal {
1656 cursor,
1657 include_dirs: true,
1658 include_ignored: true,
1659 };
1660 ChildEntriesIter {
1661 traversal,
1662 parent_path,
1663 }
1664 }
1665
1666 pub fn root_entry(&self) -> Option<&Entry> {
1667 self.entry_for_path("")
1668 }
1669
1670 pub fn root_name(&self) -> &str {
1671 &self.root_name
1672 }
1673
1674 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1675 self.repository_entries
1676 .get(&RepositoryWorkDirectory(Path::new("").into()))
1677 .map(|entry| entry.to_owned())
1678 }
1679
1680 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1681 self.repository_entries.values()
1682 }
1683
1684 pub fn scan_id(&self) -> usize {
1685 self.scan_id
1686 }
1687
1688 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1689 let path = path.as_ref();
1690 self.traverse_from_path(true, true, path)
1691 .entry()
1692 .and_then(|entry| {
1693 if entry.path.as_ref() == path {
1694 Some(entry)
1695 } else {
1696 None
1697 }
1698 })
1699 }
1700
1701 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1702 let entry = self.entries_by_id.get(&id, &())?;
1703 self.entry_for_path(&entry.path)
1704 }
1705
1706 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1707 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1708 }
1709}
1710
1711impl LocalSnapshot {
1712 pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1713 self.git_repositories.get(&repo.work_directory.0)
1714 }
1715
1716 pub(crate) fn repo_for_metadata(
1717 &self,
1718 path: &Path,
1719 ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1720 self.git_repositories
1721 .iter()
1722 .find(|(_, repo)| repo.in_dot_git(path))
1723 }
1724
1725 #[cfg(test)]
1726 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1727 let root_name = self.root_name.clone();
1728 proto::UpdateWorktree {
1729 project_id,
1730 worktree_id: self.id().to_proto(),
1731 abs_path: self.abs_path().to_string_lossy().into(),
1732 root_name,
1733 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1734 removed_entries: Default::default(),
1735 scan_id: self.scan_id as u64,
1736 is_last_update: true,
1737 updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1738 removed_repositories: Default::default(),
1739 }
1740 }
1741
1742 pub(crate) fn build_update(
1743 &self,
1744 other: &Self,
1745 project_id: u64,
1746 worktree_id: u64,
1747 include_ignored: bool,
1748 ) -> proto::UpdateWorktree {
1749 let mut updated_entries = Vec::new();
1750 let mut removed_entries = Vec::new();
1751 let mut self_entries = self
1752 .entries_by_id
1753 .cursor::<()>()
1754 .filter(|e| include_ignored || !e.is_ignored)
1755 .peekable();
1756 let mut other_entries = other
1757 .entries_by_id
1758 .cursor::<()>()
1759 .filter(|e| include_ignored || !e.is_ignored)
1760 .peekable();
1761 loop {
1762 match (self_entries.peek(), other_entries.peek()) {
1763 (Some(self_entry), Some(other_entry)) => {
1764 match Ord::cmp(&self_entry.id, &other_entry.id) {
1765 Ordering::Less => {
1766 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1767 updated_entries.push(entry);
1768 self_entries.next();
1769 }
1770 Ordering::Equal => {
1771 if self_entry.scan_id != other_entry.scan_id {
1772 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1773 updated_entries.push(entry);
1774 }
1775
1776 self_entries.next();
1777 other_entries.next();
1778 }
1779 Ordering::Greater => {
1780 removed_entries.push(other_entry.id.to_proto());
1781 other_entries.next();
1782 }
1783 }
1784 }
1785 (Some(self_entry), None) => {
1786 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1787 updated_entries.push(entry);
1788 self_entries.next();
1789 }
1790 (None, Some(other_entry)) => {
1791 removed_entries.push(other_entry.id.to_proto());
1792 other_entries.next();
1793 }
1794 (None, None) => break,
1795 }
1796 }
1797
1798 let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1799 let mut removed_repositories = Vec::new();
1800 let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1801 let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1802 loop {
1803 match (self_repos.peek(), other_repos.peek()) {
1804 (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1805 match Ord::cmp(self_work_dir, other_work_dir) {
1806 Ordering::Less => {
1807 updated_repositories.push((*self_repo).into());
1808 self_repos.next();
1809 }
1810 Ordering::Equal => {
1811 if self_repo != other_repo {
1812 updated_repositories.push(self_repo.build_update(other_repo));
1813 }
1814
1815 self_repos.next();
1816 other_repos.next();
1817 }
1818 Ordering::Greater => {
1819 removed_repositories.push(other_repo.work_directory.to_proto());
1820 other_repos.next();
1821 }
1822 }
1823 }
1824 (Some((_, self_repo)), None) => {
1825 updated_repositories.push((*self_repo).into());
1826 self_repos.next();
1827 }
1828 (None, Some((_, other_repo))) => {
1829 removed_repositories.push(other_repo.work_directory.to_proto());
1830 other_repos.next();
1831 }
1832 (None, None) => break,
1833 }
1834 }
1835
1836 proto::UpdateWorktree {
1837 project_id,
1838 worktree_id,
1839 abs_path: self.abs_path().to_string_lossy().into(),
1840 root_name: self.root_name().to_string(),
1841 updated_entries,
1842 removed_entries,
1843 scan_id: self.scan_id as u64,
1844 is_last_update: self.completed_scan_id == self.scan_id,
1845 updated_repositories,
1846 removed_repositories,
1847 }
1848 }
1849
1850 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1851 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1852 let abs_path = self.abs_path.join(&entry.path);
1853 match smol::block_on(build_gitignore(&abs_path, fs)) {
1854 Ok(ignore) => {
1855 self.ignores_by_parent_abs_path.insert(
1856 abs_path.parent().unwrap().into(),
1857 (Arc::new(ignore), self.scan_id),
1858 );
1859 }
1860 Err(error) => {
1861 log::error!(
1862 "error loading .gitignore file {:?} - {:?}",
1863 &entry.path,
1864 error
1865 );
1866 }
1867 }
1868 }
1869
1870 self.reuse_entry_id(&mut entry);
1871
1872 if entry.kind == EntryKind::PendingDir {
1873 if let Some(existing_entry) =
1874 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1875 {
1876 entry.kind = existing_entry.kind;
1877 }
1878 }
1879
1880 let scan_id = self.scan_id;
1881 let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1882 if let Some(removed) = removed {
1883 if removed.id != entry.id {
1884 self.entries_by_id.remove(&removed.id, &());
1885 }
1886 }
1887 self.entries_by_id.insert_or_replace(
1888 PathEntry {
1889 id: entry.id,
1890 path: entry.path.clone(),
1891 is_ignored: entry.is_ignored,
1892 scan_id,
1893 },
1894 &(),
1895 );
1896
1897 entry
1898 }
1899
1900 fn populate_dir(
1901 &mut self,
1902 parent_path: Arc<Path>,
1903 entries: impl IntoIterator<Item = Entry>,
1904 ignore: Option<Arc<Gitignore>>,
1905 fs: &dyn Fs,
1906 ) {
1907 let mut parent_entry = if let Some(parent_entry) =
1908 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1909 {
1910 parent_entry.clone()
1911 } else {
1912 log::warn!(
1913 "populating a directory {:?} that has been removed",
1914 parent_path
1915 );
1916 return;
1917 };
1918
1919 match parent_entry.kind {
1920 EntryKind::PendingDir => {
1921 parent_entry.kind = EntryKind::Dir;
1922 }
1923 EntryKind::Dir => {}
1924 _ => return,
1925 }
1926
1927 if let Some(ignore) = ignore {
1928 self.ignores_by_parent_abs_path.insert(
1929 self.abs_path.join(&parent_path).into(),
1930 (ignore, self.scan_id),
1931 );
1932 }
1933
1934 if parent_path.file_name() == Some(&DOT_GIT) {
1935 self.build_repo(parent_path, fs);
1936 }
1937
1938 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1939 let mut entries_by_id_edits = Vec::new();
1940
1941 for mut entry in entries {
1942 self.reuse_entry_id(&mut entry);
1943 entries_by_id_edits.push(Edit::Insert(PathEntry {
1944 id: entry.id,
1945 path: entry.path.clone(),
1946 is_ignored: entry.is_ignored,
1947 scan_id: self.scan_id,
1948 }));
1949 entries_by_path_edits.push(Edit::Insert(entry));
1950 }
1951
1952 self.entries_by_path.edit(entries_by_path_edits, &());
1953 self.entries_by_id.edit(entries_by_id_edits, &());
1954 }
1955
1956 fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1957 let abs_path = self.abs_path.join(&parent_path);
1958 let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1959
1960 // Guard against repositories inside the repository metadata
1961 if work_dir
1962 .components()
1963 .find(|component| component.as_os_str() == *DOT_GIT)
1964 .is_some()
1965 {
1966 return None;
1967 };
1968
1969 let work_dir_id = self
1970 .entry_for_path(work_dir.clone())
1971 .map(|entry| entry.id)?;
1972
1973 if self.git_repositories.get(&work_dir_id).is_none() {
1974 let repo = fs.open_repo(abs_path.as_path())?;
1975 let work_directory = RepositoryWorkDirectory(work_dir.clone());
1976 let scan_id = self.scan_id;
1977
1978 let repo_lock = repo.lock();
1979
1980 self.repository_entries.insert(
1981 work_directory,
1982 RepositoryEntry {
1983 work_directory: work_dir_id.into(),
1984 branch: repo_lock.branch_name().map(Into::into),
1985 worktree_statuses: repo_lock.worktree_statuses().unwrap_or_default(),
1986 },
1987 );
1988 drop(repo_lock);
1989
1990 self.git_repositories.insert(
1991 work_dir_id,
1992 LocalRepositoryEntry {
1993 scan_id,
1994 full_scan_id: scan_id,
1995 repo_ptr: repo,
1996 git_dir_path: parent_path.clone(),
1997 },
1998 )
1999 }
2000
2001 Some(())
2002 }
2003 fn reuse_entry_id(&mut self, entry: &mut Entry) {
2004 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2005 entry.id = removed_entry_id;
2006 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
2007 entry.id = existing_entry.id;
2008 }
2009 }
2010
2011 fn remove_path(&mut self, path: &Path) {
2012 let mut new_entries;
2013 let removed_entries;
2014 {
2015 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2016 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2017 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2018 new_entries.push_tree(cursor.suffix(&()), &());
2019 }
2020 self.entries_by_path = new_entries;
2021
2022 let mut entries_by_id_edits = Vec::new();
2023 for entry in removed_entries.cursor::<()>() {
2024 let removed_entry_id = self
2025 .removed_entry_ids
2026 .entry(entry.inode)
2027 .or_insert(entry.id);
2028 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2029 entries_by_id_edits.push(Edit::Remove(entry.id));
2030 }
2031 self.entries_by_id.edit(entries_by_id_edits, &());
2032
2033 if path.file_name() == Some(&GITIGNORE) {
2034 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
2035 if let Some((_, scan_id)) = self
2036 .ignores_by_parent_abs_path
2037 .get_mut(abs_parent_path.as_path())
2038 {
2039 *scan_id = self.snapshot.scan_id;
2040 }
2041 }
2042 }
2043
2044 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2045 let mut inodes = TreeSet::default();
2046 for ancestor in path.ancestors().skip(1) {
2047 if let Some(entry) = self.entry_for_path(ancestor) {
2048 inodes.insert(entry.inode);
2049 }
2050 }
2051 inodes
2052 }
2053
2054 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2055 let mut new_ignores = Vec::new();
2056 for ancestor in abs_path.ancestors().skip(1) {
2057 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2058 new_ignores.push((ancestor, Some(ignore.clone())));
2059 } else {
2060 new_ignores.push((ancestor, None));
2061 }
2062 }
2063
2064 let mut ignore_stack = IgnoreStack::none();
2065 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2066 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2067 ignore_stack = IgnoreStack::all();
2068 break;
2069 } else if let Some(ignore) = ignore {
2070 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2071 }
2072 }
2073
2074 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2075 ignore_stack = IgnoreStack::all();
2076 }
2077
2078 ignore_stack
2079 }
2080}
2081
2082async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2083 let contents = fs.load(abs_path).await?;
2084 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2085 let mut builder = GitignoreBuilder::new(parent);
2086 for line in contents.lines() {
2087 builder.add_line(Some(abs_path.into()), line)?;
2088 }
2089 Ok(builder.build()?)
2090}
2091
2092impl WorktreeId {
2093 pub fn from_usize(handle_id: usize) -> Self {
2094 Self(handle_id)
2095 }
2096
2097 pub(crate) fn from_proto(id: u64) -> Self {
2098 Self(id as usize)
2099 }
2100
2101 pub fn to_proto(&self) -> u64 {
2102 self.0 as u64
2103 }
2104
2105 pub fn to_usize(&self) -> usize {
2106 self.0
2107 }
2108}
2109
2110impl fmt::Display for WorktreeId {
2111 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2112 self.0.fmt(f)
2113 }
2114}
2115
2116impl Deref for Worktree {
2117 type Target = Snapshot;
2118
2119 fn deref(&self) -> &Self::Target {
2120 match self {
2121 Worktree::Local(worktree) => &worktree.snapshot,
2122 Worktree::Remote(worktree) => &worktree.snapshot,
2123 }
2124 }
2125}
2126
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    /// Lets a `LocalWorktree` be used anywhere a `&LocalSnapshot` is expected.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2134
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    /// Lets a `RemoteWorktree` be used anywhere a `&Snapshot` is expected.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2142
2143impl fmt::Debug for LocalWorktree {
2144 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2145 self.snapshot.fmt(f)
2146 }
2147}
2148
2149impl fmt::Debug for Snapshot {
2150 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2151 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2152 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2153
2154 impl<'a> fmt::Debug for EntriesByPath<'a> {
2155 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2156 f.debug_map()
2157 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2158 .finish()
2159 }
2160 }
2161
2162 impl<'a> fmt::Debug for EntriesById<'a> {
2163 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2164 f.debug_list().entries(self.0.iter()).finish()
2165 }
2166 }
2167
2168 f.debug_struct("Snapshot")
2169 .field("id", &self.id)
2170 .field("root_name", &self.root_name)
2171 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2172 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2173 .finish()
2174 }
2175}
2176
/// A handle to a single file within a worktree, implementing
/// `language::File` so buffers can reference their on-disk location.
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time of the file on disk.
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    /// Whether the owning worktree is local (as opposed to remote).
    pub(crate) is_local: bool,
    pub(crate) is_deleted: bool,
}
2186
impl language::File for File {
    /// Returns `self` as a `LocalFile` when the owning worktree is local.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a path suitable for display: the worktree's root name (or an
    /// abbreviated absolute path, for invisible worktrees) followed by the
    /// file's worktree-relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            // Abbreviate paths under the home directory with `~`.
            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means this file *is* the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission to remote collaborators.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
2254
2255impl language::LocalFile for File {
2256 fn abs_path(&self, cx: &AppContext) -> PathBuf {
2257 self.worktree
2258 .read(cx)
2259 .as_local()
2260 .unwrap()
2261 .abs_path
2262 .join(&self.path)
2263 }
2264
2265 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2266 let worktree = self.worktree.read(cx).as_local().unwrap();
2267 let abs_path = worktree.absolutize(&self.path);
2268 let fs = worktree.fs.clone();
2269 cx.background()
2270 .spawn(async move { fs.load(&abs_path).await })
2271 }
2272
2273 fn buffer_reloaded(
2274 &self,
2275 buffer_id: u64,
2276 version: &clock::Global,
2277 fingerprint: RopeFingerprint,
2278 line_ending: LineEnding,
2279 mtime: SystemTime,
2280 cx: &mut AppContext,
2281 ) {
2282 let worktree = self.worktree.read(cx).as_local().unwrap();
2283 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2284 worktree
2285 .client
2286 .send(proto::BufferReloaded {
2287 project_id,
2288 buffer_id,
2289 version: serialize_version(version),
2290 mtime: Some(mtime.into()),
2291 fingerprint: serialize_fingerprint(fingerprint),
2292 line_ending: serialize_line_ending(line_ending) as i32,
2293 })
2294 .log_err();
2295 }
2296 }
2297}
2298
2299impl File {
2300 pub fn from_proto(
2301 proto: rpc::proto::File,
2302 worktree: ModelHandle<Worktree>,
2303 cx: &AppContext,
2304 ) -> Result<Self> {
2305 let worktree_id = worktree
2306 .read(cx)
2307 .as_remote()
2308 .ok_or_else(|| anyhow!("not remote"))?
2309 .id();
2310
2311 if worktree_id.to_proto() != proto.worktree_id {
2312 return Err(anyhow!("worktree id does not match file"));
2313 }
2314
2315 Ok(Self {
2316 worktree,
2317 path: Path::new(&proto.path).into(),
2318 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2319 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2320 is_local: false,
2321 is_deleted: proto.is_deleted,
2322 })
2323 }
2324
2325 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2326 file.and_then(|f| f.as_any().downcast_ref())
2327 }
2328
2329 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2330 self.worktree.read(cx).id()
2331 }
2332
2333 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2334 if self.is_deleted {
2335 None
2336 } else {
2337 Some(self.entry_id)
2338 }
2339 }
2340}
2341
/// A single file or directory within a worktree's snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// A stable id for this entry, unique within the project.
    pub id: ProjectEntryId,
    /// Whether this entry is a file or a (possibly still-scanning) directory.
    pub kind: EntryKind,
    /// The entry's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// The filesystem inode of the entry.
    pub inode: u64,
    /// The entry's last-known modification time.
    pub mtime: SystemTime,
    /// Whether the entry is a symbolic link.
    pub is_symlink: bool,
    /// Whether the entry is excluded by a gitignore rule.
    pub is_ignored: bool,
}
2352
/// Distinguishes files from directories, and marks directories whose
/// contents have not yet been scanned.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not yet
    /// been loaded.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, carrying the set of characters in its path for fuzzy matching.
    File(CharBag),
}
2359
/// Describes how a path changed between two snapshots of a worktree.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    /// Either added or updated; used for changes observed before the initial
    /// scan completed, when the distinction cannot be made.
    AddedOrUpdated,
}
2367
2368impl Entry {
2369 fn new(
2370 path: Arc<Path>,
2371 metadata: &fs::Metadata,
2372 next_entry_id: &AtomicUsize,
2373 root_char_bag: CharBag,
2374 ) -> Self {
2375 Self {
2376 id: ProjectEntryId::new(next_entry_id),
2377 kind: if metadata.is_dir {
2378 EntryKind::PendingDir
2379 } else {
2380 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2381 },
2382 path,
2383 inode: metadata.inode,
2384 mtime: metadata.mtime,
2385 is_symlink: metadata.is_symlink,
2386 is_ignored: false,
2387 }
2388 }
2389
2390 pub fn is_dir(&self) -> bool {
2391 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2392 }
2393
2394 pub fn is_file(&self) -> bool {
2395 matches!(self.kind, EntryKind::File(_))
2396 }
2397}
2398
2399impl sum_tree::Item for Entry {
2400 type Summary = EntrySummary;
2401
2402 fn summary(&self) -> Self::Summary {
2403 let visible_count = if self.is_ignored { 0 } else { 1 };
2404 let file_count;
2405 let visible_file_count;
2406 if self.is_file() {
2407 file_count = 1;
2408 visible_file_count = visible_count;
2409 } else {
2410 file_count = 0;
2411 visible_file_count = 0;
2412 }
2413
2414 EntrySummary {
2415 max_path: self.path.clone(),
2416 count: 1,
2417 visible_count,
2418 file_count,
2419 visible_file_count,
2420 }
2421 }
2422}
2423
2424impl sum_tree::KeyedItem for Entry {
2425 type Key = PathKey;
2426
2427 fn key(&self) -> Self::Key {
2428 PathKey(self.path.clone())
2429 }
2430}
2431
/// Aggregated statistics over a range of `Entry` items in the entries tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest path in the summarized range (paths are the sort key).
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Number of entries that are not gitignored.
    visible_count: usize,
    // Number of entries that are files.
    file_count: usize,
    // Number of entries that are files and not gitignored.
    visible_file_count: usize,
}
2440
2441impl Default for EntrySummary {
2442 fn default() -> Self {
2443 Self {
2444 max_path: Arc::from(Path::new("")),
2445 count: 0,
2446 visible_count: 0,
2447 file_count: 0,
2448 visible_file_count: 0,
2449 }
2450 }
2451}
2452
2453impl sum_tree::Summary for EntrySummary {
2454 type Context = ();
2455
2456 fn add_summary(&mut self, rhs: &Self, _: &()) {
2457 self.max_path = rhs.max_path.clone();
2458 self.count += rhs.count;
2459 self.visible_count += rhs.visible_count;
2460 self.file_count += rhs.file_count;
2461 self.visible_file_count += rhs.visible_file_count;
2462 }
2463}
2464
/// An id-keyed record of an entry, stored in a secondary tree that allows
/// looking entries up by `ProjectEntryId` rather than by path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this record was last updated.
    scan_id: usize,
}
2472
2473impl sum_tree::Item for PathEntry {
2474 type Summary = PathEntrySummary;
2475
2476 fn summary(&self) -> Self::Summary {
2477 PathEntrySummary { max_id: self.id }
2478 }
2479}
2480
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// `PathEntry` items are keyed (and ordered) by their entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2488
/// Summary for the id-keyed entries tree: tracks the greatest entry id in a
/// range, enabling id-based seeking.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2493
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    // The right-hand summary covers later items, so its max id wins.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2501
/// Lets a cursor over the id-keyed tree seek by `ProjectEntryId`.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2507
/// A newtype over a worktree-relative path, used as the sort key for the
/// path-ordered entries tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2510
2511impl Default for PathKey {
2512 fn default() -> Self {
2513 Self(Path::new("").into())
2514 }
2515}
2516
2517impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2518 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2519 self.0 = summary.max_path.clone();
2520 }
2521}
2522
/// Scans a local worktree's directory tree on background threads, keeping the
/// shared snapshot up to date in response to filesystem events and explicit
/// refresh requests.
struct BackgroundScanner {
    // The live snapshot, shared with the worktree and mutated as scanning proceeds.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    // Channel over which scan progress and snapshot updates are reported.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    // Interactive requests to refresh specific paths, each paired with a
    // barrier to signal completion.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    // The snapshot as of the last status update, plus the paths touched since
    // then; used to compute change sets.
    prev_state: Mutex<BackgroundScannerState>,
    finished_initial_scan: bool,
}
2532
/// State carried between status updates: the previously-reported snapshot and
/// the event paths accumulated since it was sent.
struct BackgroundScannerState {
    snapshot: Snapshot,
    event_paths: Vec<Arc<Path>>,
}
2537
impl BackgroundScanner {
    /// Creates a scanner over `snapshot`. Progress and snapshot updates are
    /// reported through `status_updates_tx`; interactive path refreshes are
    /// serviced from `refresh_requests_rx`.
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            // The previous state starts as a copy of the initial snapshot, so
            // the first status update is diffed against the starting point.
            prev_state: Mutex::new(BackgroundScannerState {
                snapshot: snapshot.snapshot.clone(),
                event_paths: Default::default(),
            }),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }

    /// Main loop: performs the initial recursive scan, then processes
    /// filesystem events and refresh requests until the worktree is dropped
    /// (both channels close).
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If the root itself is ignored by an ancestor's gitignore, mark
            // the root entry accordingly before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Drain any further event batches that are already ready,
                    // so a burst of events is processed as a single update.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }

    /// Reloads the given paths, records them as pending event paths, and sends
    /// a status update holding `barrier` so the requester can await completion.
    /// Returns `false` if the status-update receiver has been dropped.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        if let Some(mut paths) = self.reload_entries_for_paths(paths, None).await {
            paths.sort_unstable();
            util::extend_sorted(
                &mut self.prev_state.lock().event_paths,
                paths,
                usize::MAX,
                Ord::cmp,
            );
        }
        self.send_status_update(false, Some(barrier))
    }

    /// Handles a batch of filesystem events: reloads the affected entries
    /// (recursively scanning new directories), refreshes ignore statuses,
    /// prunes git repositories whose `.git` directories are gone, and emits a
    /// status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            paths.sort_unstable();
            util::extend_sorted(
                &mut self.prev_state.lock().event_paths,
                paths,
                usize::MAX,
                Ord::cmp,
            );
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop any git repository whose work directory entry or `.git`
        // directory no longer exists in the snapshot.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|work_directory_id, _| {
            snapshot
                .entry_for_id(*work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                })
        });
        snapshot.git_repositories = git_repositories;

        // Keep repository entries consistent with the surviving repositories.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            snapshot
                .git_repositories
                .get(&entry.work_directory.0)
                .is_some()
        });
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
        self.prev_state.lock().event_paths.clear();
    }

    /// Drains `scan_jobs_rx` across one worker per CPU, scanning directories
    /// recursively. While scanning, workers also service refresh requests and
    /// (when `enable_progress_updates` is set) emit periodic progress updates.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }

    /// Diffs the current snapshot against the previously-reported one and
    /// sends a `ScanState::Updated` message. Returns `false` if the receiver
    /// has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let new_snapshot = self.snapshot.lock().clone();
        let old_snapshot = mem::replace(&mut prev_state.snapshot, new_snapshot.snapshot.clone());

        let changes = self.build_change_set(
            &old_snapshot,
            &new_snapshot.snapshot,
            &prev_state.event_paths,
        );

        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot: new_snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }

    /// Reads one directory's children from the filesystem, inserting the
    /// resulting entries into the snapshot and enqueueing scan jobs for child
    /// directories. Honors `.gitignore` files found within the directory.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per child directory, in order; `None` marks directories
        // that were skipped (recursive symlinks).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // it sorts near the start of the listing, so there should rarely be many such
                // entries. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid infinite recursion in the case of a recursive symlink,
                // by skipping directories whose inode is already an ancestor.
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue the child-directory jobs only after the parent directory has
        // been populated in the snapshot.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }

    /// Re-reads metadata for the given absolute paths and reconciles the
    /// snapshot: removed paths are deleted, existing paths are (re)inserted,
    /// and — when `scan_queue_tx` is provided — directories are enqueued for
    /// recursive rescanning. Returns the affected worktree-relative paths, or
    /// `None` if the root path could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sorting first means a path's descendants follow it, so this dedup
        // drops paths covered by an ancestor that's also in the batch.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    self.reload_repo_for_path(&path, &mut snapshot, self.fs.as_ref());

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Skip recursive-symlink directories, as in scan_dir.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {
                    self.remove_repo_path(&path, &mut snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }

    /// Clears the git statuses recorded for `path` (and its descendants) in
    /// its containing repository, after the path was removed. Paths inside a
    /// `.git` directory itself are left alone.
    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        if !path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let scan_id = snapshot.scan_id;
            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry
                    .worktree_statuses
                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
            });
        }

        Some(())
    }

    /// Refreshes git state in response to a change at `path`. For paths inside
    /// a `.git` directory, reloads (or builds) the whole repository's index,
    /// branch, and statuses; for working-copy paths, refreshes the status of
    /// just that path. Returns `None` when there is nothing to update.
    fn reload_repo_for_path(
        &self,
        path: &Path,
        snapshot: &mut LocalSnapshot,
        fs: &dyn Fs,
    ) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let (entry_id, repo_ptr) = {
                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
                    // No repository is registered for this `.git` dir yet —
                    // build one and bail out of this update.
                    let dot_git_dir = path.ancestors()
                        .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
                        .next()?;

                    snapshot.build_repo(dot_git_dir.into(), fs);
                    return None;
                };
                // Short circuit if this repository was already fully scanned
                // during the current scan.
                if repo.full_scan_id == scan_id {
                    return None;
                }
                (*entry_id, repo.repo_ptr.to_owned())
            };

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            let repo = repo_ptr.lock();
            repo.reload_index();
            let branch = repo.branch_name();
            let statuses = repo.worktree_statuses().unwrap_or_default();

            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.scan_id = scan_id;
                entry.full_scan_id = scan_id;
            });

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.branch = branch.map(Into::into);
                entry.worktree_statuses = statuses;
            });
        } else {
            // Ignored paths carry no git status; drop any stale one.
            if snapshot
                .entry_for_path(&path)
                .map(|entry| entry.is_ignored)
                .unwrap_or(false)
            {
                self.remove_repo_path(&path, snapshot);
                return None;
            }

            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let status = {
                let local_repo = snapshot.get_local_repo(&repo)?;

                // Short circuit if we've already scanned everything
                if local_repo.full_scan_id == scan_id {
                    return None;
                }

                let git_ptr = local_repo.repo_ptr.lock();
                git_ptr.worktree_status(&repo_path)
            };

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            snapshot.repository_entries.update(&work_dir, |entry| {
                if let Some(status) = status {
                    entry.worktree_statuses.insert(repo_path, status);
                } else {
                    entry.worktree_statuses.remove(&repo_path);
                }
            });
        }

        Some(())
    }

    /// Recomputes ignore statuses for subtrees whose `.gitignore` files
    /// changed during the last scan, removing records of deleted gitignores
    /// and fanning the recursive status update out across worker tasks.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip any paths nested inside the one we're about to process;
            // the recursive update will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }

    /// Recomputes the ignore status of the direct children of one directory,
    /// enqueueing further jobs for child directories and committing any
    /// changed statuses back to the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only emit edits for entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Diffs two snapshots, restricted to the subtrees rooted at
    /// `event_paths`, producing a map of per-entry changes. Entries matched by
    /// id across snapshots are compared by mtime to detect updates.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> HashMap<(Arc<Path>, ProjectEntryId), PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path.clone());
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the subtree
                        // rooted at `path`.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert((old_entry.path.clone(), old_entry.id), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(
                                        (new_entry.path.clone(), new_entry.id),
                                        AddedOrUpdated,
                                    );
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert((new_entry.path.clone(), new_entry.id), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert((new_entry.path.clone(), new_entry.id), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert((old_entry.path.clone(), old_entry.id), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert((new_entry.path.clone(), new_entry.id), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes
    }

    /// A timer used to pace progress updates: never completes when disabled,
    /// uses a simulated random delay with a fake filesystem under test, and
    /// otherwise fires after 100ms.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
}
3348
3349fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3350 let mut result = root_char_bag;
3351 result.extend(
3352 path.to_string_lossy()
3353 .chars()
3354 .map(|c| c.to_ascii_lowercase()),
3355 );
3356 result
3357}
3358
/// A unit of work for the background scanner: scan one directory and enqueue
/// jobs for its subdirectories.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules in effect for this directory, inherited from ancestors.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for discovered subdirectories are submitted.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories — presumably used to avoid descending
    // into symlink cycles (see `test_circular_symlinks`); TODO confirm.
    ancestor_inodes: TreeSet<u64>,
}
3366
/// A unit of work for refreshing entries' ignore status after a
/// gitignore-related change.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose entries need their status refreshed.
    abs_path: Arc<Path>,
    // Ignore rules in effect for this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which follow-up jobs for subdirectories are submitted.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3372
/// Test-support extension methods for `ModelHandle<Worktree>`.
pub trait WorktreeHandle {
    /// Waits until all pending FS events have been processed by the worktree,
    /// so subsequent assertions observe a quiescent snapshot.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3380
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then remove it and wait for the removal to be observed too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any in-progress scan to complete.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3421
/// Aggregated counts accumulated while seeking through the entry tree; used
/// as a `sum_tree` dimension to position and measure a `Traversal`.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Maximum entry path seen so far.
    max_path: &'a Path,
    // All entries, including directories and ignored entries.
    count: usize,
    // Entries (files and directories) that are not ignored.
    visible_count: usize,
    // Files only, including ignored ones.
    file_count: usize,
    // Files only, excluding ignored ones.
    visible_file_count: usize,
}
3430
3431impl<'a> TraversalProgress<'a> {
3432 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3433 match (include_ignored, include_dirs) {
3434 (true, true) => self.count,
3435 (true, false) => self.file_count,
3436 (false, true) => self.visible_count,
3437 (false, false) => self.visible_file_count,
3438 }
3439 }
3440}
3441
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Folds an entry summary into the running progress: `max_path` is
    // replaced by the incoming summary's maximum, and all counts are summed.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3451
3452impl<'a> Default for TraversalProgress<'a> {
3453 fn default() -> Self {
3454 Self {
3455 max_path: Path::new(""),
3456 count: 0,
3457 visible_count: 0,
3458 file_count: 0,
3459 visible_file_count: 0,
3460 }
3461 }
3462}
3463
/// A filtered, cursor-based iterator over a snapshot's entries. The flags
/// control whether directories and ignored entries are yielded.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3469
3470impl<'a> Traversal<'a> {
3471 pub fn advance(&mut self) -> bool {
3472 self.advance_to_offset(self.offset() + 1)
3473 }
3474
3475 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
3476 self.cursor.seek_forward(
3477 &TraversalTarget::Count {
3478 count: offset,
3479 include_dirs: self.include_dirs,
3480 include_ignored: self.include_ignored,
3481 },
3482 Bias::Right,
3483 &(),
3484 )
3485 }
3486
3487 pub fn advance_to_sibling(&mut self) -> bool {
3488 while let Some(entry) = self.cursor.item() {
3489 self.cursor.seek_forward(
3490 &TraversalTarget::PathSuccessor(&entry.path),
3491 Bias::Left,
3492 &(),
3493 );
3494 if let Some(entry) = self.cursor.item() {
3495 if (self.include_dirs || !entry.is_dir())
3496 && (self.include_ignored || !entry.is_ignored)
3497 {
3498 return true;
3499 }
3500 }
3501 }
3502 false
3503 }
3504
3505 pub fn entry(&self) -> Option<&'a Entry> {
3506 self.cursor.item()
3507 }
3508
3509 pub fn offset(&self) -> usize {
3510 self.cursor
3511 .start()
3512 .count(self.include_dirs, self.include_ignored)
3513 }
3514}
3515
3516impl<'a> Iterator for Traversal<'a> {
3517 type Item = &'a Entry;
3518
3519 fn next(&mut self) -> Option<Self::Item> {
3520 if let Some(item) = self.entry() {
3521 self.advance();
3522 Some(item)
3523 } else {
3524 None
3525 }
3526 }
3527}
3528
/// Seek targets used to position a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek to the first entry after the given path that is not one of its
    /// descendants.
    PathSuccessor(&'a Path),
    /// Seek until `count` entries (under the given filters) have been passed.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3539
3540impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3541 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3542 match self {
3543 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3544 TraversalTarget::PathSuccessor(path) => {
3545 if !cursor_location.max_path.starts_with(path) {
3546 Ordering::Equal
3547 } else {
3548 Ordering::Greater
3549 }
3550 }
3551 TraversalTarget::Count {
3552 count,
3553 include_dirs,
3554 include_ignored,
3555 } => Ord::cmp(
3556 count,
3557 &cursor_location.count(*include_dirs, *include_ignored),
3558 ),
3559 }
3560 }
3561}
3562
/// Iterator over the direct children of `parent_path`, implemented by
/// skipping each yielded child's descendants via `advance_to_sibling`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3567
3568impl<'a> Iterator for ChildEntriesIter<'a> {
3569 type Item = &'a Entry;
3570
3571 fn next(&mut self) -> Option<Self::Item> {
3572 if let Some(item) = self.traversal.entry() {
3573 if item.path.starts_with(&self.parent_path) {
3574 self.traversal.advance_to_sibling();
3575 return Some(item);
3576 }
3577 }
3578 None
3579 }
3580}
3581
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes an entry for transmission to remote collaborators.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths travel as strings; non-UTF-8 components are lossily converted.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3595
3596impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3597 type Error = anyhow::Error;
3598
3599 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3600 if let Some(mtime) = entry.mtime {
3601 let kind = if entry.is_dir {
3602 EntryKind::Dir
3603 } else {
3604 let mut char_bag = *root_char_bag;
3605 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3606 EntryKind::File(char_bag)
3607 };
3608 let path: Arc<Path> = PathBuf::from(entry.path).into();
3609 Ok(Entry {
3610 id: ProjectEntryId::from_proto(entry.id),
3611 kind,
3612 path,
3613 inode: entry.inode,
3614 mtime: mtime.into(),
3615 is_symlink: entry.is_symlink,
3616 is_ignored: entry.is_ignored,
3617 })
3618 } else {
3619 Err(anyhow!(
3620 "missing mtime in remote worktree entry {:?}",
3621 entry.path
3622 ))
3623 }
3624 }
3625}
3626
3627#[cfg(test)]
3628mod tests {
3629 use super::*;
3630 use fs::{FakeFs, RealFs};
3631 use gpui::{executor::Deterministic, TestAppContext};
3632 use pretty_assertions::assert_eq;
3633 use rand::prelude::*;
3634 use serde_json::json;
3635 use std::{env, fmt::Write};
3636 use util::{http::FakeHttpClient, test::temp_tree};
3637
    // Verifies that worktree traversal honors the `include_ignored` flag:
    // "a/b" matches the `.gitignore` rule, so it only appears when ignored
    // entries are requested.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Without ignored entries, "a/b" is absent...
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // ...with ignored entries, it is included.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3695
    // Verifies that the scanner records symlinks that point back at ancestor
    // directories without descending into them (which would never terminate),
    // and that renaming such a symlink is picked up by a rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points at its parent directory, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // The symlinks appear as entries, but their contents are not scanned.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3775
    // Verifies that ignore status is computed from both the worktree's own
    // `.gitignore` and one in an ancestor directory outside the worktree, and
    // that files created after the initial scan get the right status too.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create new files after the initial scan; a rescan must assign them
        // the same ignore statuses as their pre-existing siblings.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3854
    // Verifies repository discovery: each path maps to the repository of its
    // nearest ancestor containing a `.git` directory (nested repos included),
    // that changes inside `.git` emit `UpdatedGitRepositories` events, and
    // that deleting `.git` removes the association.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // "c.txt" lives outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1").to_owned())
            );

            // Files under the nested repo resolve to it, not to "dir1".
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1/deps/dep1").to_owned())
            );
        });

        let repo_update_events = Arc::new(Mutex::new(vec![]));
        tree.update(cx, |_, cx| {
            let repo_update_events = repo_update_events.clone();
            cx.subscribe(&tree, move |_, _, event, _| {
                if let Event::UpdatedGitRepositories(update) = event {
                    repo_update_events.lock().push(update.clone());
                }
            })
            .detach();
        });

        // Touching a file inside `.git` should notify about that repository.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        assert_eq!(
            repo_update_events.lock()[0]
                .keys()
                .cloned()
                .collect::<Vec<Arc<Path>>>(),
            vec![Path::new("dir1").into()]
        );

        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3945
3946 #[gpui::test]
3947 async fn test_git_status(cx: &mut TestAppContext) {
3948 #[track_caller]
3949 fn git_init(path: &Path) -> git2::Repository {
3950 git2::Repository::init(path).expect("Failed to initialize git repository")
3951 }
3952
3953 #[track_caller]
3954 fn git_add(path: &Path, repo: &git2::Repository) {
3955 let mut index = repo.index().expect("Failed to get index");
3956 index.add_path(path).expect("Failed to add a.txt");
3957 index.write().expect("Failed to write index");
3958 }
3959
3960 #[track_caller]
3961 fn git_remove_index(path: &Path, repo: &git2::Repository) {
3962 let mut index = repo.index().expect("Failed to get index");
3963 index.remove_path(path).expect("Failed to add a.txt");
3964 index.write().expect("Failed to write index");
3965 }
3966
3967 #[track_caller]
3968 fn git_commit(msg: &'static str, repo: &git2::Repository) {
3969 use git2::Signature;
3970
3971 let signature = Signature::now("test", "test@zed.dev").unwrap();
3972 let oid = repo.index().unwrap().write_tree().unwrap();
3973 let tree = repo.find_tree(oid).unwrap();
3974 if let Some(head) = repo.head().ok() {
3975 let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
3976
3977 let parent_commit = parent_obj.as_commit().unwrap();
3978
3979 repo.commit(
3980 Some("HEAD"),
3981 &signature,
3982 &signature,
3983 msg,
3984 &tree,
3985 &[parent_commit],
3986 )
3987 .expect("Failed to commit with parent");
3988 } else {
3989 repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
3990 .expect("Failed to commit");
3991 }
3992 }
3993
3994 #[track_caller]
3995 fn git_stash(repo: &mut git2::Repository) {
3996 use git2::Signature;
3997
3998 let signature = Signature::now("test", "test@zed.dev").unwrap();
3999 repo.stash_save(&signature, "N/A", None)
4000 .expect("Failed to stash");
4001 }
4002
4003 #[track_caller]
4004 fn git_reset(offset: usize, repo: &git2::Repository) {
4005 let head = repo.head().expect("Couldn't get repo head");
4006 let object = head.peel(git2::ObjectType::Commit).unwrap();
4007 let commit = object.as_commit().unwrap();
4008 let new_head = commit
4009 .parents()
4010 .inspect(|parnet| {
4011 parnet.message();
4012 })
4013 .skip(offset)
4014 .next()
4015 .expect("Not enough history");
4016 repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
4017 .expect("Could not reset");
4018 }
4019
4020 #[allow(dead_code)]
4021 #[track_caller]
4022 fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
4023 repo.statuses(None)
4024 .unwrap()
4025 .iter()
4026 .map(|status| (status.path().unwrap().to_string(), status.status()))
4027 .collect()
4028 }
4029
4030 const IGNORE_RULE: &'static str = "**/target";
4031
4032 let root = temp_tree(json!({
4033 "project": {
4034 "a.txt": "a",
4035 "b.txt": "bb",
4036 "c": {
4037 "d": {
4038 "e.txt": "eee"
4039 }
4040 },
4041 "f.txt": "ffff",
4042 "target": {
4043 "build_file": "???"
4044 },
4045 ".gitignore": IGNORE_RULE
4046 },
4047
4048 }));
4049
4050 let http_client = FakeHttpClient::with_404_response();
4051 let client = cx.read(|cx| Client::new(http_client, cx));
4052 let tree = Worktree::local(
4053 client,
4054 root.path(),
4055 true,
4056 Arc::new(RealFs),
4057 Default::default(),
4058 &mut cx.to_async(),
4059 )
4060 .await
4061 .unwrap();
4062
4063 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4064 .await;
4065
4066 const A_TXT: &'static str = "a.txt";
4067 const B_TXT: &'static str = "b.txt";
4068 const E_TXT: &'static str = "c/d/e.txt";
4069 const F_TXT: &'static str = "f.txt";
4070 const DOTGITIGNORE: &'static str = ".gitignore";
4071 const BUILD_FILE: &'static str = "target/build_file";
4072
4073 let work_dir = root.path().join("project");
4074 let mut repo = git_init(work_dir.as_path());
4075 repo.add_ignore_rule(IGNORE_RULE).unwrap();
4076 git_add(Path::new(A_TXT), &repo);
4077 git_add(Path::new(E_TXT), &repo);
4078 git_add(Path::new(DOTGITIGNORE), &repo);
4079 git_commit("Initial commit", &repo);
4080
4081 std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
4082
4083 tree.flush_fs_events(cx).await;
4084
4085 // Check that the right git state is observed on startup
4086 tree.read_with(cx, |tree, _cx| {
4087 let snapshot = tree.snapshot();
4088 assert_eq!(snapshot.repository_entries.iter().count(), 1);
4089 let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
4090 assert_eq!(dir.0.as_ref(), Path::new("project"));
4091
4092 assert_eq!(repo.worktree_statuses.iter().count(), 3);
4093 assert_eq!(
4094 repo.worktree_statuses.get(&Path::new(A_TXT).into()),
4095 Some(&GitFileStatus::Modified)
4096 );
4097 assert_eq!(
4098 repo.worktree_statuses.get(&Path::new(B_TXT).into()),
4099 Some(&GitFileStatus::Added)
4100 );
4101 assert_eq!(
4102 repo.worktree_statuses.get(&Path::new(F_TXT).into()),
4103 Some(&GitFileStatus::Added)
4104 );
4105 });
4106
4107 git_add(Path::new(A_TXT), &repo);
4108 git_add(Path::new(B_TXT), &repo);
4109 git_commit("Committing modified and added", &repo);
4110 tree.flush_fs_events(cx).await;
4111
4112 // Check that repo only changes are tracked
4113 tree.read_with(cx, |tree, _cx| {
4114 let snapshot = tree.snapshot();
4115 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4116
4117 assert_eq!(repo.worktree_statuses.iter().count(), 1);
4118 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4119 assert_eq!(repo.worktree_statuses.get(&Path::new(B_TXT).into()), None);
4120 assert_eq!(
4121 repo.worktree_statuses.get(&Path::new(F_TXT).into()),
4122 Some(&GitFileStatus::Added)
4123 );
4124 });
4125
4126 git_reset(0, &repo);
4127 git_remove_index(Path::new(B_TXT), &repo);
4128 git_stash(&mut repo);
4129 std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
4130 std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
4131 tree.flush_fs_events(cx).await;
4132
4133 // Check that more complex repo changes are tracked
4134 tree.read_with(cx, |tree, _cx| {
4135 let snapshot = tree.snapshot();
4136 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4137
4138 assert_eq!(repo.worktree_statuses.iter().count(), 3);
4139 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4140 assert_eq!(
4141 repo.worktree_statuses.get(&Path::new(B_TXT).into()),
4142 Some(&GitFileStatus::Added)
4143 );
4144 assert_eq!(
4145 repo.worktree_statuses.get(&Path::new(E_TXT).into()),
4146 Some(&GitFileStatus::Modified)
4147 );
4148 assert_eq!(
4149 repo.worktree_statuses.get(&Path::new(F_TXT).into()),
4150 Some(&GitFileStatus::Added)
4151 );
4152 });
4153
4154 std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
4155 std::fs::remove_dir_all(work_dir.join("c")).unwrap();
4156 std::fs::write(
4157 work_dir.join(DOTGITIGNORE),
4158 [IGNORE_RULE, "f.txt"].join("\n"),
4159 )
4160 .unwrap();
4161
4162 git_add(Path::new(DOTGITIGNORE), &repo);
4163 git_commit("Committing modified git ignore", &repo);
4164
4165 tree.flush_fs_events(cx).await;
4166
4167 // Check that non-repo behavior is tracked
4168 tree.read_with(cx, |tree, _cx| {
4169 let snapshot = tree.snapshot();
4170 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4171
4172 assert_eq!(repo.worktree_statuses.iter().count(), 0);
4173 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4174 assert_eq!(repo.worktree_statuses.get(&Path::new(B_TXT).into()), None);
4175 assert_eq!(repo.worktree_statuses.get(&Path::new(E_TXT).into()), None);
4176 assert_eq!(repo.worktree_statuses.get(&Path::new(F_TXT).into()), None);
4177 });
4178 }
4179
    // Verifies that `write_file` creates entries for the written files and
    // that their ignore status reflects the worktree's `.gitignore`.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4233
    // Verifies that an entry created while the initial scan is still running
    // is retained in the snapshot, and that remote snapshots converge on the
    // same state when the resulting updates are applied.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Snapshot taken before the scan completes; used as the remote's state.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Applying the update stream must reproduce the post-scan snapshot.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
4282
    // Randomized test: mutates the worktree while its initial scan is still in
    // progress, repeatedly checking snapshot invariants and that applying
    // remote updates reproduces the local snapshot exactly.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Both knobs are overridable from the environment for longer runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot captured before the scan completes.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        // After the scan finishes, a final round of updates must converge.
        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
4359
    // Randomized test of incremental rescanning: applies random FS and
    // worktree mutations, verifies that `UpdatedEntries` events are sufficient
    // to mirror the snapshot, that a fresh scan of the final FS state matches,
    // and that updates bring arbitrary historical snapshots up to date.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for ((path, _), change_type) in changes.iter() {
                        let path = path.clone();
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }

                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }

                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }

                            PathChange::AddedOrUpdated => {
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }

                    // The mirror maintained purely from events must equal the
                    // worktree's actual path list after every event batch.
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if rng.gen_bool(0.2) {
                worktree
                    .update(cx, |worktree, cx| {
                        randomly_mutate_worktree(worktree, &mut rng, cx)
                    })
                    .await
                    .unwrap();
            } else {
                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            }

            // Sometimes deliver only a prefix of the buffered FS events, to
            // exercise rescans that observe partial histories.
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A brand-new worktree scanning the same FS must land on an identical
        // snapshot.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the historical snapshot so it
                // looks like one built without them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
4528
4529 fn randomly_mutate_worktree(
4530 worktree: &mut Worktree,
4531 rng: &mut impl Rng,
4532 cx: &mut ModelContext<Worktree>,
4533 ) -> Task<Result<()>> {
4534 let worktree = worktree.as_local_mut().unwrap();
4535 let snapshot = worktree.snapshot();
4536 let entry = snapshot.entries(false).choose(rng).unwrap();
4537
4538 match rng.gen_range(0_u32..100) {
4539 0..=33 if entry.path.as_ref() != Path::new("") => {
4540 log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
4541 worktree.delete_entry(entry.id, cx).unwrap()
4542 }
4543 ..=66 if entry.path.as_ref() != Path::new("") => {
4544 let other_entry = snapshot.entries(false).choose(rng).unwrap();
4545 let new_parent_path = if other_entry.is_dir() {
4546 other_entry.path.clone()
4547 } else {
4548 other_entry.path.parent().unwrap().into()
4549 };
4550 let mut new_path = new_parent_path.join(gen_name(rng));
4551 if new_path.starts_with(&entry.path) {
4552 new_path = gen_name(rng).into();
4553 }
4554
4555 log::info!(
4556 "renaming entry {:?} ({}) to {:?}",
4557 entry.path,
4558 entry.id.0,
4559 new_path
4560 );
4561 let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
4562 cx.foreground().spawn(async move {
4563 task.await?;
4564 Ok(())
4565 })
4566 }
4567 _ => {
4568 let task = if entry.is_dir() {
4569 let child_path = entry.path.join(gen_name(rng));
4570 let is_dir = rng.gen_bool(0.3);
4571 log::info!(
4572 "creating {} at {:?}",
4573 if is_dir { "dir" } else { "file" },
4574 child_path,
4575 );
4576 worktree.create_entry(child_path, is_dir, cx)
4577 } else {
4578 log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
4579 worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
4580 };
4581 cx.foreground().spawn(async move {
4582 task.await?;
4583 Ok(())
4584 })
4585 }
4586 }
4587 }
4588
    /// Applies one random mutation directly to the fake filesystem under
    /// `root_path`: create a file or directory, write a `.gitignore`, rename
    /// an entry (possibly overwriting a directory), or delete one.
    ///
    /// * `insertion_probability` — chance in `[0, 1]` that the mutation is a
    ///   creation rather than one of the other mutations; when the tree holds
    ///   nothing but the root directory, a creation is forced.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition every path under `root_path` into files and directories.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Creation branch — forced when only the root directory exists,
        // otherwise taken with `insertion_probability`.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            // Coin flip: new directory vs. new (empty) file.
            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Gitignore branch (5%): write a `.gitignore` into a random
            // directory, listing a random subset of that directory's subtree.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // Note: `subdirs` always contains `ignore_dir_path` itself
            // (`starts_with` is reflexive), so it is never empty and the
            // exclusive range below cannot panic.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                // Inclusive range: may ignore all, some, or none of the files.
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // Exclusive range: never selects *every* subdir (which would
                // include `ignore_dir_path` itself).
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // One ignored path per line, relative to the gitignore's dir
            // (appended via `std::fmt::Write`, hence the `.unwrap()`).
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Rename/delete branch: pick any file, or any dir other than
            // `dirs[0]` — presumably the root, so it is never renamed or
            // deleted; NOTE(review): verify `paths()` yields the root first.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // The new parent must not live inside the moved path, which
                // would create a cycle.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally replace an existing directory wholesale
                // (never an ancestor of `old_path`).
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    // Remove the target dir first, then rename onto its path.
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4747
4748 fn gen_name(rng: &mut impl Rng) -> String {
4749 (0..6)
4750 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4751 .map(char::from)
4752 .collect()
4753 }
4754
4755 impl LocalSnapshot {
4756 fn check_invariants(&self) {
4757 assert_eq!(
4758 self.entries_by_path
4759 .cursor::<()>()
4760 .map(|e| (&e.path, e.id))
4761 .collect::<Vec<_>>(),
4762 self.entries_by_id
4763 .cursor::<()>()
4764 .map(|e| (&e.path, e.id))
4765 .collect::<collections::BTreeSet<_>>()
4766 .into_iter()
4767 .collect::<Vec<_>>(),
4768 "entries_by_path and entries_by_id are inconsistent"
4769 );
4770
4771 let mut files = self.files(true, 0);
4772 let mut visible_files = self.files(false, 0);
4773 for entry in self.entries_by_path.cursor::<()>() {
4774 if entry.is_file() {
4775 assert_eq!(files.next().unwrap().inode, entry.inode);
4776 if !entry.is_ignored {
4777 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4778 }
4779 }
4780 }
4781
4782 assert!(files.next().is_none());
4783 assert!(visible_files.next().is_none());
4784
4785 let mut bfs_paths = Vec::new();
4786 let mut stack = vec![Path::new("")];
4787 while let Some(path) = stack.pop() {
4788 bfs_paths.push(path);
4789 let ix = stack.len();
4790 for child_entry in self.child_entries(path) {
4791 stack.insert(ix, &child_entry.path);
4792 }
4793 }
4794
4795 let dfs_paths_via_iter = self
4796 .entries_by_path
4797 .cursor::<()>()
4798 .map(|e| e.path.as_ref())
4799 .collect::<Vec<_>>();
4800 assert_eq!(bfs_paths, dfs_paths_via_iter);
4801
4802 let dfs_paths_via_traversal = self
4803 .entries(true)
4804 .map(|e| e.path.as_ref())
4805 .collect::<Vec<_>>();
4806 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4807
4808 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4809 let ignore_parent_path =
4810 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4811 assert!(self.entry_for_path(&ignore_parent_path).is_some());
4812 assert!(self
4813 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4814 .is_some());
4815 }
4816 }
4817
4818 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4819 let mut paths = Vec::new();
4820 for entry in self.entries_by_path.cursor::<()>() {
4821 if include_ignored || !entry.is_ignored {
4822 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4823 }
4824 }
4825 paths.sort_by(|a, b| a.0.cmp(b.0));
4826 paths
4827 }
4828 }
4829}