1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{
10 repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use smol::channel::{self, Sender};
40use std::{
41 any::Any,
42 cmp::{self, Ordering},
43 convert::TryFrom,
44 ffi::OsStr,
45 fmt,
46 future::Future,
47 mem,
48 ops::{Deref, DerefMut},
49 path::{Path, PathBuf},
50 pin::Pin,
51 sync::{
52 atomic::{AtomicUsize, Ordering::SeqCst},
53 Arc,
54 },
55 time::{Duration, SystemTime},
56};
57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
58use util::{paths::HOME, ResultExt, TryFutureExt};
59
/// Identifies a worktree within a project. Wraps the model id of the
/// worktree's gpui model (see `WorktreeId::from_usize` usage below).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A worktree is either local (backed by the filesystem on this machine) or
/// remote (a replica of a collaborator's worktree, kept in sync via RPC).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    // Latest snapshot produced by the background scanner.
    snapshot: LocalSnapshot,
    // Requests the scanner to rescan specific paths; the barrier sender is
    // dropped when the scan completes, waking the requester.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch pair used to observe whether a scan is currently in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present while this worktree is being shared with collaborators.
    share: Option<ShareState>,
    // Full diagnostic entries, keyed by worktree-relative path, then grouped
    // per language server.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    // Aggregated error/warning counts per path and language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    // Whether this worktree is shown in the project panel / UI.
    visible: bool,
}
86
/// A replica of a worktree that lives on another machine, updated by applying
/// `proto::UpdateWorktree` messages received over RPC.
pub struct RemoteWorktree {
    // Foreground copy of the snapshot, synced from `background_snapshot`.
    snapshot: Snapshot,
    // Snapshot mutated on a background task as remote updates arrive.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once the worktree is disconnected and updates stop flowing.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters to notify once a given scan id has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
99
/// An immutable view of a worktree's contents at a point in time: its entries,
/// indexed both by path and by id, plus any git repositories found within it.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root on the host filesystem.
    abs_path: Arc<Path>,
    // File name of the root, used for display and fuzzy matching.
    root_name: String,
    // Lowercased characters of `root_name`, used by the fuzzy matcher.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Repositories keyed by their work directory (the folder containing .git).
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
122
impl Snapshot {
    /// Returns a clone of the repository entry whose work directory contains
    /// `path`, preferring the most deeply nested match (e.g. a submodule over
    /// its parent repository).
    pub fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
        let mut max_len = 0;
        let mut current_candidate = None;
        for (work_directory, repo) in (&self.repository_entries).iter() {
            if repo.contains(self, path) {
                // Keep the containing repo with the longest work-directory
                // path seen so far (ties go to the later entry).
                if work_directory.0.as_os_str().len() >= max_len {
                    current_candidate = Some(repo);
                    max_len = work_directory.0.as_os_str().len();
                } else {
                    // A containing repo that is *shorter* than the current
                    // best ends the search — presumably no deeper match can
                    // follow in the map's path-sorted order (TODO confirm).
                    break;
                }
            }
        }

        current_candidate.map(|entry| entry.to_owned())
    }
}
141
/// Metadata about a git repository discovered inside a worktree: its work
/// directory, checked-out branch, and per-file git statuses.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    // Current branch name, if one could be determined.
    pub(crate) branch: Option<Arc<str>>,
    // Git status keyed by repository-relative path.
    pub(crate) worktree_statuses: TreeMap<RepoPath, GitFileStatus>,
}
148
149fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
150 proto::GitStatus::from_i32(git_status).map(|status| match status {
151 proto::GitStatus::Added => GitFileStatus::Added,
152 proto::GitStatus::Modified => GitFileStatus::Modified,
153 proto::GitStatus::Conflict => GitFileStatus::Conflict,
154 })
155}
156
impl RepositoryEntry {
    /// Returns the checked-out branch name, if known.
    pub fn branch(&self) -> Option<Arc<str>> {
        self.branch.clone()
    }

    /// Returns the project entry id of this repository's work directory.
    pub fn work_directory_id(&self) -> ProjectEntryId {
        *self.work_directory
    }

    /// Resolves the work directory's path within `snapshot`, if the entry
    /// still exists there.
    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
        snapshot
            .entry_for_id(self.work_directory_id())
            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
    }

    /// Whether `path` (worktree-relative) lies inside this repository's
    /// work directory.
    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
        self.work_directory.contains(snapshot, path)
    }

    /// Git status for exactly the file at `path`, if one is recorded.
    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| self.worktree_statuses.get(&repo_path))
            .cloned()
    }

    /// Git status for `path`, treating directories as modified when any
    /// descendant has a recorded status that isn't an exact match.
    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| {
                // Scan statuses at or below `repo_path`; an exact match wins,
                // otherwise the first descendant marks the directory Modified.
                self.worktree_statuses
                    .iter_from(&repo_path)
                    .take_while(|(key, _)| key.starts_with(&repo_path))
                    .map(|(path, status)| if path == &repo_path {
                        status
                    } else {
                        &GitFileStatus::Modified
                    })
                    .next()
                    .copied()
            })
    }

    /// Builds a protobuf update describing how to transform `other`'s statuses
    /// into `self`'s, by merging the two sorted status maps.
    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
        let mut removed_statuses: Vec<String> = Vec::new();

        // Classic sorted-merge: both iterators yield entries in key order.
        let mut self_statuses = self.worktree_statuses.iter().peekable();
        let mut other_statuses = other.worktree_statuses.iter().peekable();
        loop {
            match (self_statuses.peek(), other_statuses.peek()) {
                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
                    match Ord::cmp(self_repo_path, other_repo_path) {
                        // Present only in self: newly added status.
                        Ordering::Less => {
                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
                            self_statuses.next();
                        }
                        // Present in both: emit only if the status changed.
                        Ordering::Equal => {
                            if self_status != other_status {
                                updated_statuses
                                    .push(make_status_entry(self_repo_path, self_status));
                            }

                            self_statuses.next();
                            other_statuses.next();
                        }
                        // Present only in other: status was removed.
                        Ordering::Greater => {
                            removed_statuses.push(make_repo_path(other_repo_path));
                            other_statuses.next();
                        }
                    }
                }
                (Some((self_repo_path, self_status)), None) => {
                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
                    self_statuses.next();
                }
                (None, Some((other_repo_path, _))) => {
                    removed_statuses.push(make_repo_path(other_repo_path));
                    other_statuses.next();
                }
                (None, None) => break,
            }
        }

        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id().to_proto(),
            branch: self.branch.as_ref().map(|str| str.to_string()),
            removed_worktree_repo_paths: removed_statuses,
            updated_worktree_statuses: updated_statuses,
        }
    }
}
249
250fn make_repo_path(path: &RepoPath) -> String {
251 path.as_os_str().to_string_lossy().to_string()
252}
253
254fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
255 proto::StatusEntry {
256 repo_path: make_repo_path(path),
257 status: match status {
258 GitFileStatus::Added => proto::GitStatus::Added.into(),
259 GitFileStatus::Modified => proto::GitStatus::Modified.into(),
260 GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
261 },
262 }
263}
264
265impl From<&RepositoryEntry> for proto::RepositoryEntry {
266 fn from(value: &RepositoryEntry) -> Self {
267 proto::RepositoryEntry {
268 work_directory_id: value.work_directory.to_proto(),
269 branch: value.branch.as_ref().map(|str| str.to_string()),
270 updated_worktree_statuses: value
271 .worktree_statuses
272 .iter()
273 .map(|(repo_path, status)| make_status_entry(repo_path, status))
274 .collect(),
275 removed_worktree_repo_paths: Default::default(),
276 }
277 }
278}
279
/// This path corresponds to the 'content path' (the folder that contains the .git),
/// stored relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
283
284impl Default for RepositoryWorkDirectory {
285 fn default() -> Self {
286 RepositoryWorkDirectory(Arc::from(Path::new("")))
287 }
288}
289
290impl AsRef<Path> for RepositoryWorkDirectory {
291 fn as_ref(&self) -> &Path {
292 self.0.as_ref()
293 }
294}
295
/// Newtype over the [`ProjectEntryId`] of a repository's work directory,
/// with helpers for resolving paths against a snapshot.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
298
299impl WorkDirectoryEntry {
300 // Note that these paths should be relative to the worktree root.
301 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
302 snapshot
303 .entry_for_id(self.0)
304 .map(|entry| path.starts_with(&entry.path))
305 .unwrap_or(false)
306 }
307
308 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
309 worktree.entry_for_id(self.0).and_then(|entry| {
310 path.strip_prefix(&entry.path)
311 .ok()
312 .map(move |path| path.into())
313 })
314 }
315}
316
// Allows a `WorkDirectoryEntry` to be used anywhere a `&ProjectEntryId`
// is expected.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
324
325impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
326 fn from(value: ProjectEntryId) -> Self {
327 WorkDirectoryEntry(value)
328 }
329}
330
/// A [`Snapshot`] augmented with local-only state needed by the background
/// scanner: gitignore caches, discovered git repositories, and entry-id
/// bookkeeping.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    // Parsed .gitignore per parent directory, with the scan_id at which it
    // was last loaded.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    // work_directory_id
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    // Maps an inode (presumably — TODO confirm) to the id of a removed entry,
    // so re-created paths can keep a stable id.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Shared counter for allocating new entry ids.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
341
/// Local-only handle to a git repository found during scanning, along with
/// the scan ids at which it was last (fully) examined.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    // Scan id at which this repository was last touched.
    pub(crate) scan_id: usize,
    // Scan id of the last full scan of this repository.
    pub(crate) full_scan_id: usize,
    // Shared handle to the underlying libgit-style repository object.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
351
352impl LocalRepositoryEntry {
353 // Note that this path should be relative to the worktree root.
354 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
355 path.starts_with(self.git_dir_path.as_ref())
356 }
357}
358
// Lets a `LocalSnapshot` be used wherever a `&Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
366
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
372
/// Messages sent from the background scanner to the foreground worktree.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot (possibly mid-scan).
    Updated {
        snapshot: LocalSnapshot,
        // Entries that changed since the previous snapshot.
        changes: HashMap<(Arc<Path>, ProjectEntryId), PathChange>,
        // When present, dropping it unblocks whoever requested this scan.
        barrier: Option<barrier::Sender>,
        // True while further updates for the same scan are still coming.
        scanning: bool,
    },
}
382
/// State held while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot to the task that streams updates to guests.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signaled to resume sending updates after a reconnect.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
389
/// Events emitted by a [`Worktree`] model.
pub enum Event {
    /// Entries changed on disk (or a remote snapshot was applied).
    UpdatedEntries(HashMap<(Arc<Path>, ProjectEntryId), PathChange>),
    /// Git repositories were added, removed, or rescanned, keyed by work
    /// directory path.
    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
}
394
// Registers `Worktree` as a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
398
impl Worktree {
    /// Creates a local worktree rooted at `path`, spawning a background
    /// scanner that watches the filesystem and streams snapshot updates back
    /// to the model. Fails if the root path cannot be stat'ed.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            // Initial, empty snapshot; scan_id starts at 1 with no scan yet
            // completed (completed_scan_id == 0).
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with a root entry so the worktree is usable
            // before the first scan finishes.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Foreground task: apply scanner state messages to the model and
            // notify observers. Exits when the model or the channel is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier wakes whoever requested
                                // this refresh.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Background task: run the scanner over filesystem events.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a remote worktree replica from metadata received over RPC.
    /// Updates are applied to a background snapshot and then synced to the
    /// foreground model.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Background task: apply each incoming protobuf update to the
            // shared snapshot, then signal the foreground task below.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Foreground task: copy the background snapshot into the model,
            // emit events, and resolve any scan-id subscriptions that have
            // now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// Downcasts to the local variant, if applicable.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Downcasts to the remote variant, if applicable.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable downcast to the local variant, if applicable.
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable downcast to the remote variant, if applicable.
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Whether this worktree is backed by the local filesystem.
    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    /// Whether this worktree is a remote replica.
    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// Returns a plain `Snapshot` of the current state, regardless of variant.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The id of the most recently started scan. See `Snapshot::scan_id`.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The most recent scan id whose work (and all prior scans) completed.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    /// Whether this worktree should be shown in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// Replica id of this worktree; the local host is always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over all diagnostic summaries, flattened to
    /// (path, language server, summary) triples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
693
694impl LocalWorktree {
695 pub fn contains_abs_path(&self, path: &Path) -> bool {
696 path.starts_with(&self.abs_path)
697 }
698
699 fn absolutize(&self, path: &Path) -> PathBuf {
700 if path.file_name().is_some() {
701 self.abs_path.join(path)
702 } else {
703 self.abs_path.to_path_buf()
704 }
705 }
706
    /// Loads the file at `path` into a new [`Buffer`] model with the given
    /// remote `id`, including its git diff base when available, and kicks off
    /// an initial git-diff computation.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer off the main thread.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
729
730 pub fn diagnostics_for_path(
731 &self,
732 path: &Path,
733 ) -> Vec<(
734 LanguageServerId,
735 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
736 )> {
737 self.diagnostics.get(path).cloned().unwrap_or_default()
738 }
739
    /// Replaces the diagnostics reported by `server_id` for `worktree_path`,
    /// updating the per-path summaries and, when shared, broadcasting the new
    /// summary to collaborators.
    ///
    /// Returns `Ok(true)` if the summary changed in a way observers care
    /// about (i.e. it was non-empty before and/or after).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: drop its entry, and the
            // whole path entry if no other server has diagnostics there.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // Keep the per-path vec sorted by server id for binary search.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            // Best-effort broadcast; errors are logged rather than propagated.
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
799
    /// Installs a new snapshot from the background scanner, forwarding it to
    /// collaborators when shared and emitting an event for any git
    /// repositories that changed between the old and new snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        // Diff repositories *before* replacing the snapshot.
        let updated_repos =
            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
813
    /// Computes which git repositories differ between two snapshots' maps,
    /// keyed by work-directory path. A repo counts as changed when it was
    /// added, removed, or its `scan_id` differs.
    ///
    /// Entry paths are resolved against `self` (the current snapshot); repos
    /// whose entries no longer resolve are silently skipped.
    fn changed_repos(
        &self,
        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
        let mut diff = HashMap::default();
        // Sorted-merge over both maps, which iterate in entry-id order.
        let mut old_repos = old_repos.iter().peekable();
        let mut new_repos = new_repos.iter().peekable();
        loop {
            match (old_repos.peek(), new_repos.peek()) {
                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
                    match Ord::cmp(old_entry_id, new_entry_id) {
                        // Only in old: repository removed.
                        Ordering::Less => {
                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
                                diff.insert(entry.path.clone(), (*old_repo).clone());
                            }
                            old_repos.next();
                        }
                        // In both: changed only if rescanned.
                        Ordering::Equal => {
                            if old_repo.scan_id != new_repo.scan_id {
                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                    diff.insert(entry.path.clone(), (*new_repo).clone());
                                }
                            }

                            old_repos.next();
                            new_repos.next();
                        }
                        // Only in new: repository added.
                        Ordering::Greater => {
                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                diff.insert(entry.path.clone(), (*new_repo).clone());
                            }
                            new_repos.next();
                        }
                    }
                }
                (Some((old_entry_id, old_repo)), None) => {
                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
                        diff.insert(entry.path.clone(), (*old_repo).clone());
                    }
                    old_repos.next();
                }
                (None, Some((new_entry_id, new_repo))) => {
                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
                        diff.insert(entry.path.clone(), (*new_repo).clone());
                    }
                    new_repos.next();
                }
                (None, None) => break,
            }
        }
        diff
    }
867
868 pub fn scan_complete(&self) -> impl Future<Output = ()> {
869 let mut is_scanning_rx = self.is_scanning.1.clone();
870 async move {
871 let mut is_scanning = is_scanning_rx.borrow().clone();
872 while is_scanning {
873 if let Some(value) = is_scanning_rx.recv().await {
874 is_scanning = value;
875 } else {
876 break;
877 }
878 }
879 }
880 }
881
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
885
886 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
887 proto::WorktreeMetadata {
888 id: self.id().to_proto(),
889 root_name: self.root_name().to_string(),
890 visible: self.visible,
891 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
892 }
893 }
894
    /// Reads the file at the worktree-relative `path`, returning its `File`
    /// metadata, its contents, and — when the file is inside a git
    /// repository — the git index text to diff against.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // Kick off loading the git index text in the background, in parallel
        // with reading the file itself.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
949
950 pub fn save_buffer(
951 &self,
952 buffer_handle: ModelHandle<Buffer>,
953 path: Arc<Path>,
954 has_changed_file: bool,
955 cx: &mut ModelContext<Worktree>,
956 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
957 let handle = cx.handle();
958 let buffer = buffer_handle.read(cx);
959
960 let rpc = self.client.clone();
961 let buffer_id = buffer.remote_id();
962 let project_id = self.share.as_ref().map(|share| share.project_id);
963
964 let text = buffer.as_rope().clone();
965 let fingerprint = text.fingerprint();
966 let version = buffer.version();
967 let save = self.write_file(path, text, buffer.line_ending(), cx);
968
969 cx.as_mut().spawn(|mut cx| async move {
970 let entry = save.await?;
971
972 if has_changed_file {
973 let new_file = Arc::new(File {
974 entry_id: entry.id,
975 worktree: handle,
976 path: entry.path,
977 mtime: entry.mtime,
978 is_local: true,
979 is_deleted: false,
980 });
981
982 if let Some(project_id) = project_id {
983 rpc.send(proto::UpdateBufferFile {
984 project_id,
985 buffer_id,
986 file: Some(new_file.to_proto()),
987 })
988 .log_err();
989 }
990
991 buffer_handle.update(&mut cx, |buffer, cx| {
992 if has_changed_file {
993 buffer.file_updated(new_file, cx).detach();
994 }
995 });
996 }
997
998 if let Some(project_id) = project_id {
999 rpc.send(proto::BufferSaved {
1000 project_id,
1001 buffer_id,
1002 version: serialize_version(&version),
1003 mtime: Some(entry.mtime.into()),
1004 fingerprint: serialize_fingerprint(fingerprint),
1005 })?;
1006 }
1007
1008 buffer_handle.update(&mut cx, |buffer, cx| {
1009 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1010 });
1011
1012 Ok((version, fingerprint, entry.mtime))
1013 })
1014 }
1015
    /// Creates a new (empty) file or directory at the worktree-relative
    /// `path`, then refreshes and returns the corresponding entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Save an empty rope with default line endings.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
1042
    /// Writes `text` to the worktree-relative `path` with the given line
    /// ending, then refreshes and returns the corresponding entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
1065
    /// Deletes the file or directory for `entry_id` from disk (directories
    /// recursively), then asks the scanner to rescan the deleted path and
    /// waits for that rescan before resolving.
    ///
    /// Returns `None` if no entry with that id exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root so the scanner sees the same path the
            // filesystem watcher reports.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // The barrier resolves once the scanner has processed the path.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
1108
    /// Renames the entry for `entry_id` to `new_path` on disk, then refreshes
    /// both the new and old paths and returns the new entry.
    ///
    /// Returns `None` if no entry with that id exists.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    // Passing the old path makes refresh_entry rescan it too.
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
1135
    /// Recursively copies the entry for `entry_id` to `new_path` on disk,
    /// then refreshes and returns the entry at the destination.
    ///
    /// Returns `None` if no entry with that id exists.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
1167
    /// Asks the background scanner to re-scan `path` (and `old_path`, if the
    /// entry was moved from there), waits for the scan to complete, and then
    /// returns the refreshed entry for `path`.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path without a file name denotes the worktree root itself, in
            // which case the canonicalized root is scanned directly.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // Send the paths to the scanner and block on the barrier until it
            // has processed them, so the snapshot read below is up to date.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            // The worktree may have been dropped while we waited (weak spawn).
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1205
    /// Starts (or resumes) sharing this worktree with collaborators on the
    /// given project. The returned task resolves once the first full snapshot
    /// has been sent to the server.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and poke the resume channel
            // so a paused update loop continues sending.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Push all known diagnostic summaries to the server up front; any
            // send failure aborts the share.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            // Background task that diffs each new snapshot against the last
            // one sent and streams the delta to the server in chunks.
            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first update is a
                    // full sync of the worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Small chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Drain any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            // On failure, retry each chunk after a resume
                            // signal; stop entirely if the channel is closed.
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // Signal the caller once the first snapshot is fully sent.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1292
1293 pub fn unshare(&mut self) {
1294 self.share.take();
1295 }
1296
    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1300}
1301
impl RemoteWorktree {
    /// Returns a clone of the current snapshot of this worktree.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks this worktree as disconnected: stops accepting updates and
    /// drops all pending snapshot waiters (their receivers will error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the remote host to save the given buffer, then applies the
    /// resulting version, fingerprint, and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Forwards a worktree update from the host to the background consumer.
    /// Silently dropped once the worktree has disconnected.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a scan with the given id has already been fully applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once the snapshot for `scan_id` has
    /// been observed, or errors if the worktree disconnects first.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already observed: resolve immediately.
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` return an error.
            drop(tx);
        } else {
            // Insert the waiter keeping the subscription list sorted by scan id.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Records (or clears) a diagnostic summary for a path, keyed by the
    /// language server that produced it. Empty summaries remove the record.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                // Drop the path's map entirely once no server reports on it.
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry sent by the host once the corresponding scan has
    /// been observed, keeping the foreground snapshot in sync with the
    /// background one.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes an entry once the corresponding scan has been observed,
    /// keeping the foreground snapshot in sync with the background one.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1443
1444impl Snapshot {
1445 pub fn id(&self) -> WorktreeId {
1446 self.id
1447 }
1448
1449 pub fn abs_path(&self) -> &Arc<Path> {
1450 &self.abs_path
1451 }
1452
1453 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1454 self.entries_by_id.get(&entry_id, &()).is_some()
1455 }
1456
1457 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1458 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1459 let old_entry = self.entries_by_id.insert_or_replace(
1460 PathEntry {
1461 id: entry.id,
1462 path: entry.path.clone(),
1463 is_ignored: entry.is_ignored,
1464 scan_id: 0,
1465 },
1466 &(),
1467 );
1468 if let Some(old_entry) = old_entry {
1469 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1470 }
1471 self.entries_by_path.insert_or_replace(entry.clone(), &());
1472 Ok(entry)
1473 }
1474
1475 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1476 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1477 self.entries_by_path = {
1478 let mut cursor = self.entries_by_path.cursor();
1479 let mut new_entries_by_path =
1480 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1481 while let Some(entry) = cursor.item() {
1482 if entry.path.starts_with(&removed_entry.path) {
1483 self.entries_by_id.remove(&entry.id, &());
1484 cursor.next(&());
1485 } else {
1486 break;
1487 }
1488 }
1489 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1490 new_entries_by_path
1491 };
1492
1493 Some(removed_entry.path)
1494 }
1495
1496 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1497 let mut entries_by_path_edits = Vec::new();
1498 let mut entries_by_id_edits = Vec::new();
1499 for entry_id in update.removed_entries {
1500 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1501 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1502 entries_by_id_edits.push(Edit::Remove(entry.id));
1503 }
1504 }
1505
1506 for entry in update.updated_entries {
1507 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1508 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1509 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1510 }
1511 entries_by_id_edits.push(Edit::Insert(PathEntry {
1512 id: entry.id,
1513 path: entry.path.clone(),
1514 is_ignored: entry.is_ignored,
1515 scan_id: 0,
1516 }));
1517 entries_by_path_edits.push(Edit::Insert(entry));
1518 }
1519
1520 self.entries_by_path.edit(entries_by_path_edits, &());
1521 self.entries_by_id.edit(entries_by_id_edits, &());
1522
1523 update.removed_repositories.sort_unstable();
1524 self.repository_entries.retain(|_, entry| {
1525 if let Ok(_) = update
1526 .removed_repositories
1527 .binary_search(&entry.work_directory.to_proto())
1528 {
1529 false
1530 } else {
1531 true
1532 }
1533 });
1534
1535 for repository in update.updated_repositories {
1536 let work_directory_entry: WorkDirectoryEntry =
1537 ProjectEntryId::from_proto(repository.work_directory_id).into();
1538
1539 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1540 let mut statuses = TreeMap::default();
1541 for status_entry in repository.updated_worktree_statuses {
1542 let Some(git_file_status) = read_git_status(status_entry.status) else {
1543 continue;
1544 };
1545
1546 let repo_path = RepoPath::new(status_entry.repo_path.into());
1547 statuses.insert(repo_path, git_file_status);
1548 }
1549
1550 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1551 if self.repository_entries.get(&work_directory).is_some() {
1552 self.repository_entries.update(&work_directory, |repo| {
1553 repo.branch = repository.branch.map(Into::into);
1554 repo.worktree_statuses.insert_tree(statuses);
1555
1556 for repo_path in repository.removed_worktree_repo_paths {
1557 let repo_path = RepoPath::new(repo_path.into());
1558 repo.worktree_statuses.remove(&repo_path);
1559 }
1560 });
1561 } else {
1562 self.repository_entries.insert(
1563 work_directory,
1564 RepositoryEntry {
1565 work_directory: work_directory_entry,
1566 branch: repository.branch.map(Into::into),
1567 worktree_statuses: statuses,
1568 },
1569 )
1570 }
1571 } else {
1572 log::error!("no work directory entry for repository {:?}", repository)
1573 }
1574 }
1575
1576 self.scan_id = update.scan_id as usize;
1577 if update.is_last_update {
1578 self.completed_scan_id = update.scan_id as usize;
1579 }
1580
1581 Ok(())
1582 }
1583
1584 pub fn file_count(&self) -> usize {
1585 self.entries_by_path.summary().file_count
1586 }
1587
1588 pub fn visible_file_count(&self) -> usize {
1589 self.entries_by_path.summary().visible_file_count
1590 }
1591
1592 fn traverse_from_offset(
1593 &self,
1594 include_dirs: bool,
1595 include_ignored: bool,
1596 start_offset: usize,
1597 ) -> Traversal {
1598 let mut cursor = self.entries_by_path.cursor();
1599 cursor.seek(
1600 &TraversalTarget::Count {
1601 count: start_offset,
1602 include_dirs,
1603 include_ignored,
1604 },
1605 Bias::Right,
1606 &(),
1607 );
1608 Traversal {
1609 cursor,
1610 include_dirs,
1611 include_ignored,
1612 }
1613 }
1614
1615 fn traverse_from_path(
1616 &self,
1617 include_dirs: bool,
1618 include_ignored: bool,
1619 path: &Path,
1620 ) -> Traversal {
1621 let mut cursor = self.entries_by_path.cursor();
1622 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1623 Traversal {
1624 cursor,
1625 include_dirs,
1626 include_ignored,
1627 }
1628 }
1629
1630 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1631 self.traverse_from_offset(false, include_ignored, start)
1632 }
1633
1634 pub fn entries(&self, include_ignored: bool) -> Traversal {
1635 self.traverse_from_offset(true, include_ignored, 0)
1636 }
1637
1638 pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1639 self.repository_entries.values()
1640 }
1641
1642 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1643 let empty_path = Path::new("");
1644 self.entries_by_path
1645 .cursor::<()>()
1646 .filter(move |entry| entry.path.as_ref() != empty_path)
1647 .map(|entry| &entry.path)
1648 }
1649
1650 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1651 let mut cursor = self.entries_by_path.cursor();
1652 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1653 let traversal = Traversal {
1654 cursor,
1655 include_dirs: true,
1656 include_ignored: true,
1657 };
1658 ChildEntriesIter {
1659 traversal,
1660 parent_path,
1661 }
1662 }
1663
1664 pub fn root_entry(&self) -> Option<&Entry> {
1665 self.entry_for_path("")
1666 }
1667
1668 pub fn root_name(&self) -> &str {
1669 &self.root_name
1670 }
1671
1672 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1673 self.repository_entries
1674 .get(&RepositoryWorkDirectory(Path::new("").into()))
1675 .map(|entry| entry.to_owned())
1676 }
1677
1678 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1679 self.repository_entries.values()
1680 }
1681
1682 pub fn scan_id(&self) -> usize {
1683 self.scan_id
1684 }
1685
1686 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1687 let path = path.as_ref();
1688 self.traverse_from_path(true, true, path)
1689 .entry()
1690 .and_then(|entry| {
1691 if entry.path.as_ref() == path {
1692 Some(entry)
1693 } else {
1694 None
1695 }
1696 })
1697 }
1698
1699 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1700 let entry = self.entries_by_id.get(&id, &())?;
1701 self.entry_for_path(&entry.path)
1702 }
1703
1704 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1705 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1706 }
1707}
1708
1709impl LocalSnapshot {
1710 pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1711 self.git_repositories.get(&repo.work_directory.0)
1712 }
1713
1714 pub(crate) fn repo_for_metadata(
1715 &self,
1716 path: &Path,
1717 ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1718 self.git_repositories
1719 .iter()
1720 .find(|(_, repo)| repo.in_dot_git(path))
1721 }
1722
1723 #[cfg(test)]
1724 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1725 let root_name = self.root_name.clone();
1726 proto::UpdateWorktree {
1727 project_id,
1728 worktree_id: self.id().to_proto(),
1729 abs_path: self.abs_path().to_string_lossy().into(),
1730 root_name,
1731 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1732 removed_entries: Default::default(),
1733 scan_id: self.scan_id as u64,
1734 is_last_update: true,
1735 updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1736 removed_repositories: Default::default(),
1737 }
1738 }
1739
1740 pub(crate) fn build_update(
1741 &self,
1742 other: &Self,
1743 project_id: u64,
1744 worktree_id: u64,
1745 include_ignored: bool,
1746 ) -> proto::UpdateWorktree {
1747 let mut updated_entries = Vec::new();
1748 let mut removed_entries = Vec::new();
1749 let mut self_entries = self
1750 .entries_by_id
1751 .cursor::<()>()
1752 .filter(|e| include_ignored || !e.is_ignored)
1753 .peekable();
1754 let mut other_entries = other
1755 .entries_by_id
1756 .cursor::<()>()
1757 .filter(|e| include_ignored || !e.is_ignored)
1758 .peekable();
1759 loop {
1760 match (self_entries.peek(), other_entries.peek()) {
1761 (Some(self_entry), Some(other_entry)) => {
1762 match Ord::cmp(&self_entry.id, &other_entry.id) {
1763 Ordering::Less => {
1764 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1765 updated_entries.push(entry);
1766 self_entries.next();
1767 }
1768 Ordering::Equal => {
1769 if self_entry.scan_id != other_entry.scan_id {
1770 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1771 updated_entries.push(entry);
1772 }
1773
1774 self_entries.next();
1775 other_entries.next();
1776 }
1777 Ordering::Greater => {
1778 removed_entries.push(other_entry.id.to_proto());
1779 other_entries.next();
1780 }
1781 }
1782 }
1783 (Some(self_entry), None) => {
1784 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1785 updated_entries.push(entry);
1786 self_entries.next();
1787 }
1788 (None, Some(other_entry)) => {
1789 removed_entries.push(other_entry.id.to_proto());
1790 other_entries.next();
1791 }
1792 (None, None) => break,
1793 }
1794 }
1795
1796 let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1797 let mut removed_repositories = Vec::new();
1798 let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1799 let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1800 loop {
1801 match (self_repos.peek(), other_repos.peek()) {
1802 (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1803 match Ord::cmp(self_work_dir, other_work_dir) {
1804 Ordering::Less => {
1805 updated_repositories.push((*self_repo).into());
1806 self_repos.next();
1807 }
1808 Ordering::Equal => {
1809 if self_repo != other_repo {
1810 updated_repositories.push(self_repo.build_update(other_repo));
1811 }
1812
1813 self_repos.next();
1814 other_repos.next();
1815 }
1816 Ordering::Greater => {
1817 removed_repositories.push(other_repo.work_directory.to_proto());
1818 other_repos.next();
1819 }
1820 }
1821 }
1822 (Some((_, self_repo)), None) => {
1823 updated_repositories.push((*self_repo).into());
1824 self_repos.next();
1825 }
1826 (None, Some((_, other_repo))) => {
1827 removed_repositories.push(other_repo.work_directory.to_proto());
1828 other_repos.next();
1829 }
1830 (None, None) => break,
1831 }
1832 }
1833
1834 proto::UpdateWorktree {
1835 project_id,
1836 worktree_id,
1837 abs_path: self.abs_path().to_string_lossy().into(),
1838 root_name: self.root_name().to_string(),
1839 updated_entries,
1840 removed_entries,
1841 scan_id: self.scan_id as u64,
1842 is_last_update: self.completed_scan_id == self.scan_id,
1843 updated_repositories,
1844 removed_repositories,
1845 }
1846 }
1847
1848 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1849 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1850 let abs_path = self.abs_path.join(&entry.path);
1851 match smol::block_on(build_gitignore(&abs_path, fs)) {
1852 Ok(ignore) => {
1853 self.ignores_by_parent_abs_path.insert(
1854 abs_path.parent().unwrap().into(),
1855 (Arc::new(ignore), self.scan_id),
1856 );
1857 }
1858 Err(error) => {
1859 log::error!(
1860 "error loading .gitignore file {:?} - {:?}",
1861 &entry.path,
1862 error
1863 );
1864 }
1865 }
1866 }
1867
1868 self.reuse_entry_id(&mut entry);
1869
1870 if entry.kind == EntryKind::PendingDir {
1871 if let Some(existing_entry) =
1872 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1873 {
1874 entry.kind = existing_entry.kind;
1875 }
1876 }
1877
1878 let scan_id = self.scan_id;
1879 let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1880 if let Some(removed) = removed {
1881 if removed.id != entry.id {
1882 self.entries_by_id.remove(&removed.id, &());
1883 }
1884 }
1885 self.entries_by_id.insert_or_replace(
1886 PathEntry {
1887 id: entry.id,
1888 path: entry.path.clone(),
1889 is_ignored: entry.is_ignored,
1890 scan_id,
1891 },
1892 &(),
1893 );
1894
1895 entry
1896 }
1897
1898 fn populate_dir(
1899 &mut self,
1900 parent_path: Arc<Path>,
1901 entries: impl IntoIterator<Item = Entry>,
1902 ignore: Option<Arc<Gitignore>>,
1903 fs: &dyn Fs,
1904 ) {
1905 let mut parent_entry = if let Some(parent_entry) =
1906 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1907 {
1908 parent_entry.clone()
1909 } else {
1910 log::warn!(
1911 "populating a directory {:?} that has been removed",
1912 parent_path
1913 );
1914 return;
1915 };
1916
1917 match parent_entry.kind {
1918 EntryKind::PendingDir => {
1919 parent_entry.kind = EntryKind::Dir;
1920 }
1921 EntryKind::Dir => {}
1922 _ => return,
1923 }
1924
1925 if let Some(ignore) = ignore {
1926 self.ignores_by_parent_abs_path.insert(
1927 self.abs_path.join(&parent_path).into(),
1928 (ignore, self.scan_id),
1929 );
1930 }
1931
1932 if parent_path.file_name() == Some(&DOT_GIT) {
1933 self.build_repo(parent_path, fs);
1934 }
1935
1936 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1937 let mut entries_by_id_edits = Vec::new();
1938
1939 for mut entry in entries {
1940 self.reuse_entry_id(&mut entry);
1941 entries_by_id_edits.push(Edit::Insert(PathEntry {
1942 id: entry.id,
1943 path: entry.path.clone(),
1944 is_ignored: entry.is_ignored,
1945 scan_id: self.scan_id,
1946 }));
1947 entries_by_path_edits.push(Edit::Insert(entry));
1948 }
1949
1950 self.entries_by_path.edit(entries_by_path_edits, &());
1951 self.entries_by_id.edit(entries_by_id_edits, &());
1952 }
1953
1954 fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1955 let abs_path = self.abs_path.join(&parent_path);
1956 let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1957
1958 // Guard against repositories inside the repository metadata
1959 if work_dir
1960 .components()
1961 .find(|component| component.as_os_str() == *DOT_GIT)
1962 .is_some()
1963 {
1964 return None;
1965 };
1966
1967 let work_dir_id = self
1968 .entry_for_path(work_dir.clone())
1969 .map(|entry| entry.id)?;
1970
1971 if self.git_repositories.get(&work_dir_id).is_none() {
1972 let repo = fs.open_repo(abs_path.as_path())?;
1973 let work_directory = RepositoryWorkDirectory(work_dir.clone());
1974 let scan_id = self.scan_id;
1975
1976 let repo_lock = repo.lock();
1977
1978 self.repository_entries.insert(
1979 work_directory,
1980 RepositoryEntry {
1981 work_directory: work_dir_id.into(),
1982 branch: repo_lock.branch_name().map(Into::into),
1983 worktree_statuses: repo_lock.worktree_statuses().unwrap_or_default(),
1984 },
1985 );
1986 drop(repo_lock);
1987
1988 self.git_repositories.insert(
1989 work_dir_id,
1990 LocalRepositoryEntry {
1991 scan_id,
1992 full_scan_id: scan_id,
1993 repo_ptr: repo,
1994 git_dir_path: parent_path.clone(),
1995 },
1996 )
1997 }
1998
1999 Some(())
2000 }
2001 fn reuse_entry_id(&mut self, entry: &mut Entry) {
2002 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2003 entry.id = removed_entry_id;
2004 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
2005 entry.id = existing_entry.id;
2006 }
2007 }
2008
2009 fn remove_path(&mut self, path: &Path) {
2010 let mut new_entries;
2011 let removed_entries;
2012 {
2013 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2014 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2015 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2016 new_entries.push_tree(cursor.suffix(&()), &());
2017 }
2018 self.entries_by_path = new_entries;
2019
2020 let mut entries_by_id_edits = Vec::new();
2021 for entry in removed_entries.cursor::<()>() {
2022 let removed_entry_id = self
2023 .removed_entry_ids
2024 .entry(entry.inode)
2025 .or_insert(entry.id);
2026 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2027 entries_by_id_edits.push(Edit::Remove(entry.id));
2028 }
2029 self.entries_by_id.edit(entries_by_id_edits, &());
2030
2031 if path.file_name() == Some(&GITIGNORE) {
2032 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
2033 if let Some((_, scan_id)) = self
2034 .ignores_by_parent_abs_path
2035 .get_mut(abs_parent_path.as_path())
2036 {
2037 *scan_id = self.snapshot.scan_id;
2038 }
2039 }
2040 }
2041
2042 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2043 let mut inodes = TreeSet::default();
2044 for ancestor in path.ancestors().skip(1) {
2045 if let Some(entry) = self.entry_for_path(ancestor) {
2046 inodes.insert(entry.inode);
2047 }
2048 }
2049 inodes
2050 }
2051
2052 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2053 let mut new_ignores = Vec::new();
2054 for ancestor in abs_path.ancestors().skip(1) {
2055 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2056 new_ignores.push((ancestor, Some(ignore.clone())));
2057 } else {
2058 new_ignores.push((ancestor, None));
2059 }
2060 }
2061
2062 let mut ignore_stack = IgnoreStack::none();
2063 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2064 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2065 ignore_stack = IgnoreStack::all();
2066 break;
2067 } else if let Some(ignore) = ignore {
2068 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2069 }
2070 }
2071
2072 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2073 ignore_stack = IgnoreStack::all();
2074 }
2075
2076 ignore_stack
2077 }
2078}
2079
2080async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2081 let contents = fs.load(abs_path).await?;
2082 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2083 let mut builder = GitignoreBuilder::new(parent);
2084 for line in contents.lines() {
2085 builder.add_line(Some(abs_path.into()), line)?;
2086 }
2087 Ok(builder.build()?)
2088}
2089
2090impl WorktreeId {
2091 pub fn from_usize(handle_id: usize) -> Self {
2092 Self(handle_id)
2093 }
2094
2095 pub(crate) fn from_proto(id: u64) -> Self {
2096 Self(id as usize)
2097 }
2098
2099 pub fn to_proto(&self) -> u64 {
2100 self.0 as u64
2101 }
2102
2103 pub fn to_usize(&self) -> usize {
2104 self.0
2105 }
2106}
2107
2108impl fmt::Display for WorktreeId {
2109 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2110 self.0.fmt(f)
2111 }
2112}
2113
2114impl Deref for Worktree {
2115 type Target = Snapshot;
2116
2117 fn deref(&self) -> &Self::Target {
2118 match self {
2119 Worktree::Local(worktree) => &worktree.snapshot,
2120 Worktree::Remote(worktree) => &worktree.snapshot,
2121 }
2122 }
2123}
2124
2125impl Deref for LocalWorktree {
2126 type Target = LocalSnapshot;
2127
2128 fn deref(&self) -> &Self::Target {
2129 &self.snapshot
2130 }
2131}
2132
2133impl Deref for RemoteWorktree {
2134 type Target = Snapshot;
2135
2136 fn deref(&self) -> &Self::Target {
2137 &self.snapshot
2138 }
2139}
2140
2141impl fmt::Debug for LocalWorktree {
2142 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2143 self.snapshot.fmt(f)
2144 }
2145}
2146
2147impl fmt::Debug for Snapshot {
2148 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2149 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2150 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2151
2152 impl<'a> fmt::Debug for EntriesByPath<'a> {
2153 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2154 f.debug_map()
2155 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2156 .finish()
2157 }
2158 }
2159
2160 impl<'a> fmt::Debug for EntriesById<'a> {
2161 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2162 f.debug_list().entries(self.0.iter()).finish()
2163 }
2164 }
2165
2166 f.debug_struct("Snapshot")
2167 .field("id", &self.id)
2168 .field("root_name", &self.root_name)
2169 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2170 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2171 .finish()
2172 }
2173}
2174
/// A handle to a file within a worktree, as seen by the language layer.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// The last-observed modification time of the file on disk.
    pub mtime: SystemTime,
    // Project-wide id of the corresponding worktree entry.
    pub(crate) entry_id: ProjectEntryId,
    // Whether the file lives in a local (as opposed to remote) worktree.
    pub(crate) is_local: bool,
    // Whether the file has been deleted from disk since it was opened.
    pub(crate) is_deleted: bool,
}
2184
2185impl language::File for File {
2186 fn as_local(&self) -> Option<&dyn language::LocalFile> {
2187 if self.is_local {
2188 Some(self)
2189 } else {
2190 None
2191 }
2192 }
2193
2194 fn mtime(&self) -> SystemTime {
2195 self.mtime
2196 }
2197
2198 fn path(&self) -> &Arc<Path> {
2199 &self.path
2200 }
2201
2202 fn full_path(&self, cx: &AppContext) -> PathBuf {
2203 let mut full_path = PathBuf::new();
2204 let worktree = self.worktree.read(cx);
2205
2206 if worktree.is_visible() {
2207 full_path.push(worktree.root_name());
2208 } else {
2209 let path = worktree.abs_path();
2210
2211 if worktree.is_local() && path.starts_with(HOME.as_path()) {
2212 full_path.push("~");
2213 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2214 } else {
2215 full_path.push(path)
2216 }
2217 }
2218
2219 if self.path.components().next().is_some() {
2220 full_path.push(&self.path);
2221 }
2222
2223 full_path
2224 }
2225
2226 /// Returns the last component of this handle's absolute path. If this handle refers to the root
2227 /// of its worktree, then this method will return the name of the worktree itself.
2228 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2229 self.path
2230 .file_name()
2231 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2232 }
2233
2234 fn is_deleted(&self) -> bool {
2235 self.is_deleted
2236 }
2237
2238 fn as_any(&self) -> &dyn Any {
2239 self
2240 }
2241
2242 fn to_proto(&self) -> rpc::proto::File {
2243 rpc::proto::File {
2244 worktree_id: self.worktree.id() as u64,
2245 entry_id: self.entry_id.to_proto(),
2246 path: self.path.to_string_lossy().into(),
2247 mtime: Some(self.mtime.into()),
2248 is_deleted: self.is_deleted,
2249 }
2250 }
2251}
2252
impl language::LocalFile for File {
    /// The file's absolute path on disk, derived from the local worktree's
    /// root path. Panics if the worktree is not local.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    /// Panics if the worktree is not local.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that the buffer backing this file was reloaded
    /// from disk. Only sends a message if the worktree is currently shared;
    /// send failures are logged and otherwise ignored.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2296
2297impl File {
2298 pub fn from_proto(
2299 proto: rpc::proto::File,
2300 worktree: ModelHandle<Worktree>,
2301 cx: &AppContext,
2302 ) -> Result<Self> {
2303 let worktree_id = worktree
2304 .read(cx)
2305 .as_remote()
2306 .ok_or_else(|| anyhow!("not remote"))?
2307 .id();
2308
2309 if worktree_id.to_proto() != proto.worktree_id {
2310 return Err(anyhow!("worktree id does not match file"));
2311 }
2312
2313 Ok(Self {
2314 worktree,
2315 path: Path::new(&proto.path).into(),
2316 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2317 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2318 is_local: false,
2319 is_deleted: proto.is_deleted,
2320 })
2321 }
2322
2323 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2324 file.and_then(|f| f.as_any().downcast_ref())
2325 }
2326
2327 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2328 self.worktree.read(cx).id()
2329 }
2330
2331 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2332 if self.is_deleted {
2333 None
2334 } else {
2335 Some(self.entry_id)
2336 }
2337 }
2338}
2339
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable id used to refer to this entry across snapshots.
    pub id: ProjectEntryId,
    /// Whether this is a file, a scanned directory, or a pending directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode number, used to detect recursive symlinks.
    pub inode: u64,
    /// Modification time reported by the filesystem.
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry is excluded by a `.gitignore` rule.
    pub is_ignored: bool,
}
2350
/// The kind of filesystem object an `Entry` represents.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not yet
    /// been scanned.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying the character bag used for fuzzy matching.
    File(CharBag),
}
2357
/// How a path changed between two snapshots (see `build_change_set`).
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    /// Either added or updated — used for events received before the initial
    /// scan finished, when the previous state of the path is unknown.
    AddedOrUpdated,
}
2365
2366impl Entry {
2367 fn new(
2368 path: Arc<Path>,
2369 metadata: &fs::Metadata,
2370 next_entry_id: &AtomicUsize,
2371 root_char_bag: CharBag,
2372 ) -> Self {
2373 Self {
2374 id: ProjectEntryId::new(next_entry_id),
2375 kind: if metadata.is_dir {
2376 EntryKind::PendingDir
2377 } else {
2378 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2379 },
2380 path,
2381 inode: metadata.inode,
2382 mtime: metadata.mtime,
2383 is_symlink: metadata.is_symlink,
2384 is_ignored: false,
2385 }
2386 }
2387
2388 pub fn is_dir(&self) -> bool {
2389 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2390 }
2391
2392 pub fn is_file(&self) -> bool {
2393 matches!(self.kind, EntryKind::File(_))
2394 }
2395}
2396
2397impl sum_tree::Item for Entry {
2398 type Summary = EntrySummary;
2399
2400 fn summary(&self) -> Self::Summary {
2401 let visible_count = if self.is_ignored { 0 } else { 1 };
2402 let file_count;
2403 let visible_file_count;
2404 if self.is_file() {
2405 file_count = 1;
2406 visible_file_count = visible_count;
2407 } else {
2408 file_count = 0;
2409 visible_file_count = 0;
2410 }
2411
2412 EntrySummary {
2413 max_path: self.path.clone(),
2414 count: 1,
2415 visible_count,
2416 file_count,
2417 visible_file_count,
2418 }
2419 }
2420}
2421
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and therefore ordered) by worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2429
/// Aggregated statistics over a range of `Entry` items in the path tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (right-most) path in the summarized range.
    max_path: Arc<Path>,
    /// Total number of entries in the range.
    count: usize,
    /// Number of entries that are not git-ignored.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of file entries that are not git-ignored.
    visible_file_count: usize,
}
2438
impl Default for EntrySummary {
    /// The empty summary: zero entries and the minimal (empty) path.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2450
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines two adjacent summaries: counts add, and the right-hand
    /// summary's `max_path` wins because it covers the later range.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2462
/// An item in the id-ordered tree, mapping a `ProjectEntryId` back to a path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    /// Worktree-relative path of the entry with this id.
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last updated.
    scan_id: usize,
}
2470
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// A single item's summary is just its own id (the maximum of a
    /// one-element range).
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2478
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Path entries are keyed (and therefore ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2486
/// Summary for the id-ordered tree: the greatest entry id in a range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2491
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The right-hand summary's `max_id` wins since ids are ordered.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2499
/// Lets the id tree be sought by `ProjectEntryId` directly.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2505
/// Newtype over a worktree-relative path, used as the key and seek dimension
/// for the path-ordered entries tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2508
impl Default for PathKey {
    /// The minimal key: the empty path, which sorts before all others.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2514
/// Lets the entries tree be sought by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2520
/// Scans a local worktree's directory on background threads, keeping the
/// shared snapshot up to date as filesystem events arrive.
struct BackgroundScanner {
    /// Snapshot shared with the foreground `LocalWorktree`.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel used to report scan progress and snapshot updates.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the worktree to rescan specific paths, prioritized over
    /// filesystem events.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The previously-reported state, used to compute change sets.
    prev_state: Mutex<BackgroundScannerState>,
    finished_initial_scan: bool,
}
2530
/// State carried between status updates: the last snapshot that was sent to
/// the worktree, plus the paths affected by events since then.
struct BackgroundScannerState {
    snapshot: Snapshot,
    event_paths: Vec<Arc<Path>>,
}
2535
2536impl BackgroundScanner {
    /// Creates a scanner for the given snapshot. The "previous" state starts
    /// as a copy of the snapshot, so the first change set is computed against
    /// the initial state.
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            prev_state: Mutex::new(BackgroundScannerState {
                snapshot: snapshot.snapshot.clone(),
                event_paths: Default::default(),
            }),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }
2557
    /// Main loop of the scanner: loads ignore files above the root, performs
    /// the initial recursive scan, then processes filesystem events and path
    /// refresh requests until the worktree is dropped.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor ignore file excludes the root itself, mark the
            // root entry as ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Coalesce any immediately-available event batches.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2649
    /// Rescans the given paths, records them for the next change set, and
    /// reports a status update that releases the caller's barrier. Returns
    /// false if the status update could not be sent (receiver dropped).
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        if let Some(mut paths) = self.reload_entries_for_paths(paths, None).await {
            paths.sort_unstable();
            util::extend_sorted(
                &mut self.prev_state.lock().event_paths,
                paths,
                usize::MAX,
                Ord::cmp,
            );
        }
        self.send_status_update(false, Some(barrier))
    }
2662
    /// Handles a batch of filesystem events: reloads the affected entries,
    /// recursively scans any new directories, refreshes ignore statuses, and
    /// prunes git repository state whose backing entries no longer exist.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            paths.sort_unstable();
            util::extend_sorted(
                &mut self.prev_state.lock().event_paths,
                paths,
                usize::MAX,
                Ord::cmp,
            );
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop git repositories whose work directory or `.git` entry is gone.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|work_directory_id, _| {
            snapshot
                .entry_for_id(*work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                })
        });
        snapshot.git_repositories = git_repositories;

        // Drop repository entries whose backing repository was just removed.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            snapshot
                .git_repositories
                .get(&entry.work_directory.0)
                .is_some()
        });
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
        self.prev_state.lock().event_paths.clear();
    }
2711
    /// Drains the scan queue, scanning directories concurrently on all CPUs.
    /// While scanning, also services path refresh requests and (when enabled)
    /// sends periodic progress updates to the worktree.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2784
    /// Computes the change set between the previously-reported snapshot and
    /// the current one, and sends it (along with a clone of the snapshot) to
    /// the worktree. Returns false if the receiver has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let new_snapshot = self.snapshot.lock().clone();
        let old_snapshot = mem::replace(&mut prev_state.snapshot, new_snapshot.snapshot.clone());

        let changes = self.build_change_set(
            &old_snapshot,
            &new_snapshot.snapshot,
            &prev_state.event_paths,
        );

        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot: new_snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2805
    /// Scans the children of a single directory: builds entries for each
    /// child, honors any `.gitignore` found in the directory, and enqueues
    /// scan jobs for subdirectories (skipping recursive symlinks).
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory in `new_entries`; `None` marks a directory
        // that won't be recursed into (recursive symlink).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // tends to be listed near the start of the directory, so there should rarely be
                // many such entries. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2931
    /// Refreshes the entries for the given absolute paths, removing entries
    /// for paths that no longer exist. When `scan_queue_tx` is provided,
    /// affected directories are re-scanned recursively. Returns the affected
    /// worktree-relative paths, or `None` if the root path could not be
    /// canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Drop paths nested inside other paths in the batch; they will be
        // covered by the ancestor's refresh.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    self.reload_repo_for_path(&path, &mut snapshot, self.fs.as_ref());

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Recurse into directories, guarding against symlink cycles.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {
                    self.remove_repo_path(&path, &mut snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
3023
    /// Clears git status state for a deleted path: removes its (and its
    /// descendants') statuses from the containing repository entry. Paths
    /// inside a `.git` directory are skipped; those are handled by
    /// `reload_repo_for_path`. Returns `None` when no repository contains
    /// the path.
    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        if !path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let scan_id = snapshot.scan_id;
            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry
                    .worktree_statuses
                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
            });
        }

        Some(())
    }
3050
    /// Updates git repository state for a changed path. Changes inside a
    /// `.git` directory trigger a full index reload and status re-scan of
    /// that repository (building it first if it's new); changes to other
    /// paths refresh just that file's status. Returns `None` when there is
    /// nothing to do.
    fn reload_repo_for_path(
        &self,
        path: &Path,
        snapshot: &mut LocalSnapshot,
        fs: &dyn Fs,
    ) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let (entry_id, repo_ptr) = {
                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
                    // No known repository for this `.git` directory yet; build one.
                    let dot_git_dir = path.ancestors()
                        .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
                        .next()?;

                    snapshot.build_repo(dot_git_dir.into(), fs);
                    return None;
                };
                // Short circuit if we've already fully scanned this repository
                // during the current scan.
                if repo.full_scan_id == scan_id {
                    return None;
                }
                (*entry_id, repo.repo_ptr.to_owned())
            };

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            let repo = repo_ptr.lock();
            repo.reload_index();
            let branch = repo.branch_name();
            let statuses = repo.worktree_statuses().unwrap_or_default();

            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.scan_id = scan_id;
                entry.full_scan_id = scan_id;
            });

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.branch = branch.map(Into::into);
                entry.worktree_statuses = statuses;
            });
        } else {
            // Ignored files don't participate in git status tracking.
            if snapshot
                .entry_for_path(&path)
                .map(|entry| entry.is_ignored)
                .unwrap_or(false)
            {
                self.remove_repo_path(&path, snapshot);
                return None;
            }

            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let status = {
                let local_repo = snapshot.get_local_repo(&repo)?;

                // Short circuit if we've already scanned everything
                if local_repo.full_scan_id == scan_id {
                    return None;
                }

                let git_ptr = local_repo.repo_ptr.lock();
                git_ptr.worktree_status(&repo_path)
            };

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            snapshot.repository_entries.update(&work_dir, |entry| {
                if let Some(status) = status {
                    entry.worktree_statuses.insert(repo_path, status);
                } else {
                    entry.worktree_statuses.remove(&repo_path);
                }
            });
        }

        Some(())
    }
3140
    /// Recomputes `is_ignored` for entries affected by changed or deleted
    /// `.gitignore` files, deleting stale ignore records and fanning the
    /// recursive status updates out across all CPUs.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // An ignore file modified after the last completed scan needs
                // its subtree's statuses recomputed.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants; the ancestor's recursive job covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
3218
    /// Re-evaluates ignore status for the direct children of one directory,
    /// enqueues recursive jobs for child directories, and writes any changed
    /// entries back into the live snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Record edits only for entries whose ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
3261
    /// Diffs the old and new snapshots at the given event paths, producing a
    /// map from (path, entry id) to the kind of change that occurred. Walks
    /// both entry trees in parallel starting at each event path.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> HashMap<(Arc<Path>, ProjectEntryId), PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path.clone());
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path and its descendants.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert((old_entry.path.clone(), old_entry.id), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(
                                        (new_entry.path.clone(), new_entry.id),
                                        AddedOrUpdated,
                                    );
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert((new_entry.path.clone(), new_entry.id), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert((new_entry.path.clone(), new_entry.id), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert((old_entry.path.clone(), old_entry.id), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert((new_entry.path.clone(), new_entry.id), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes
    }
3332
    /// Waits out the progress-update interval, or forever when progress
    /// updates are disabled. Under a fake filesystem in tests, a random
    /// simulated delay is used instead of real time.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
3345}
3346
3347fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3348 let mut result = root_char_bag;
3349 result.extend(
3350 path.to_string_lossy()
3351 .chars()
3352 .map(|c| c.to_ascii_lowercase()),
3353 );
3354 result
3355}
3356
/// A queued unit of work for the background scanner: scan one directory and
/// enqueue follow-up jobs for its subdirectories.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Channel on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories; presumably used to avoid re-entering a
    // directory through a symlink cycle — confirm against the scanner.
    ancestor_inodes: TreeSet<u64>,
}
3364
/// A queued unit of work for recomputing ignore statuses after a gitignore
/// file changes.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose entries need re-evaluation.
    abs_path: Arc<Path>,
    // Gitignore rules in effect for that directory.
    ignore_stack: Arc<IgnoreStack>,
    // Channel on which follow-up jobs are enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3370
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all pending FS events have been processed by the worktree,
    /// so that subsequent assertions observe a settled state.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3378
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create and then delete a sentinel file, waiting for the worktree
            // to observe each change; once both are seen, every earlier event
            // must also have been processed.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3419
/// A `sum_tree` dimension tracking, for a position in the entry tree, the
/// greatest path seen and how many entries of each kind/visibility precede it.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest entry path covered so far.
    max_path: &'a Path,
    // All entries, including directories and ignored entries.
    count: usize,
    // All non-ignored entries, including directories.
    visible_count: usize,
    // Files only, including ignored ones.
    file_count: usize,
    // Non-ignored files only.
    visible_file_count: usize,
}
3428
3429impl<'a> TraversalProgress<'a> {
3430 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3431 match (include_ignored, include_dirs) {
3432 (true, true) => self.count,
3433 (true, false) => self.file_count,
3434 (false, true) => self.visible_count,
3435 (false, false) => self.visible_file_count,
3436 }
3437 }
3438}
3439
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Accumulates an entry summary: tracks the greatest path seen and adds
    /// each of the four counts.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3449
impl<'a> Default for TraversalProgress<'a> {
    /// Progress at the start of the tree: empty max path, all counts zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
3461
/// A cursor over a snapshot's entries in path order, optionally filtering out
/// directories and/or gitignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // When false, ignored entries are skipped.
    include_ignored: bool,
    // When false, directory entries are skipped.
    include_dirs: bool,
}
3467
impl<'a> Traversal<'a> {
    /// Steps the cursor to the next entry matching this traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until the filtered entry count reaches `offset`.
    /// Returns the result of the underlying cursor seek.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips the current entry's entire subtree, stopping at the next entry
    /// that is not one of its descendants and that matches this traversal's
    /// filters. Returns false if no such entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek just past every path that has `entry.path` as a prefix.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
            // The entry we landed on is filtered out; loop to skip past its
            // subtree as well.
        }
        false
    }

    /// The entry the cursor is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of filter-matching entries preceding the cursor.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3513
3514impl<'a> Iterator for Traversal<'a> {
3515 type Item = &'a Entry;
3516
3517 fn next(&mut self) -> Option<Self::Item> {
3518 if let Some(item) = self.entry() {
3519 self.advance();
3520 Some(item)
3521 } else {
3522 None
3523 }
3524 }
3525}
3526
/// Seek targets for positioning a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to this exact path.
    Path(&'a Path),
    // Seek to the first path that is not a descendant of this path.
    PathSuccessor(&'a Path),
    // Seek until the filtered entry count reaches `count`.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3537
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Order directly by path against the greatest path covered so far.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            // Seek to the first entry outside `path`'s subtree: report
            // Greater while still inside it (keep advancing), Equal once the
            // cursor has escaped it.
            TraversalTarget::PathSuccessor(path) => {
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            // Order by the filtered entry count.
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3560
/// Iterates over the direct children of `parent_path`, skipping each child's
/// own descendants.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3565
3566impl<'a> Iterator for ChildEntriesIter<'a> {
3567 type Item = &'a Entry;
3568
3569 fn next(&mut self) -> Option<Self::Item> {
3570 if let Some(item) = self.traversal.entry() {
3571 if item.path.starts_with(&self.parent_path) {
3572 self.traversal.advance_to_sibling();
3573 return Some(item);
3574 }
3575 }
3576 None
3577 }
3578}
3579
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes a worktree entry for transmission to remote collaborators.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths travel over the wire as (lossily converted) strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3593
3594impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3595 type Error = anyhow::Error;
3596
3597 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3598 if let Some(mtime) = entry.mtime {
3599 let kind = if entry.is_dir {
3600 EntryKind::Dir
3601 } else {
3602 let mut char_bag = *root_char_bag;
3603 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3604 EntryKind::File(char_bag)
3605 };
3606 let path: Arc<Path> = PathBuf::from(entry.path).into();
3607 Ok(Entry {
3608 id: ProjectEntryId::from_proto(entry.id),
3609 kind,
3610 path,
3611 inode: entry.inode,
3612 mtime: mtime.into(),
3613 is_symlink: entry.is_symlink,
3614 is_ignored: entry.is_ignored,
3615 })
3616 } else {
3617 Err(anyhow!(
3618 "missing mtime in remote worktree entry {:?}",
3619 entry.path
3620 ))
3621 }
3622 }
3623}
3624
3625#[cfg(test)]
3626mod tests {
3627 use super::*;
3628 use fs::{FakeFs, RealFs};
3629 use gpui::{executor::Deterministic, TestAppContext};
3630 use pretty_assertions::assert_eq;
3631 use rand::prelude::*;
3632 use serde_json::json;
3633 use std::{env, fmt::Write};
3634 use util::{http::FakeHttpClient, test::temp_tree};
3635
    // Verifies that `entries(false)` omits gitignored entries ("a/b" per the
    // root .gitignore) while `entries(true)` includes them.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan before asserting on entries.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3693
    // Verifies that scanning terminates in the presence of symlink cycles
    // (each dir links back to its parent): the symlink entries appear in the
    // tree but their targets are not re-descended, and renaming one of them
    // is picked up on rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Rename one of the symlinks and verify the rescan reflects it.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3773
    // Verifies (against the real FS) that ignore statuses are computed both
    // from the worktree's own .gitignore and from an ancestor directory's
    // .gitignore, on the initial scan and for files created afterwards.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the initial scan must get the same statuses.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3852
    // Verifies repository discovery: a file maps to the nearest enclosing
    // .git work directory (including nested dependency repos), repository
    // events are emitted when .git contents change, and the mapping is
    // removed when .git is deleted.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // c.txt sits outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1").to_owned())
            );

            // Nested repos take precedence over the outer one.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1/deps/dep1").to_owned())
            );
        });

        let repo_update_events = Arc::new(Mutex::new(vec![]));
        tree.update(cx, |_, cx| {
            let repo_update_events = repo_update_events.clone();
            cx.subscribe(&tree, move |_, _, event, _| {
                if let Event::UpdatedGitRepositories(update) = event {
                    repo_update_events.lock().push(update.clone());
                }
            })
            .detach();
        });

        // Touching a file inside .git must produce an update event for dir1.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        assert_eq!(
            repo_update_events.lock()[0]
                .keys()
                .cloned()
                .collect::<Vec<Arc<Path>>>(),
            vec![Path::new("dir1").into()]
        );

        // Deleting .git removes the repository mapping.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3943
3944 #[gpui::test]
3945 async fn test_git_status(cx: &mut TestAppContext) {
3946 #[track_caller]
3947 fn git_init(path: &Path) -> git2::Repository {
3948 git2::Repository::init(path).expect("Failed to initialize git repository")
3949 }
3950
3951 #[track_caller]
3952 fn git_add(path: &Path, repo: &git2::Repository) {
3953 let mut index = repo.index().expect("Failed to get index");
3954 index.add_path(path).expect("Failed to add a.txt");
3955 index.write().expect("Failed to write index");
3956 }
3957
3958 #[track_caller]
3959 fn git_remove_index(path: &Path, repo: &git2::Repository) {
3960 let mut index = repo.index().expect("Failed to get index");
3961 index.remove_path(path).expect("Failed to add a.txt");
3962 index.write().expect("Failed to write index");
3963 }
3964
3965 #[track_caller]
3966 fn git_commit(msg: &'static str, repo: &git2::Repository) {
3967 use git2::Signature;
3968
3969 let signature = Signature::now("test", "test@zed.dev").unwrap();
3970 let oid = repo.index().unwrap().write_tree().unwrap();
3971 let tree = repo.find_tree(oid).unwrap();
3972 if let Some(head) = repo.head().ok() {
3973 let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
3974
3975 let parent_commit = parent_obj.as_commit().unwrap();
3976
3977 repo.commit(
3978 Some("HEAD"),
3979 &signature,
3980 &signature,
3981 msg,
3982 &tree,
3983 &[parent_commit],
3984 )
3985 .expect("Failed to commit with parent");
3986 } else {
3987 repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
3988 .expect("Failed to commit");
3989 }
3990 }
3991
3992 #[track_caller]
3993 fn git_stash(repo: &mut git2::Repository) {
3994 use git2::Signature;
3995
3996 let signature = Signature::now("test", "test@zed.dev").unwrap();
3997 repo.stash_save(&signature, "N/A", None)
3998 .expect("Failed to stash");
3999 }
4000
4001 #[track_caller]
4002 fn git_reset(offset: usize, repo: &git2::Repository) {
4003 let head = repo.head().expect("Couldn't get repo head");
4004 let object = head.peel(git2::ObjectType::Commit).unwrap();
4005 let commit = object.as_commit().unwrap();
4006 let new_head = commit
4007 .parents()
4008 .inspect(|parnet| {
4009 parnet.message();
4010 })
4011 .skip(offset)
4012 .next()
4013 .expect("Not enough history");
4014 repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
4015 .expect("Could not reset");
4016 }
4017
4018 #[allow(dead_code)]
4019 #[track_caller]
4020 fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
4021 repo.statuses(None)
4022 .unwrap()
4023 .iter()
4024 .map(|status| (status.path().unwrap().to_string(), status.status()))
4025 .collect()
4026 }
4027
4028 const IGNORE_RULE: &'static str = "**/target";
4029
4030 let root = temp_tree(json!({
4031 "project": {
4032 "a.txt": "a",
4033 "b.txt": "bb",
4034 "c": {
4035 "d": {
4036 "e.txt": "eee"
4037 }
4038 },
4039 "f.txt": "ffff",
4040 "target": {
4041 "build_file": "???"
4042 },
4043 ".gitignore": IGNORE_RULE
4044 },
4045
4046 }));
4047
4048 let http_client = FakeHttpClient::with_404_response();
4049 let client = cx.read(|cx| Client::new(http_client, cx));
4050 let tree = Worktree::local(
4051 client,
4052 root.path(),
4053 true,
4054 Arc::new(RealFs),
4055 Default::default(),
4056 &mut cx.to_async(),
4057 )
4058 .await
4059 .unwrap();
4060
4061 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4062 .await;
4063
4064 const A_TXT: &'static str = "a.txt";
4065 const B_TXT: &'static str = "b.txt";
4066 const E_TXT: &'static str = "c/d/e.txt";
4067 const F_TXT: &'static str = "f.txt";
4068 const DOTGITIGNORE: &'static str = ".gitignore";
4069 const BUILD_FILE: &'static str = "target/build_file";
4070
4071 let work_dir = root.path().join("project");
4072 let mut repo = git_init(work_dir.as_path());
4073 repo.add_ignore_rule(IGNORE_RULE).unwrap();
4074 git_add(Path::new(A_TXT), &repo);
4075 git_add(Path::new(E_TXT), &repo);
4076 git_add(Path::new(DOTGITIGNORE), &repo);
4077 git_commit("Initial commit", &repo);
4078
4079 std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
4080
4081 tree.flush_fs_events(cx).await;
4082
4083 // Check that the right git state is observed on startup
4084 tree.read_with(cx, |tree, _cx| {
4085 let snapshot = tree.snapshot();
4086 assert_eq!(snapshot.repository_entries.iter().count(), 1);
4087 let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
4088 assert_eq!(dir.0.as_ref(), Path::new("project"));
4089
4090 assert_eq!(repo.worktree_statuses.iter().count(), 3);
4091 assert_eq!(
4092 repo.worktree_statuses.get(&Path::new(A_TXT).into()),
4093 Some(&GitFileStatus::Modified)
4094 );
4095 assert_eq!(
4096 repo.worktree_statuses.get(&Path::new(B_TXT).into()),
4097 Some(&GitFileStatus::Added)
4098 );
4099 assert_eq!(
4100 repo.worktree_statuses.get(&Path::new(F_TXT).into()),
4101 Some(&GitFileStatus::Added)
4102 );
4103 });
4104
4105 git_add(Path::new(A_TXT), &repo);
4106 git_add(Path::new(B_TXT), &repo);
4107 git_commit("Committing modified and added", &repo);
4108 tree.flush_fs_events(cx).await;
4109
4110 // Check that repo only changes are tracked
4111 tree.read_with(cx, |tree, _cx| {
4112 let snapshot = tree.snapshot();
4113 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4114
4115 assert_eq!(repo.worktree_statuses.iter().count(), 1);
4116 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4117 assert_eq!(repo.worktree_statuses.get(&Path::new(B_TXT).into()), None);
4118 assert_eq!(
4119 repo.worktree_statuses.get(&Path::new(F_TXT).into()),
4120 Some(&GitFileStatus::Added)
4121 );
4122 });
4123
4124 git_reset(0, &repo);
4125 git_remove_index(Path::new(B_TXT), &repo);
4126 git_stash(&mut repo);
4127 std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
4128 std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
4129 tree.flush_fs_events(cx).await;
4130
4131 // Check that more complex repo changes are tracked
4132 tree.read_with(cx, |tree, _cx| {
4133 let snapshot = tree.snapshot();
4134 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4135
4136 assert_eq!(repo.worktree_statuses.iter().count(), 3);
4137 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4138 assert_eq!(
4139 repo.worktree_statuses.get(&Path::new(B_TXT).into()),
4140 Some(&GitFileStatus::Added)
4141 );
4142 assert_eq!(
4143 repo.worktree_statuses.get(&Path::new(E_TXT).into()),
4144 Some(&GitFileStatus::Modified)
4145 );
4146 assert_eq!(
4147 repo.worktree_statuses.get(&Path::new(F_TXT).into()),
4148 Some(&GitFileStatus::Added)
4149 );
4150 });
4151
4152 std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
4153 std::fs::remove_dir_all(work_dir.join("c")).unwrap();
4154 std::fs::write(
4155 work_dir.join(DOTGITIGNORE),
4156 [IGNORE_RULE, "f.txt"].join("\n"),
4157 )
4158 .unwrap();
4159
4160 git_add(Path::new(DOTGITIGNORE), &repo);
4161 git_commit("Committing modified git ignore", &repo);
4162
4163 tree.flush_fs_events(cx).await;
4164
4165 git_status(&repo);
4166
4167 // Check that non-repo behavior is tracked
4168 tree.read_with(cx, |tree, _cx| {
4169 let snapshot = tree.snapshot();
4170 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4171
4172 assert_eq!(repo.worktree_statuses.iter().count(), 0);
4173 assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
4174 assert_eq!(repo.worktree_statuses.get(&Path::new(B_TXT).into()), None);
4175 assert_eq!(repo.worktree_statuses.get(&Path::new(E_TXT).into()), None);
4176 assert_eq!(repo.worktree_statuses.get(&Path::new(F_TXT).into()), None);
4177 });
4178 }
4179
    // Verifies that files written through the worktree appear as entries with
    // the correct ignore status, both inside and outside an ignored directory.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4233
    // Verifies that a directory created while the initial scan is still in
    // progress is recorded correctly, and that a remote snapshot updated from
    // the resulting diff converges to the local one.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Create a directory before waiting for the scan to complete.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Applying the update to the pre-creation snapshot must converge.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
4282
    // Randomized test: performs worktree mutations while the initial scan is
    // running, periodically checking snapshot invariants and verifying that a
    // remote snapshot updated from diffs stays in sync.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally verify remote-update convergence mid-scan.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        // Final convergence check once the scan has completed.
        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
4359
    // Randomized test: after the initial scan, mutates the FS and the
    // worktree while (1) following along via `UpdatedEntries` events,
    // (2) comparing against a freshly scanned worktree, and (3) replaying
    // diffs onto previously stored snapshots.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for ((path, _), change_type) in changes.iter() {
                        let path = path.clone();
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }

                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }

                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }

                            PathChange::AddedOrUpdated => {
                                // NOTE(review): `paths[ix]` panics if `ix ==
                                // paths.len()` (path sorts after every known
                                // path); `paths.get(ix) != Some(&path)` would
                                // be the safe form — confirm reachable.
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }

                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if rng.gen_bool(0.2) {
                worktree
                    .update(cx, |worktree, cx| {
                        randomly_mutate_worktree(worktree, &mut rng, cx)
                    })
                    .await
                    .unwrap();
            } else {
                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            }

            // Sometimes flush only a prefix of the buffered FS events, so the
            // worktree observes changes in partial batches.
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A fresh scan of the final FS state must match the followed-along
        // snapshot exactly.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Each stored snapshot must converge on the final one when the diff
        // is replayed (with or without ignored entries included).
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
4528
/// Applies one random mutation to a local worktree — deleting an entry,
/// renaming an entry to a random destination, or creating/overwriting an
/// entry — and returns a task that resolves once the mutation completes.
///
/// NOTE(review): the exact sequence of `rng` calls determines test
/// reproducibility from a seed; do not reorder them.
fn randomly_mutate_worktree(
    worktree: &mut Worktree,
    rng: &mut impl Rng,
    cx: &mut ModelContext<Worktree>,
) -> Task<Result<()>> {
    let worktree = worktree.as_local_mut().unwrap();
    let snapshot = worktree.snapshot();
    // Pick a random non-ignored entry to mutate (the `false` excludes ignored
    // entries from the traversal).
    let entry = snapshot.entries(false).choose(rng).unwrap();

    match rng.gen_range(0_u32..100) {
        // ~1/3 of the time: delete the chosen entry. Guard keeps the root
        // (empty path) from being deleted; root falls through to `_`.
        0..=33 if entry.path.as_ref() != Path::new("") => {
            log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
            worktree.delete_entry(entry.id, cx).unwrap()
        }
        // ~1/3 of the time: rename the entry into a randomly chosen parent
        // directory (a file's parent is used when a file is picked).
        ..=66 if entry.path.as_ref() != Path::new("") => {
            let other_entry = snapshot.entries(false).choose(rng).unwrap();
            let new_parent_path = if other_entry.is_dir() {
                other_entry.path.clone()
            } else {
                other_entry.path.parent().unwrap().into()
            };
            let mut new_path = new_parent_path.join(gen_name(rng));
            // Avoid moving an entry inside itself; fall back to a fresh name
            // at the worktree root.
            if new_path.starts_with(&entry.path) {
                new_path = gen_name(rng).into();
            }

            log::info!(
                "renaming entry {:?} ({}) to {:?}",
                entry.path,
                entry.id.0,
                new_path
            );
            let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
            cx.foreground().spawn(async move {
                task.await?;
                Ok(())
            })
        }
        // Otherwise: create a child under a directory (30% chance of a
        // subdirectory), or truncate a file to empty contents.
        _ => {
            let task = if entry.is_dir() {
                let child_path = entry.path.join(gen_name(rng));
                let is_dir = rng.gen_bool(0.3);
                log::info!(
                    "creating {} at {:?}",
                    if is_dir { "dir" } else { "file" },
                    child_path,
                );
                worktree.create_entry(child_path, is_dir, cx)
            } else {
                log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
            };
            cx.foreground().spawn(async move {
                task.await?;
                Ok(())
            })
        }
    }
}
4588
/// Applies one random mutation directly to the fake filesystem under
/// `root_path`: creates a file/dir, writes a random `.gitignore`, renames an
/// entry, or deletes an entry. `insertion_probability` biases toward creation.
///
/// NOTE(review): the exact sequence of `rng` calls determines test
/// reproducibility from a seed; do not reorder them.
async fn randomly_mutate_fs(
    fs: &Arc<dyn Fs>,
    root_path: &Path,
    insertion_probability: f64,
    rng: &mut impl Rng,
) {
    // Partition all existing paths under the root into files and directories.
    // `root_path` itself ends up in `dirs`.
    let mut files = Vec::new();
    let mut dirs = Vec::new();
    for path in fs.as_fake().paths() {
        if path.starts_with(root_path) {
            if fs.is_file(&path).await {
                files.push(path);
            } else {
                dirs.push(path);
            }
        }
    }

    // Force an insertion when the tree is empty (only the root dir exists);
    // otherwise the rename/delete branch below could have nothing to act on.
    if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
        let path = dirs.choose(rng).unwrap();
        let new_path = path.join(gen_name(rng));

        if rng.gen() {
            log::info!(
                "creating dir {:?}",
                new_path.strip_prefix(root_path).unwrap()
            );
            fs.create_dir(&new_path).await.unwrap();
        } else {
            log::info!(
                "creating file {:?}",
                new_path.strip_prefix(root_path).unwrap()
            );
            fs.create_file(&new_path, Default::default()).await.unwrap();
        }
    } else if rng.gen_bool(0.05) {
        // Rarely: write a .gitignore listing a random subset of the chosen
        // directory's descendants.
        let ignore_dir_path = dirs.choose(rng).unwrap();
        let ignore_path = ignore_dir_path.join(&*GITIGNORE);

        // Note: `starts_with` includes `ignore_dir_path` itself in `subdirs`,
        // which guarantees `subdirs` is non-empty for the `gen_range` below.
        let subdirs = dirs
            .iter()
            .filter(|d| d.starts_with(&ignore_dir_path))
            .cloned()
            .collect::<Vec<_>>();
        let subfiles = files
            .iter()
            .filter(|d| d.starts_with(&ignore_dir_path))
            .cloned()
            .collect::<Vec<_>>();
        let files_to_ignore = {
            let len = rng.gen_range(0..=subfiles.len());
            subfiles.choose_multiple(rng, len)
        };
        // Exclusive upper bound: never ignore every subdir (which would
        // include `ignore_dir_path` itself).
        let dirs_to_ignore = {
            let len = rng.gen_range(0..subdirs.len());
            subdirs.choose_multiple(rng, len)
        };

        // One pattern per line, relative to the gitignore's directory.
        let mut ignore_contents = String::new();
        for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
            writeln!(
                ignore_contents,
                "{}",
                path_to_ignore
                    .strip_prefix(&ignore_dir_path)
                    .unwrap()
                    .to_str()
                    .unwrap()
            )
            .unwrap();
        }
        log::info!(
            "creating gitignore {:?} with contents:\n{}",
            ignore_path.strip_prefix(&root_path).unwrap(),
            ignore_contents
        );
        fs.save(
            &ignore_path,
            &ignore_contents.as_str().into(),
            Default::default(),
        )
        .await
        .unwrap();
    } else {
        // Rename or delete an existing entry. `dirs[1..]` skips the root dir
        // so it is never renamed/deleted; the insertion guard above ensures
        // at least one candidate exists here.
        let old_path = {
            let file_path = files.choose(rng);
            let dir_path = dirs[1..].choose(rng);
            file_path.into_iter().chain(dir_path).choose(rng).unwrap()
        };

        let is_rename = rng.gen();
        if is_rename {
            // Choose a destination parent that is not inside the entry being
            // moved (avoids moving a directory into itself).
            let new_path_parent = dirs
                .iter()
                .filter(|d| !d.starts_with(old_path))
                .choose(rng)
                .unwrap();

            // Sometimes replace an existing directory wholesale instead of
            // creating a new child under it.
            let overwrite_existing_dir =
                !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
            let new_path = if overwrite_existing_dir {
                fs.remove_dir(
                    &new_path_parent,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
                new_path_parent.to_path_buf()
            } else {
                new_path_parent.join(gen_name(rng))
            };

            log::info!(
                "renaming {:?} to {}{:?}",
                old_path.strip_prefix(&root_path).unwrap(),
                if overwrite_existing_dir {
                    "overwrite "
                } else {
                    ""
                },
                new_path.strip_prefix(&root_path).unwrap()
            );
            fs.rename(
                &old_path,
                &new_path,
                fs::RenameOptions {
                    overwrite: true,
                    ignore_if_exists: true,
                },
            )
            .await
            .unwrap();
        } else if fs.is_file(&old_path).await {
            log::info!(
                "deleting file {:?}",
                old_path.strip_prefix(&root_path).unwrap()
            );
            fs.remove_file(old_path, Default::default()).await.unwrap();
        } else {
            log::info!(
                "deleting dir {:?}",
                old_path.strip_prefix(&root_path).unwrap()
            );
            fs.remove_dir(
                &old_path,
                RemoveOptions {
                    recursive: true,
                    ignore_if_not_exists: true,
                },
            )
            .await
            .unwrap();
        }
    }
}
4747
4748 fn gen_name(rng: &mut impl Rng) -> String {
4749 (0..6)
4750 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4751 .map(char::from)
4752 .collect()
4753 }
4754
4755 impl LocalSnapshot {
4756 fn check_invariants(&self) {
4757 assert_eq!(
4758 self.entries_by_path
4759 .cursor::<()>()
4760 .map(|e| (&e.path, e.id))
4761 .collect::<Vec<_>>(),
4762 self.entries_by_id
4763 .cursor::<()>()
4764 .map(|e| (&e.path, e.id))
4765 .collect::<collections::BTreeSet<_>>()
4766 .into_iter()
4767 .collect::<Vec<_>>(),
4768 "entries_by_path and entries_by_id are inconsistent"
4769 );
4770
4771 let mut files = self.files(true, 0);
4772 let mut visible_files = self.files(false, 0);
4773 for entry in self.entries_by_path.cursor::<()>() {
4774 if entry.is_file() {
4775 assert_eq!(files.next().unwrap().inode, entry.inode);
4776 if !entry.is_ignored {
4777 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4778 }
4779 }
4780 }
4781
4782 assert!(files.next().is_none());
4783 assert!(visible_files.next().is_none());
4784
4785 let mut bfs_paths = Vec::new();
4786 let mut stack = vec![Path::new("")];
4787 while let Some(path) = stack.pop() {
4788 bfs_paths.push(path);
4789 let ix = stack.len();
4790 for child_entry in self.child_entries(path) {
4791 stack.insert(ix, &child_entry.path);
4792 }
4793 }
4794
4795 let dfs_paths_via_iter = self
4796 .entries_by_path
4797 .cursor::<()>()
4798 .map(|e| e.path.as_ref())
4799 .collect::<Vec<_>>();
4800 assert_eq!(bfs_paths, dfs_paths_via_iter);
4801
4802 let dfs_paths_via_traversal = self
4803 .entries(true)
4804 .map(|e| e.path.as_ref())
4805 .collect::<Vec<_>>();
4806 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4807
4808 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4809 let ignore_parent_path =
4810 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4811 assert!(self.entry_for_path(&ignore_parent_path).is_some());
4812 assert!(self
4813 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4814 .is_some());
4815 }
4816 }
4817
4818 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4819 let mut paths = Vec::new();
4820 for entry in self.entries_by_path.cursor::<()>() {
4821 if include_ignored || !entry.is_ignored {
4822 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4823 }
4824 }
4825 paths.sort_by(|a, b| a.0.cmp(b.0));
4826 paths
4827 }
4828 }
4829}