1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, HashSet, VecDeque};
9use fs::{
10 repository::{GitFileStatus, GitRepository, RepoPath},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 FutureExt, Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use smol::channel::{self, Sender};
40use std::{
41 any::Any,
42 cmp::{self, Ordering},
43 convert::TryFrom,
44 ffi::OsStr,
45 fmt,
46 future::Future,
47 mem,
48 ops::{AddAssign, Deref, DerefMut, Sub},
49 path::{Path, PathBuf},
50 pin::Pin,
51 sync::{
52 atomic::{AtomicUsize, Ordering::SeqCst},
53 Arc,
54 },
55 time::{Duration, SystemTime},
56};
57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
58use util::{paths::HOME, ResultExt};
59
/// Identifier for a worktree within a project; wraps the model id of the
/// worktree's handle (see `WorktreeId::from_usize` usage in `Worktree::local`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A directory tree tracked by the project: either backed by the local file
/// system, or mirrored from a remote collaborator over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// A worktree backed by the local file system. Holds the current snapshot and
/// the channels used to communicate with the background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Requests to rescan specific relative paths, served by the background scanner.
    scan_requests_tx: channel::Sender<ScanRequest>,
    // Path prefixes that should be included in future scans.
    path_prefixes_to_scan_tx: channel::Sender<Arc<Path>>,
    // Watch pair flipped by scan-state updates; see `scan_complete`.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present while the worktree is shared with collaborators.
    share: Option<ShareState>,
    // Per-path diagnostics, with the inner Vec kept sorted by server id
    // (see the binary searches in `update_diagnostics`).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
87
/// A request for the background scanner to refresh specific paths; `done`
/// is dropped (releasing the barrier) when the scan of those paths finishes.
struct ScanRequest {
    relative_paths: Vec<Arc<Path>>,
    done: barrier::Sender,
}
92
/// A worktree mirrored from a remote collaborator. Updates arrive via
/// `updates_tx`, are applied to `background_snapshot`, and are then copied
/// into `snapshot` on the foreground thread.
pub struct RemoteWorktree {
    snapshot: Snapshot,
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once the worktree stops accepting updates.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters keyed by the scan id they are waiting to observe.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
105
/// An immutable view of a worktree's entries at a point in time, indexed both
/// by path and by entry id, along with its known git repositories.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    root_name: String,
    // Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
128
/// A git repository known to a snapshot: the project entry of its working
/// directory plus the currently checked-out branch, if known.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    pub(crate) branch: Option<Arc<str>>,
}
134
135impl RepositoryEntry {
136 pub fn branch(&self) -> Option<Arc<str>> {
137 self.branch.clone()
138 }
139
140 pub fn work_directory_id(&self) -> ProjectEntryId {
141 *self.work_directory
142 }
143
144 pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
145 snapshot
146 .entry_for_id(self.work_directory_id())
147 .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
148 }
149
150 pub fn build_update(&self, _: &Self) -> proto::RepositoryEntry {
151 proto::RepositoryEntry {
152 work_directory_id: self.work_directory_id().to_proto(),
153 branch: self.branch.as_ref().map(|str| str.to_string()),
154 }
155 }
156}
157
158impl From<&RepositoryEntry> for proto::RepositoryEntry {
159 fn from(value: &RepositoryEntry) -> Self {
160 proto::RepositoryEntry {
161 work_directory_id: value.work_directory.to_proto(),
162 branch: value.branch.as_ref().map(|str| str.to_string()),
163 }
164 }
165}
166
/// This path corresponds to the 'content path' (the folder that contains the .git)
/// It is stored relative to the worktree root, as produced by `Entry::path`.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(pub(crate) Arc<Path>);
170
171impl Default for RepositoryWorkDirectory {
172 fn default() -> Self {
173 RepositoryWorkDirectory(Arc::from(Path::new("")))
174 }
175}
176
/// Borrows the underlying worktree-relative path.
impl AsRef<Path> for RepositoryWorkDirectory {
    fn as_ref(&self) -> &Path {
        self.0.as_ref()
    }
}
182
/// Newtype over the project entry id of a git repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
185
186impl WorkDirectoryEntry {
187 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
188 worktree.entry_for_id(self.0).and_then(|entry| {
189 path.strip_prefix(&entry.path)
190 .ok()
191 .map(move |path| path.into())
192 })
193 }
194}
195
/// Lets a `WorkDirectoryEntry` be used wherever a `ProjectEntryId` is expected.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
203
204impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
205 fn from(value: ProjectEntryId) -> Self {
206 WorkDirectoryEntry(value)
207 }
208}
209
/// A `Snapshot` extended with local-only state: parsed gitignore files and
/// handles to the git repositories discovered on disk.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
}
220
/// Mutable state owned by the background scanner while it processes
/// file-system events and scan requests.
struct BackgroundScannerState {
    snapshot: LocalSnapshot,
    // Directories whose contents have already been scanned.
    scanned_dirs: HashSet<ProjectEntryId>,
    path_prefixes_to_scan: HashSet<Arc<Path>>,
    paths_to_scan: HashSet<Arc<Path>>,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    changed_paths: Vec<Arc<Path>>,
    prev_snapshot: Snapshot,
}
234
/// A handle to a git repository found on disk, along with the scan id at
/// which its `.git` directory was last observed to change.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    pub(crate) git_dir_scan_id: usize,
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
243
/// Exposes the shared `Snapshot` API on `LocalSnapshot`.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
251
/// Mutable access to the inner `Snapshot`.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
257
/// Messages sent from the background scanner to the `LocalWorktree` model.
enum ScanState {
    // A scan has begun; the worktree should report itself as scanning.
    Started,
    // A new snapshot is available. `scanning` indicates whether more updates
    // for the same scan are still expected; dropping `barrier` wakes any
    // caller waiting on the corresponding `ScanRequest`.
    Updated {
        snapshot: LocalSnapshot,
        changes: UpdatedEntriesSet,
        barrier: Option<barrier::Sender>,
        scanning: bool,
    },
}
267
/// State kept while a local worktree is shared with remote collaborators:
/// the channel feeding snapshots to the update-broadcasting task, and the
/// task itself.
struct ShareState {
    project_id: u64,
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    // Nudged to resume sending updates after a reconnect; see `observe_updates`.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
275
/// Events emitted by a `Worktree` model when its snapshot changes.
pub enum Event {
    UpdatedEntries(UpdatedEntriesSet),
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
}
280
/// Registers `Worktree` as a GPUI model entity emitting `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
284
285impl Worktree {
    /// Creates a worktree backed by the local file system, rooted at `path`.
    ///
    /// Stats the root path up front, then constructs the model together with
    /// a background scanner task that keeps the snapshot in sync with the
    /// file system. Returns an error if the root path cannot be stat'ed.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // scan_id ahead of completed_scan_id marks the initial
                    // scan as still in progress.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry, if the path exists on disk.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan-state messages from the background scanner into
            // this model, updating `is_scanning` and the snapshot.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, changes, cx);
                                // Dropping the barrier wakes whoever requested
                                // the scan (see `refresh_entries_for_paths`).
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Run the scanner on the background executor, driven by
            // file-system events debounced to 100ms.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        next_entry_id,
                        fs,
                        scan_states_tx,
                        background,
                        scan_requests_rx,
                        path_prefixes_to_scan_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                scan_requests_tx,
                path_prefixes_to_scan_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
399
    /// Creates a worktree that mirrors a remote collaborator's worktree.
    ///
    /// Incoming `proto::UpdateWorktree` messages are applied to a background
    /// snapshot on the background executor; the foreground model then copies
    /// that snapshot and wakes any subscribers waiting on a scan id.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply remote updates off the main thread, signalling
            // `snapshot_updated_tx` after each one (even on failure, so
            // waiters are not left hanging).
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Mirror the background snapshot into the model whenever it
            // changes, and complete any snapshot subscriptions whose scan id
            // has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Arc::from([])));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        // Model dropped; stop mirroring.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
482
483 pub fn as_local(&self) -> Option<&LocalWorktree> {
484 if let Worktree::Local(worktree) = self {
485 Some(worktree)
486 } else {
487 None
488 }
489 }
490
491 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
492 if let Worktree::Remote(worktree) = self {
493 Some(worktree)
494 } else {
495 None
496 }
497 }
498
499 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
500 if let Worktree::Local(worktree) = self {
501 Some(worktree)
502 } else {
503 None
504 }
505 }
506
507 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
508 if let Worktree::Remote(worktree) = self {
509 Some(worktree)
510 } else {
511 None
512 }
513 }
514
515 pub fn is_local(&self) -> bool {
516 matches!(self, Worktree::Local(_))
517 }
518
519 pub fn is_remote(&self) -> bool {
520 !self.is_local()
521 }
522
    /// Returns a clone of the worktree's current `Snapshot` (the shared
    /// portion, for local worktrees).
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
529
    /// The id of the most recently started scan (see `Snapshot::scan_id`).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
536
    /// The latest scan id whose scan (and all preceding scans) have completed.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
543
    /// Whether this worktree should be shown in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
550
    /// The replica id of this worktree's owner; local worktrees are always
    /// replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
557
558 pub fn diagnostic_summaries(
559 &self,
560 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
561 match self {
562 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
563 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
564 }
565 .iter()
566 .flat_map(|(path, summaries)| {
567 summaries
568 .iter()
569 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
570 })
571 }
572
    /// The absolute path of the worktree's root on disk.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
579
580 pub fn root_file(&self, cx: &mut ModelContext<Self>) -> Option<Arc<File>> {
581 let entry = self.root_entry()?;
582 Some(File::for_entry(entry.clone(), cx.handle()))
583 }
584}
585
586impl LocalWorktree {
587 pub fn contains_abs_path(&self, path: &Path) -> bool {
588 path.starts_with(&self.abs_path)
589 }
590
    /// Loads the file at `path` from disk and constructs a `Buffer` model
    /// for it, parsing the text off the main thread.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            // `load` also returns the git index text (diff base), if any.
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer on the background executor.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|_| Buffer::build(text_buffer, diff_base, Some(Arc::new(file)))))
        })
    }
609
610 pub fn diagnostics_for_path(
611 &self,
612 path: &Path,
613 ) -> Vec<(
614 LanguageServerId,
615 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
616 )> {
617 self.diagnostics.get(path).cloned().unwrap_or_default()
618 }
619
    /// Removes all diagnostics produced by the given language server. While
    /// the worktree is shared, collaborators are sent a zeroed summary for
    /// each affected path so they clear their copies too.
    pub fn clear_diagnostics_for_language_server(
        &mut self,
        server_id: LanguageServerId,
        _: &mut ModelContext<Worktree>,
    ) {
        let worktree_id = self.id().to_proto();
        self.diagnostic_summaries
            .retain(|path, summaries_by_server_id| {
                if summaries_by_server_id.remove(&server_id).is_some() {
                    if let Some(share) = self.share.as_ref() {
                        // Zero counts tell remote participants to drop this
                        // path's diagnostics for this server.
                        self.client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id: share.project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path: path.to_string_lossy().to_string(),
                                    language_server_id: server_id.0 as u64,
                                    error_count: 0,
                                    warning_count: 0,
                                }),
                            })
                            .log_err();
                    }
                    // Keep the path only if other servers still have summaries.
                    !summaries_by_server_id.is_empty()
                } else {
                    true
                }
            });

        // The per-path diagnostics are kept sorted by server id, so a binary
        // search locates the entry to remove.
        self.diagnostics.retain(|_, diagnostics_by_server_id| {
            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                diagnostics_by_server_id.remove(ix);
                !diagnostics_by_server_id.is_empty()
            } else {
                true
            }
        });
    }
658
    /// Replaces the diagnostics for `worktree_path` produced by `server_id`.
    ///
    /// Returns `Ok(true)` when the summary changed (i.e. either the old or
    /// the new summary is non-empty), and broadcasts the new summary to
    /// collaborators while the worktree is shared.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: remove its entry, and
            // drop the path entirely once no server has diagnostics for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // Insert or replace while keeping the Vec sorted by server id.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
718
    /// Installs a new snapshot produced by the background scanner, forwards
    /// it to collaborators if the worktree is shared, and emits change events.
    fn set_snapshot(
        &mut self,
        new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut ModelContext<Worktree>,
    ) {
        // Diff git repository state before replacing the old snapshot.
        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);

        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((
                    self.snapshot.clone(),
                    entry_changes.clone(),
                    repo_changes.clone(),
                ))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }
747
    /// Computes which git repositories were added, removed, or modified
    /// between two snapshots, using a merge-join over the two repository
    /// maps (both iterate in entry-id order).
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        // Present only in the new snapshot: a newly added repository.
                        Ordering::Less => {
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        // Present in both: report a change only if the .git
                        // directory was rescanned since the old snapshot.
                        Ordering::Equal => {
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repository_entries
                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
                                        .cloned();
                                    changes.push((
                                        entry.path.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        // Present only in the old snapshot: a removed repository.
                        Ordering::Greater => {
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repository_entries
                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
                                    .cloned();
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                // Old iterator exhausted: remaining new repos were added.
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                // New iterator exhausted: remaining old repos were removed.
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repository_entries
                            .get(&RepositoryWorkDirectory(entry.path.clone()))
                            .cloned();
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Helper to turn a peeked `(&K, &V)` pair into owned values.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
842
843 pub fn scan_complete(&self) -> impl Future<Output = ()> {
844 let mut is_scanning_rx = self.is_scanning.1.clone();
845 async move {
846 let mut is_scanning = is_scanning_rx.borrow().clone();
847 while is_scanning {
848 if let Some(value) = is_scanning_rx.recv().await {
849 is_scanning = value;
850 } else {
851 break;
852 }
853 }
854 }
855 }
856
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
860
861 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
862 proto::WorktreeMetadata {
863 id: self.id().to_proto(),
864 root_name: self.root_name().to_string(),
865 visible: self.visible,
866 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
867 }
868 }
869
    /// Reads the file at `path` from disk, refreshing its worktree entry,
    /// and returns the `File`, its text, and its git index text (diff base)
    /// if the file belongs to a repository.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Make sure the entry is up to date before constructing the File.
        let entry = self.refresh_entry(path.clone(), None, cx);

        cx.spawn(|this, cx| async move {
            let text = fs.load(&abs_path).await?;
            let entry = entry.await?;

            // If the file is inside a git repository, load its index text on
            // the background executor.
            let mut index_task = None;
            let snapshot = this.read_with(&cx, |this, _| this.as_local().unwrap().snapshot());
            if let Some(repo) = snapshot.repository_for_path(&path) {
                let repo_path = repo.work_directory.relativize(&snapshot, &path).unwrap();
                if let Some(repo) = snapshot.git_repositories.get(&*repo.work_directory) {
                    let repo = repo.repo_ptr.clone();
                    index_task = Some(
                        cx.background()
                            .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                    );
                }
            }

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: this,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
917
918 pub fn save_buffer(
919 &self,
920 buffer_handle: ModelHandle<Buffer>,
921 path: Arc<Path>,
922 has_changed_file: bool,
923 cx: &mut ModelContext<Worktree>,
924 ) -> Task<Result<()>> {
925 let handle = cx.handle();
926 let buffer = buffer_handle.read(cx);
927
928 let rpc = self.client.clone();
929 let buffer_id = buffer.remote_id();
930 let project_id = self.share.as_ref().map(|share| share.project_id);
931
932 let text = buffer.as_rope().clone();
933 let fingerprint = text.fingerprint();
934 let version = buffer.version();
935 let save = self.write_file(path, text, buffer.line_ending(), cx);
936
937 cx.as_mut().spawn(|mut cx| async move {
938 let entry = save.await?;
939
940 if has_changed_file {
941 let new_file = Arc::new(File {
942 entry_id: entry.id,
943 worktree: handle,
944 path: entry.path,
945 mtime: entry.mtime,
946 is_local: true,
947 is_deleted: false,
948 });
949
950 if let Some(project_id) = project_id {
951 rpc.send(proto::UpdateBufferFile {
952 project_id,
953 buffer_id,
954 file: Some(new_file.to_proto()),
955 })
956 .log_err();
957 }
958
959 buffer_handle.update(&mut cx, |buffer, cx| {
960 if has_changed_file {
961 buffer.file_updated(new_file, cx).detach();
962 }
963 });
964 }
965
966 if let Some(project_id) = project_id {
967 rpc.send(proto::BufferSaved {
968 project_id,
969 buffer_id,
970 version: serialize_version(&version),
971 mtime: Some(entry.mtime.into()),
972 fingerprint: serialize_fingerprint(fingerprint),
973 })?;
974 }
975
976 buffer_handle.update(&mut cx, |buffer, cx| {
977 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
978 });
979
980 Ok(())
981 })
982 }
983
    /// Creates a file (empty) or directory at `path` on disk, then refreshes
    /// the new entry and all of its ancestor entries in the snapshot.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // An empty file with default line endings.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            // Refresh the ancestors too, since intermediate directories may
            // have been created by the write.
            let (result, refreshes) = this.update(&mut cx, |this, cx| {
                let mut refreshes = Vec::new();
                for path in path.ancestors().skip(1) {
                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
                        path.into(),
                        None,
                        cx,
                    ));
                }
                (
                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
                    refreshes,
                )
            });
            for refresh in refreshes {
                refresh.await.log_err();
            }

            result.await
        })
    }
1025
    /// Saves `text` to `path` on disk with the given line endings, then
    /// refreshes and returns the corresponding worktree entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
1048
    /// Deletes the entry with the given id from disk (recursively for
    /// directories), then rescans its path so the snapshot reflects the
    /// removal. Returns `None` if the entry doesn't exist.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(entry.path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let path = delete.await?;
            // Wait for the rescan of the deleted path to complete.
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entries_for_paths(vec![path])
            })
            .recv()
            .await;
            Ok(())
        }))
    }
1086
    /// Renames the entry with the given id to `new_path` on disk, then
    /// refreshes both the old and new paths in the snapshot. Returns `None`
    /// if the entry doesn't exist.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
1113
    /// Recursively copies the entry with the given id to `new_path` on disk,
    /// then refreshes the destination in the snapshot. Returns `None` if the
    /// entry doesn't exist.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
1145
    /// Requests a scan of the directory entry with the given id, resolving
    /// once the scan has completed. Returns `None` if the entry doesn't exist.
    pub fn expand_entry(
        &mut self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let path = self.entry_for_id(entry_id)?.path.clone();
        let mut refresh = self.refresh_entries_for_paths(vec![path]);
        Some(cx.background().spawn(async move {
            // The barrier receiver yields once the scanner drops its sender.
            refresh.next().await;
            Ok(())
        }))
    }
1158
1159 pub fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
1160 let (tx, rx) = barrier::channel();
1161 self.scan_requests_tx
1162 .try_send(ScanRequest {
1163 relative_paths: paths,
1164 done: tx,
1165 })
1166 .ok();
1167 rx
1168 }
1169
1170 pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) {
1171 self.path_prefixes_to_scan_tx.try_send(path_prefix).ok();
1172 }
1173
    /// Rescans `path` (and `old_path`, for renames), then returns the
    /// refreshed entry at `path`. Fails if the worktree is dropped or the
    /// entry is missing after the scan.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let paths = if let Some(old_path) = old_path.as_ref() {
            vec![old_path.clone(), path.clone()]
        } else {
            vec![path.clone()]
        };
        let mut refresh = self.refresh_entries_for_paths(paths);
        cx.spawn_weak(move |this, mut cx| async move {
            // Wait for the scan of the requested paths to finish.
            refresh.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1197
    /// Begins streaming this worktree's snapshot updates to `callback`.
    ///
    /// The callback is invoked with `proto::UpdateWorktree` chunks and returns
    /// a future resolving to `true` when the chunk was delivered; on `false`,
    /// sending pauses until updates are resumed via the share's
    /// `resume_updates` channel. The returned receiver fires when the stream
    /// of snapshots has been fully consumed. If a share already exists, the
    /// existing stream is nudged to resume and a pre-completed receiver is
    /// returned.
    pub fn observe_updates<F, Fut>(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
        callback: F,
    ) -> oneshot::Receiver<()>
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: Send + Future<Output = bool>,
    {
        // Use a tiny chunk size under test so the splitting logic is exercised.
        #[cfg(any(test, feature = "test-support"))]
        const MAX_CHUNK_SIZE: usize = 2;
        #[cfg(not(any(test, feature = "test-support")))]
        const MAX_CHUNK_SIZE: usize = 256;

        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: signal completion immediately and wake the
            // existing update task in case it was paused.
            share_tx.send(()).ok();
            *share.resume_updates.borrow_mut() = ();
            return share_rx;
        }

        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
        let (snapshots_tx, mut snapshots_rx) =
            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
        // Seed the stream with the current snapshot and empty change sets so
        // the observer receives a full initial update.
        snapshots_tx
            .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
            .ok();

        let worktree_id = cx.model_id() as u64;
        let _maintain_remote_snapshot = cx.background().spawn(async move {
            let mut is_first = true;
            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
                let update;
                if is_first {
                    update = snapshot.build_initial_update(project_id, worktree_id);
                    is_first = false;
                } else {
                    update =
                        snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes);
                }

                // Large updates are sent in bounded chunks.
                for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                    // Drain any stale resume signal before sending.
                    let _ = resume_updates_rx.try_recv();
                    loop {
                        let result = callback(update.clone());
                        if result.await {
                            break;
                        } else {
                            // Delivery failed: wait until the share is resumed,
                            // or end the task if the resume channel is closed.
                            log::info!("waiting to resume updates");
                            if resume_updates_rx.next().await.is_none() {
                                return Some(());
                            }
                        }
                    }
                }
            }
            share_tx.send(()).ok();
            Some(())
        });

        self.share = Some(ShareState {
            project_id,
            snapshots_tx,
            resume_updates: resume_updates_tx,
            _maintain_remote_snapshot,
        });
        share_rx
    }
1268
1269 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1270 let client = self.client.clone();
1271
1272 for (path, summaries) in &self.diagnostic_summaries {
1273 for (&server_id, summary) in summaries {
1274 if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1275 project_id,
1276 worktree_id: cx.model_id() as u64,
1277 summary: Some(summary.to_proto(server_id, &path)),
1278 }) {
1279 return Task::ready(Err(e));
1280 }
1281 }
1282 }
1283
1284 let rx = self.observe_updates(project_id, cx, move |update| {
1285 client.request(update).map(|result| result.is_ok())
1286 });
1287 cx.foreground()
1288 .spawn(async move { rx.await.map_err(|_| anyhow!("share ended")) })
1289 }
1290
1291 pub fn unshare(&mut self) {
1292 self.share.take();
1293 }
1294
    /// Whether this worktree is currently being shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1298}
1299
impl RemoteWorktree {
    /// Returns a clone of the current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks this worktree as disconnected: stops accepting updates from the
    /// host and drops all tasks waiting on future snapshots.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Saves the given buffer by sending a `SaveBuffer` request to the host,
    /// then applies the returned version, fingerprint, and mtime locally.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok(())
        })
    }

    /// Forwards a worktree update from the host to the background consumer.
    /// Silently drops the update if this worktree has disconnected.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot with at least the given scan id has been fully
    /// observed by this replica.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Resolves once a snapshot with the given scan id has been observed.
    /// Errors (the sender is dropped) if the worktree has disconnected or
    /// disconnects before the snapshot arrives.
    pub(crate) fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already observed: complete immediately.
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes the receiver resolve with an error.
            drop(tx);
        } else {
            // Keep subscriptions ordered by scan id so they can be completed
            // in order as snapshots arrive.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Records the diagnostic summary reported for `path` by a language
    /// server, removing the record entirely when the summary is empty.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                // Drop the path's map entirely once no server reports for it.
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry received from the host, after waiting for this
    /// replica to observe the snapshot with the given scan id.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                // Keep the foreground snapshot in sync with the background one.
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes the entry with the given id, after waiting for this replica
    /// to observe the snapshot with the given scan id.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                // Keep the foreground snapshot in sync with the background one.
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1441
1442impl Snapshot {
1443 pub fn id(&self) -> WorktreeId {
1444 self.id
1445 }
1446
1447 pub fn abs_path(&self) -> &Arc<Path> {
1448 &self.abs_path
1449 }
1450
1451 pub fn absolutize(&self, path: &Path) -> PathBuf {
1452 if path.file_name().is_some() {
1453 self.abs_path.join(path)
1454 } else {
1455 self.abs_path.to_path_buf()
1456 }
1457 }
1458
1459 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1460 self.entries_by_id.get(&entry_id, &()).is_some()
1461 }
1462
1463 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1464 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1465 let old_entry = self.entries_by_id.insert_or_replace(
1466 PathEntry {
1467 id: entry.id,
1468 path: entry.path.clone(),
1469 is_ignored: entry.is_ignored,
1470 scan_id: 0,
1471 },
1472 &(),
1473 );
1474 if let Some(old_entry) = old_entry {
1475 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1476 }
1477 self.entries_by_path.insert_or_replace(entry.clone(), &());
1478 Ok(entry)
1479 }
1480
1481 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1482 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1483 self.entries_by_path = {
1484 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1485 let mut new_entries_by_path =
1486 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1487 while let Some(entry) = cursor.item() {
1488 if entry.path.starts_with(&removed_entry.path) {
1489 self.entries_by_id.remove(&entry.id, &());
1490 cursor.next(&());
1491 } else {
1492 break;
1493 }
1494 }
1495 new_entries_by_path.append(cursor.suffix(&()), &());
1496 new_entries_by_path
1497 };
1498
1499 Some(removed_entry.path)
1500 }
1501
1502 #[cfg(any(test, feature = "test-support"))]
1503 pub fn status_for_file(&self, path: impl Into<PathBuf>) -> Option<GitFileStatus> {
1504 let path = path.into();
1505 self.entries_by_path
1506 .get(&PathKey(Arc::from(path)), &())
1507 .and_then(|entry| entry.git_status)
1508 }
1509
1510 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1511 let mut entries_by_path_edits = Vec::new();
1512 let mut entries_by_id_edits = Vec::new();
1513
1514 for entry_id in update.removed_entries {
1515 let entry_id = ProjectEntryId::from_proto(entry_id);
1516 entries_by_id_edits.push(Edit::Remove(entry_id));
1517 if let Some(entry) = self.entry_for_id(entry_id) {
1518 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1519 }
1520 }
1521
1522 for entry in update.updated_entries {
1523 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1524 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1525 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1526 }
1527 if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
1528 if old_entry.id != entry.id {
1529 entries_by_id_edits.push(Edit::Remove(old_entry.id));
1530 }
1531 }
1532 entries_by_id_edits.push(Edit::Insert(PathEntry {
1533 id: entry.id,
1534 path: entry.path.clone(),
1535 is_ignored: entry.is_ignored,
1536 scan_id: 0,
1537 }));
1538 entries_by_path_edits.push(Edit::Insert(entry));
1539 }
1540
1541 self.entries_by_path.edit(entries_by_path_edits, &());
1542 self.entries_by_id.edit(entries_by_id_edits, &());
1543
1544 update.removed_repositories.sort_unstable();
1545 self.repository_entries.retain(|_, entry| {
1546 if let Ok(_) = update
1547 .removed_repositories
1548 .binary_search(&entry.work_directory.to_proto())
1549 {
1550 false
1551 } else {
1552 true
1553 }
1554 });
1555
1556 for repository in update.updated_repositories {
1557 let work_directory_entry: WorkDirectoryEntry =
1558 ProjectEntryId::from_proto(repository.work_directory_id).into();
1559
1560 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1561 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1562 if self.repository_entries.get(&work_directory).is_some() {
1563 self.repository_entries.update(&work_directory, |repo| {
1564 repo.branch = repository.branch.map(Into::into);
1565 });
1566 } else {
1567 self.repository_entries.insert(
1568 work_directory,
1569 RepositoryEntry {
1570 work_directory: work_directory_entry,
1571 branch: repository.branch.map(Into::into),
1572 },
1573 )
1574 }
1575 } else {
1576 log::error!("no work directory entry for repository {:?}", repository)
1577 }
1578 }
1579
1580 self.scan_id = update.scan_id as usize;
1581 if update.is_last_update {
1582 self.completed_scan_id = update.scan_id as usize;
1583 }
1584
1585 Ok(())
1586 }
1587
1588 pub fn file_count(&self) -> usize {
1589 self.entries_by_path.summary().file_count
1590 }
1591
1592 pub fn visible_file_count(&self) -> usize {
1593 self.entries_by_path.summary().non_ignored_file_count
1594 }
1595
1596 fn traverse_from_offset(
1597 &self,
1598 include_dirs: bool,
1599 include_ignored: bool,
1600 start_offset: usize,
1601 ) -> Traversal {
1602 let mut cursor = self.entries_by_path.cursor();
1603 cursor.seek(
1604 &TraversalTarget::Count {
1605 count: start_offset,
1606 include_dirs,
1607 include_ignored,
1608 },
1609 Bias::Right,
1610 &(),
1611 );
1612 Traversal {
1613 cursor,
1614 include_dirs,
1615 include_ignored,
1616 }
1617 }
1618
1619 fn traverse_from_path(
1620 &self,
1621 include_dirs: bool,
1622 include_ignored: bool,
1623 path: &Path,
1624 ) -> Traversal {
1625 let mut cursor = self.entries_by_path.cursor();
1626 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1627 Traversal {
1628 cursor,
1629 include_dirs,
1630 include_ignored,
1631 }
1632 }
1633
1634 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1635 self.traverse_from_offset(false, include_ignored, start)
1636 }
1637
1638 pub fn entries(&self, include_ignored: bool) -> Traversal {
1639 self.traverse_from_offset(true, include_ignored, 0)
1640 }
1641
1642 pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
1643 self.repository_entries
1644 .iter()
1645 .map(|(path, entry)| (&path.0, entry))
1646 }
1647
1648 /// Get the repository whose work directory contains the given path.
1649 pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
1650 self.repository_entries
1651 .get(&RepositoryWorkDirectory(path.into()))
1652 .cloned()
1653 }
1654
1655 /// Get the repository whose work directory contains the given path.
1656 pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1657 self.repository_and_work_directory_for_path(path)
1658 .map(|e| e.1)
1659 }
1660
1661 pub fn repository_and_work_directory_for_path(
1662 &self,
1663 path: &Path,
1664 ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
1665 self.repository_entries
1666 .iter()
1667 .filter(|(workdir_path, _)| path.starts_with(workdir_path))
1668 .last()
1669 .map(|(path, repo)| (path.clone(), repo.clone()))
1670 }
1671
1672 /// Given an ordered iterator of entries, returns an iterator of those entries,
1673 /// along with their containing git repository.
1674 pub fn entries_with_repositories<'a>(
1675 &'a self,
1676 entries: impl 'a + Iterator<Item = &'a Entry>,
1677 ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
1678 let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
1679 let mut repositories = self.repositories().peekable();
1680 entries.map(move |entry| {
1681 while let Some((repo_path, _)) = containing_repos.last() {
1682 if !entry.path.starts_with(repo_path) {
1683 containing_repos.pop();
1684 } else {
1685 break;
1686 }
1687 }
1688 while let Some((repo_path, _)) = repositories.peek() {
1689 if entry.path.starts_with(repo_path) {
1690 containing_repos.push(repositories.next().unwrap());
1691 } else {
1692 break;
1693 }
1694 }
1695 let repo = containing_repos.last().map(|(_, repo)| *repo);
1696 (entry, repo)
1697 })
1698 }
1699
1700 /// Update the `git_status` of the given entries such that files'
1701 /// statuses bubble up to their ancestor directories.
1702 pub fn propagate_git_statuses(&self, result: &mut [Entry]) {
1703 let mut cursor = self
1704 .entries_by_path
1705 .cursor::<(TraversalProgress, GitStatuses)>();
1706 let mut entry_stack = Vec::<(usize, GitStatuses)>::new();
1707
1708 let mut result_ix = 0;
1709 loop {
1710 let next_entry = result.get(result_ix);
1711 let containing_entry = entry_stack.last().map(|(ix, _)| &result[*ix]);
1712
1713 let entry_to_finish = match (containing_entry, next_entry) {
1714 (Some(_), None) => entry_stack.pop(),
1715 (Some(containing_entry), Some(next_path)) => {
1716 if !next_path.path.starts_with(&containing_entry.path) {
1717 entry_stack.pop()
1718 } else {
1719 None
1720 }
1721 }
1722 (None, Some(_)) => None,
1723 (None, None) => break,
1724 };
1725
1726 if let Some((entry_ix, prev_statuses)) = entry_to_finish {
1727 cursor.seek_forward(
1728 &TraversalTarget::PathSuccessor(&result[entry_ix].path),
1729 Bias::Left,
1730 &(),
1731 );
1732
1733 let statuses = cursor.start().1 - prev_statuses;
1734
1735 result[entry_ix].git_status = if statuses.conflict > 0 {
1736 Some(GitFileStatus::Conflict)
1737 } else if statuses.modified > 0 {
1738 Some(GitFileStatus::Modified)
1739 } else if statuses.added > 0 {
1740 Some(GitFileStatus::Added)
1741 } else {
1742 None
1743 };
1744 } else {
1745 if result[result_ix].is_dir() {
1746 cursor.seek_forward(
1747 &TraversalTarget::Path(&result[result_ix].path),
1748 Bias::Left,
1749 &(),
1750 );
1751 entry_stack.push((result_ix, cursor.start().1));
1752 }
1753 result_ix += 1;
1754 }
1755 }
1756 }
1757
1758 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1759 let empty_path = Path::new("");
1760 self.entries_by_path
1761 .cursor::<()>()
1762 .filter(move |entry| entry.path.as_ref() != empty_path)
1763 .map(|entry| &entry.path)
1764 }
1765
1766 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1767 let mut cursor = self.entries_by_path.cursor();
1768 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1769 let traversal = Traversal {
1770 cursor,
1771 include_dirs: true,
1772 include_ignored: true,
1773 };
1774 ChildEntriesIter {
1775 traversal,
1776 parent_path,
1777 }
1778 }
1779
1780 pub fn descendent_entries<'a>(
1781 &'a self,
1782 include_dirs: bool,
1783 include_ignored: bool,
1784 parent_path: &'a Path,
1785 ) -> DescendentEntriesIter<'a> {
1786 let mut cursor = self.entries_by_path.cursor();
1787 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
1788 let mut traversal = Traversal {
1789 cursor,
1790 include_dirs,
1791 include_ignored,
1792 };
1793
1794 if traversal.end_offset() == traversal.start_offset() {
1795 traversal.advance();
1796 }
1797
1798 DescendentEntriesIter {
1799 traversal,
1800 parent_path,
1801 }
1802 }
1803
1804 pub fn root_entry(&self) -> Option<&Entry> {
1805 self.entry_for_path("")
1806 }
1807
1808 pub fn root_name(&self) -> &str {
1809 &self.root_name
1810 }
1811
1812 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1813 self.repository_entries
1814 .get(&RepositoryWorkDirectory(Path::new("").into()))
1815 .map(|entry| entry.to_owned())
1816 }
1817
1818 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1819 self.repository_entries.values()
1820 }
1821
1822 pub fn scan_id(&self) -> usize {
1823 self.scan_id
1824 }
1825
1826 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1827 let path = path.as_ref();
1828 self.traverse_from_path(true, true, path)
1829 .entry()
1830 .and_then(|entry| {
1831 if entry.path.as_ref() == path {
1832 Some(entry)
1833 } else {
1834 None
1835 }
1836 })
1837 }
1838
1839 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1840 let entry = self.entries_by_id.get(&id, &())?;
1841 self.entry_for_path(&entry.path)
1842 }
1843
1844 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1845 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1846 }
1847}
1848
impl LocalSnapshot {
    /// Returns the local (on-disk) state for the given repository entry.
    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }

    /// Returns the deepest repository containing `path`, along with its
    /// local (on-disk) repository state.
    pub(crate) fn local_repo_for_path(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, &LocalRepositoryEntry)> {
        let (path, repo) = self.repository_and_work_directory_for_path(path)?;
        Some((path, self.git_repositories.get(&repo.work_directory_id())?))
    }

    /// Builds an incremental `UpdateWorktree` message describing the given
    /// entry and repository changes relative to this snapshot.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updated_repositories = Vec::new();
        let mut removed_repositories = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        for (work_dir_path, change) in repo_changes.iter() {
            let new_repo = self
                .repository_entries
                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
            // Classify each change as an update, an insertion, or a removal
            // depending on whether the repo existed before and/or exists now.
            match (&change.old_repository, new_repo) {
                (Some(old_repo), Some(new_repo)) => {
                    updated_repositories.push(new_repo.build_update(old_repo));
                }
                (None, Some(new_repo)) => {
                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
                }
                (Some(old_repo), None) => {
                    removed_repositories.push(old_repo.work_directory.0.to_proto());
                }
                _ => {}
            }
        }

        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);
        removed_repositories.sort_unstable();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        // An entry that was both removed and re-added counts as updated only.
        // TODO - optimize, knowing that removed_entries are sorted.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }

    /// Builds an `UpdateWorktree` message containing the entire snapshot,
    /// used when a peer first begins observing this worktree.
    fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
        let mut updated_entries = self
            .entries_by_path
            .iter()
            .map(proto::Entry::from)
            .collect::<Vec<_>>();
        updated_entries.sort_unstable_by_key(|e| e.id);

        let mut updated_repositories = self
            .repository_entries
            .values()
            .map(proto::RepositoryEntry::from)
            .collect::<Vec<_>>();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries: Vec::new(),
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories: Vec::new(),
        }
    }

    /// Inserts `entry` into both entry trees. If the entry is a `.gitignore`
    /// file, its rules are loaded synchronously and cached by parent dir.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Don't downgrade a directory that was already loaded back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            // A different entry previously occupied this path; drop its id.
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Refreshes the git status of every file beneath `work_directory`,
    /// returning the paths whose status changed.
    #[must_use = "Changed paths must be used for diffing later"]
    fn scan_statuses(
        &mut self,
        repo_ptr: &dyn GitRepository,
        work_directory: &RepositoryWorkDirectory,
    ) -> Vec<Arc<Path>> {
        let mut changes = vec![];
        let mut edits = vec![];
        for mut entry in self
            .descendent_entries(false, false, &work_directory.0)
            .cloned()
        {
            let Ok(repo_path) = entry.path.strip_prefix(&work_directory.0) else {
                continue;
            };
            let git_file_status = repo_ptr
                .status(&RepoPath(repo_path.into()))
                .log_err()
                .flatten();
            // Only record entries whose status actually changed.
            if entry.git_status != git_file_status {
                entry.git_status = git_file_status;
                changes.push(entry.path.clone());
                edits.push(Edit::Insert(entry));
            }
        }

        self.entries_by_path.edit(edits, &());
        changes
    }

    /// Collects the inodes of all known ancestors of `path` (excluding the
    /// path itself), used for symlink cycle detection.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignore rules that applies at `abs_path`,
    /// combining the cached ignores of every ancestor directory.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        // Apply the ancestors outermost-first; if any ancestor directory is
        // itself ignored, everything beneath it is ignored.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    #[cfg(test)]
    pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
        self.entries_by_path
            .cursor::<()>()
            .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
    }

    /// Test-only consistency check: verifies the two entry trees agree, the
    /// file traversals match, BFS/DFS orders coincide, and (optionally) that
    /// cached gitignore state corresponds to real entries.
    #[cfg(test)]
    pub fn check_invariants(&self, git_state: bool) {
        use pretty_assertions::assert_eq;

        assert_eq!(
            self.entries_by_path
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>() {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if !entry.is_ignored && !entry.is_external {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        let mut bfs_paths = Vec::new();
        let mut stack = self
            .root_entry()
            .map(|e| e.path.as_ref())
            .into_iter()
            .collect::<Vec<_>>();
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>()
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        let dfs_paths_via_traversal = self
            .entries(true)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        if git_state {
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }
    }

    /// Test-only helper: returns (path, inode, is_ignored) triples for all
    /// entries, sorted by path.
    #[cfg(test)]
    pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
        let mut paths = Vec::new();
        for entry in self.entries_by_path.cursor::<()>() {
            if include_ignored || !entry.is_ignored {
                paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
            }
        }
        paths.sort_by(|a, b| a.0.cmp(b.0));
        paths
    }
}
2154
2155impl BackgroundScannerState {
2156 fn should_scan_directory(&self, entry: &Entry) -> bool {
2157 (!entry.is_external && !entry.is_ignored)
2158 || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
2159 || self
2160 .paths_to_scan
2161 .iter()
2162 .any(|p| p.starts_with(&entry.path))
2163 || self
2164 .path_prefixes_to_scan
2165 .iter()
2166 .any(|p| entry.path.starts_with(p))
2167 }
2168
2169 fn reuse_entry_id(&mut self, entry: &mut Entry) {
2170 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2171 entry.id = removed_entry_id;
2172 } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
2173 entry.id = existing_entry.id;
2174 }
2175 }
2176
2177 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2178 self.reuse_entry_id(&mut entry);
2179 let entry = self.snapshot.insert_entry(entry, fs);
2180 if entry.path.file_name() == Some(&DOT_GIT) {
2181 self.build_repository(entry.path.clone(), fs);
2182 }
2183
2184 #[cfg(test)]
2185 self.snapshot.check_invariants(false);
2186
2187 entry
2188 }
2189
    /// Records the children of a newly-scanned directory in the snapshot,
    /// marking the parent as loaded, caching its gitignore (if any), and
    /// building repository state if a `.git` child was found.
    fn populate_dir(
        &mut self,
        parent_path: &Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) = self
            .snapshot
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            // The directory was removed while its scan was in flight.
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
            EntryKind::Dir => {}
            // The path no longer refers to a directory; nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
            self.snapshot
                .ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        self.scanned_dirs.insert(parent_entry.id);
        // Re-insert the parent (now marked as a loaded Dir) alongside its
        // children.
        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();
        let mut dotgit_path = None;

        for entry in entries {
            if entry.path.file_name() == Some(&DOT_GIT) {
                dotgit_path = Some(entry.path.clone());
            }

            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.snapshot.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        if let Some(dotgit_path) = dotgit_path {
            self.build_repository(dotgit_path, fs);
        }
        // Record the parent as changed, keeping `changed_paths` sorted.
        if let Err(ix) = self.changed_paths.binary_search(parent_path) {
            self.changed_paths.insert(ix, parent_path.clone());
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }
2258
    /// Removes the entry at `path` and all of its descendants from the
    /// snapshot.
    ///
    /// The ids of removed entries are remembered, keyed by inode, so that a
    /// file re-created later in the same scan can be matched back up with
    /// the same entry id.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into [before `path`], the subtree
            // rooted at `path`, and [after `path`], then stitch the outer
            // two pieces back together.
            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.append(cursor.suffix(&()), &());
        }
        self.snapshot.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest removed id seen for each inode.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // If a .gitignore was removed, flag its directory's ignore data so
        // ignore statuses get recomputed.
        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
            if let Some((_, needs_update)) = self
                .snapshot
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *needs_update = true;
            }
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }
2295
2296 fn reload_repositories(&mut self, changed_paths: &[Arc<Path>], fs: &dyn Fs) {
2297 let scan_id = self.snapshot.scan_id;
2298
2299 // Find each of the .git directories that contain any of the given paths.
2300 let mut prev_dot_git_dir = None;
2301 for changed_path in changed_paths {
2302 let Some(dot_git_dir) = changed_path
2303 .ancestors()
2304 .find(|ancestor| ancestor.file_name() == Some(&*DOT_GIT)) else {
2305 continue;
2306 };
2307
2308 // Avoid processing the same repository multiple times, if multiple paths
2309 // within it have changed.
2310 if prev_dot_git_dir == Some(dot_git_dir) {
2311 continue;
2312 }
2313 prev_dot_git_dir = Some(dot_git_dir);
2314
2315 // If there is already a repository for this .git directory, reload
2316 // the status for all of its files.
2317 let repository = self
2318 .snapshot
2319 .git_repositories
2320 .iter()
2321 .find_map(|(entry_id, repo)| {
2322 (repo.git_dir_path.as_ref() == dot_git_dir).then(|| (*entry_id, repo.clone()))
2323 });
2324 match repository {
2325 None => {
2326 self.build_repository(dot_git_dir.into(), fs);
2327 }
2328 Some((entry_id, repository)) => {
2329 if repository.git_dir_scan_id == scan_id {
2330 continue;
2331 }
2332 let Some(work_dir) = self
2333 .snapshot
2334 .entry_for_id(entry_id)
2335 .map(|entry| RepositoryWorkDirectory(entry.path.clone())) else { continue };
2336
2337 let repository = repository.repo_ptr.lock();
2338 let branch = repository.branch_name();
2339 repository.reload_index();
2340
2341 self.snapshot
2342 .git_repositories
2343 .update(&entry_id, |entry| entry.git_dir_scan_id = scan_id);
2344 self.snapshot
2345 .snapshot
2346 .repository_entries
2347 .update(&work_dir, |entry| entry.branch = branch.map(Into::into));
2348
2349 let changed_paths = self.snapshot.scan_statuses(&*repository, &work_dir);
2350 util::extend_sorted(
2351 &mut self.changed_paths,
2352 changed_paths,
2353 usize::MAX,
2354 Ord::cmp,
2355 )
2356 }
2357 }
2358 }
2359
2360 // Remove any git repositories whose .git entry no longer exists.
2361 let mut snapshot = &mut self.snapshot;
2362 let mut repositories = mem::take(&mut snapshot.git_repositories);
2363 let mut repository_entries = mem::take(&mut snapshot.repository_entries);
2364 repositories.retain(|work_directory_id, _| {
2365 snapshot
2366 .entry_for_id(*work_directory_id)
2367 .map_or(false, |entry| {
2368 snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
2369 })
2370 });
2371 repository_entries.retain(|_, entry| repositories.get(&entry.work_directory.0).is_some());
2372 snapshot.git_repositories = repositories;
2373 snapshot.repository_entries = repository_entries;
2374 }
2375
    /// Registers a git repository for the `.git` directory at
    /// `dot_git_path` (relative to the worktree root) and scans the initial
    /// git statuses of the files in its work directory.
    ///
    /// Returns `None` (as a no-op) when the work directory is not in the
    /// snapshot, a repository is already registered for it, or the
    /// repository cannot be opened.
    fn build_repository(&mut self, dot_git_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
        let work_dir_path: Arc<Path> = dot_git_path.parent().unwrap().into();

        // Guard against repositories inside the repository metadata
        if work_dir_path.iter().any(|component| component == *DOT_GIT) {
            return None;
        };

        let work_dir_id = self
            .snapshot
            .entry_for_path(work_dir_path.clone())
            .map(|entry| entry.id)?;

        if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
            return None;
        }

        let abs_path = self.snapshot.abs_path.join(&dot_git_path);
        let repository = fs.open_repo(abs_path.as_path())?;
        let work_directory = RepositoryWorkDirectory(work_dir_path.clone());

        // Read the branch and compute per-file statuses while holding the
        // repository lock, then release it before mutating the maps below.
        let repo_lock = repository.lock();
        self.snapshot.repository_entries.insert(
            work_directory.clone(),
            RepositoryEntry {
                work_directory: work_dir_id.into(),
                branch: repo_lock.branch_name().map(Into::into),
            },
        );

        let changed_paths = self
            .snapshot
            .scan_statuses(repo_lock.deref(), &work_directory);
        drop(repo_lock);

        self.snapshot.git_repositories.insert(
            work_dir_id,
            LocalRepositoryEntry {
                // Scan id 0 marks the repository as never refreshed.
                git_dir_scan_id: 0,
                repo_ptr: repository,
                git_dir_path: dot_git_path.clone(),
            },
        );

        // Mark all statused paths as changed, keeping the list sorted.
        util::extend_sorted(&mut self.changed_paths, changed_paths, usize::MAX, Ord::cmp);
        Some(())
    }
2423}
2424
2425async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2426 let contents = fs.load(abs_path).await?;
2427 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2428 let mut builder = GitignoreBuilder::new(parent);
2429 for line in contents.lines() {
2430 builder.add_line(Some(abs_path.into()), line)?;
2431 }
2432 Ok(builder.build()?)
2433}
2434
2435impl WorktreeId {
2436 pub fn from_usize(handle_id: usize) -> Self {
2437 Self(handle_id)
2438 }
2439
2440 pub(crate) fn from_proto(id: u64) -> Self {
2441 Self(id as usize)
2442 }
2443
2444 pub fn to_proto(&self) -> u64 {
2445 self.0 as u64
2446 }
2447
2448 pub fn to_usize(&self) -> usize {
2449 self.0
2450 }
2451}
2452
2453impl fmt::Display for WorktreeId {
2454 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2455 self.0.fmt(f)
2456 }
2457}
2458
2459impl Deref for Worktree {
2460 type Target = Snapshot;
2461
2462 fn deref(&self) -> &Self::Target {
2463 match self {
2464 Worktree::Local(worktree) => &worktree.snapshot,
2465 Worktree::Remote(worktree) => &worktree.snapshot,
2466 }
2467 }
2468}
2469
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    // A local worktree dereferences to its local snapshot.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2477
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    // A remote worktree dereferences to its snapshot.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2485
2486impl fmt::Debug for LocalWorktree {
2487 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2488 self.snapshot.fmt(f)
2489 }
2490}
2491
2492impl fmt::Debug for Snapshot {
2493 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2494 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2495 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2496
2497 impl<'a> fmt::Debug for EntriesByPath<'a> {
2498 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2499 f.debug_map()
2500 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2501 .finish()
2502 }
2503 }
2504
2505 impl<'a> fmt::Debug for EntriesById<'a> {
2506 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2507 f.debug_list().entries(self.0.iter()).finish()
2508 }
2509 }
2510
2511 f.debug_struct("Snapshot")
2512 .field("id", &self.id)
2513 .field("root_name", &self.root_name)
2514 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2515 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2516 .finish()
2517 }
2518}
2519
/// A handle to a file within a worktree, used by buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// The file's last-known modification time.
    pub mtime: SystemTime,
    // Id of the worktree entry this file corresponds to.
    pub(crate) entry_id: ProjectEntryId,
    // Whether the file belongs to a local (as opposed to remote) worktree.
    pub(crate) is_local: bool,
    // Whether the underlying filesystem entry has been deleted.
    pub(crate) is_deleted: bool,
}
2529
2530impl language::File for File {
2531 fn as_local(&self) -> Option<&dyn language::LocalFile> {
2532 if self.is_local {
2533 Some(self)
2534 } else {
2535 None
2536 }
2537 }
2538
2539 fn mtime(&self) -> SystemTime {
2540 self.mtime
2541 }
2542
2543 fn path(&self) -> &Arc<Path> {
2544 &self.path
2545 }
2546
2547 fn full_path(&self, cx: &AppContext) -> PathBuf {
2548 let mut full_path = PathBuf::new();
2549 let worktree = self.worktree.read(cx);
2550
2551 if worktree.is_visible() {
2552 full_path.push(worktree.root_name());
2553 } else {
2554 let path = worktree.abs_path();
2555
2556 if worktree.is_local() && path.starts_with(HOME.as_path()) {
2557 full_path.push("~");
2558 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2559 } else {
2560 full_path.push(path)
2561 }
2562 }
2563
2564 if self.path.components().next().is_some() {
2565 full_path.push(&self.path);
2566 }
2567
2568 full_path
2569 }
2570
2571 /// Returns the last component of this handle's absolute path. If this handle refers to the root
2572 /// of its worktree, then this method will return the name of the worktree itself.
2573 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2574 self.path
2575 .file_name()
2576 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2577 }
2578
2579 fn worktree_id(&self) -> usize {
2580 self.worktree.id()
2581 }
2582
2583 fn is_deleted(&self) -> bool {
2584 self.is_deleted
2585 }
2586
2587 fn as_any(&self) -> &dyn Any {
2588 self
2589 }
2590
2591 fn to_proto(&self) -> rpc::proto::File {
2592 rpc::proto::File {
2593 worktree_id: self.worktree.id() as u64,
2594 entry_id: self.entry_id.to_proto(),
2595 path: self.path.to_string_lossy().into(),
2596 mtime: Some(self.mtime.into()),
2597 is_deleted: self.is_deleted,
2598 }
2599 }
2600}
2601
2602impl language::LocalFile for File {
2603 fn abs_path(&self, cx: &AppContext) -> PathBuf {
2604 self.worktree
2605 .read(cx)
2606 .as_local()
2607 .unwrap()
2608 .abs_path
2609 .join(&self.path)
2610 }
2611
2612 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2613 let worktree = self.worktree.read(cx).as_local().unwrap();
2614 let abs_path = worktree.absolutize(&self.path);
2615 let fs = worktree.fs.clone();
2616 cx.background()
2617 .spawn(async move { fs.load(&abs_path).await })
2618 }
2619
2620 fn buffer_reloaded(
2621 &self,
2622 buffer_id: u64,
2623 version: &clock::Global,
2624 fingerprint: RopeFingerprint,
2625 line_ending: LineEnding,
2626 mtime: SystemTime,
2627 cx: &mut AppContext,
2628 ) {
2629 let worktree = self.worktree.read(cx).as_local().unwrap();
2630 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2631 worktree
2632 .client
2633 .send(proto::BufferReloaded {
2634 project_id,
2635 buffer_id,
2636 version: serialize_version(version),
2637 mtime: Some(mtime.into()),
2638 fingerprint: serialize_fingerprint(fingerprint),
2639 line_ending: serialize_line_ending(line_ending) as i32,
2640 })
2641 .log_err();
2642 }
2643 }
2644}
2645
2646impl File {
2647 pub fn for_entry(entry: Entry, worktree: ModelHandle<Worktree>) -> Arc<Self> {
2648 Arc::new(Self {
2649 worktree,
2650 path: entry.path.clone(),
2651 mtime: entry.mtime,
2652 entry_id: entry.id,
2653 is_local: true,
2654 is_deleted: false,
2655 })
2656 }
2657
2658 pub fn from_proto(
2659 proto: rpc::proto::File,
2660 worktree: ModelHandle<Worktree>,
2661 cx: &AppContext,
2662 ) -> Result<Self> {
2663 let worktree_id = worktree
2664 .read(cx)
2665 .as_remote()
2666 .ok_or_else(|| anyhow!("not remote"))?
2667 .id();
2668
2669 if worktree_id.to_proto() != proto.worktree_id {
2670 return Err(anyhow!("worktree id does not match file"));
2671 }
2672
2673 Ok(Self {
2674 worktree,
2675 path: Path::new(&proto.path).into(),
2676 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2677 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2678 is_local: false,
2679 is_deleted: proto.is_deleted,
2680 })
2681 }
2682
2683 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2684 file.and_then(|f| f.as_any().downcast_ref())
2685 }
2686
2687 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2688 self.worktree.read(cx).id()
2689 }
2690
2691 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2692 if self.is_deleted {
2693 None
2694 } else {
2695 Some(self.entry_id)
2696 }
2697 }
2698}
2699
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// The entry's unique id within the project.
    pub id: ProjectEntryId,
    /// Whether the entry is a file or a directory, including a directory's
    /// load state.
    pub kind: EntryKind,
    /// The entry's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// The filesystem inode of the entry.
    pub inode: u64,
    /// The entry's last modification time.
    pub mtime: SystemTime,
    /// Whether the entry is a symbolic link.
    pub is_symlink: bool,

    /// Whether this entry is ignored by Git.
    ///
    /// We only scan ignored entries once the directory is expanded and
    /// exclude them from searches.
    pub is_ignored: bool,

    /// Whether this entry's canonical path is outside of the worktree.
    /// This means the entry is only accessible from the worktree root via a
    /// symlink.
    ///
    /// We only scan entries outside of the worktree once the symlinked
    /// directory is expanded. External entries are treated like gitignored
    /// entries in that they are not included in searches.
    pub is_external: bool,
    /// The entry's git status, if known.
    pub git_status: Option<GitFileStatus>,
}
2725
/// The kind of a worktree [`Entry`], including a directory's load state.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose contents have not been loaded.
    UnloadedDir,
    /// A directory that is queued to be scanned.
    PendingDir,
    /// A directory whose contents have been loaded.
    Dir,
    /// A file, with a precomputed character bag for fuzzy matching.
    File(CharBag),
}
2733
/// The kind of change observed for a path in the worktree.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
2749
/// Describes a change to a git repository within the worktree.
pub struct GitRepositoryChange {
    /// The previous state of the repository, if it already existed.
    pub old_repository: Option<RepositoryEntry>,
}
2754
/// A batch of entry changes: (path, entry id, kind of change).
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// A batch of git repository changes: (work directory path, change).
pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
2757
2758impl Entry {
2759 fn new(
2760 path: Arc<Path>,
2761 metadata: &fs::Metadata,
2762 next_entry_id: &AtomicUsize,
2763 root_char_bag: CharBag,
2764 ) -> Self {
2765 Self {
2766 id: ProjectEntryId::new(next_entry_id),
2767 kind: if metadata.is_dir {
2768 EntryKind::PendingDir
2769 } else {
2770 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2771 },
2772 path,
2773 inode: metadata.inode,
2774 mtime: metadata.mtime,
2775 is_symlink: metadata.is_symlink,
2776 is_ignored: false,
2777 is_external: false,
2778 git_status: None,
2779 }
2780 }
2781
2782 pub fn is_dir(&self) -> bool {
2783 self.kind.is_dir()
2784 }
2785
2786 pub fn is_file(&self) -> bool {
2787 self.kind.is_file()
2788 }
2789
2790 pub fn git_status(&self) -> Option<GitFileStatus> {
2791 self.git_status
2792 }
2793}
2794
2795impl EntryKind {
2796 pub fn is_dir(&self) -> bool {
2797 matches!(
2798 self,
2799 EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
2800 )
2801 }
2802
2803 pub fn is_unloaded(&self) -> bool {
2804 matches!(self, EntryKind::UnloadedDir)
2805 }
2806
2807 pub fn is_file(&self) -> bool {
2808 matches!(self, EntryKind::File(_))
2809 }
2810}
2811
2812impl sum_tree::Item for Entry {
2813 type Summary = EntrySummary;
2814
2815 fn summary(&self) -> Self::Summary {
2816 let non_ignored_count = if self.is_ignored || self.is_external {
2817 0
2818 } else {
2819 1
2820 };
2821 let file_count;
2822 let non_ignored_file_count;
2823 if self.is_file() {
2824 file_count = 1;
2825 non_ignored_file_count = non_ignored_count;
2826 } else {
2827 file_count = 0;
2828 non_ignored_file_count = 0;
2829 }
2830
2831 let mut statuses = GitStatuses::default();
2832 match self.git_status {
2833 Some(status) => match status {
2834 GitFileStatus::Added => statuses.added = 1,
2835 GitFileStatus::Modified => statuses.modified = 1,
2836 GitFileStatus::Conflict => statuses.conflict = 1,
2837 },
2838 None => {}
2839 }
2840
2841 EntrySummary {
2842 max_path: self.path.clone(),
2843 count: 1,
2844 non_ignored_count,
2845 file_count,
2846 non_ignored_file_count,
2847 statuses,
2848 }
2849 }
2850}
2851
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are keyed (and therefore ordered) by worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2859
/// Aggregated data for a subtree of the path-ordered entry tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest path in the summarized range.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that are neither ignored nor external.
    non_ignored_count: usize,
    // Number of file entries.
    file_count: usize,
    // File entries that are neither ignored nor external.
    non_ignored_file_count: usize,
    // Tallies of git statuses across the summarized entries.
    statuses: GitStatuses,
}
2869
2870impl Default for EntrySummary {
2871 fn default() -> Self {
2872 Self {
2873 max_path: Arc::from(Path::new("")),
2874 count: 0,
2875 non_ignored_count: 0,
2876 file_count: 0,
2877 non_ignored_file_count: 0,
2878 statuses: Default::default(),
2879 }
2880 }
2881}
2882
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // `rhs` summarizes entries that follow ours, so its max path wins.
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.non_ignored_count += rhs.non_ignored_count;
        self.file_count += rhs.file_count;
        self.non_ignored_file_count += rhs.non_ignored_file_count;
        self.statuses += rhs.statuses;
    }
}
2895
/// An item in the id-ordered tree, used to look up an entry's path by its
/// project entry id.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this entry was last updated.
    scan_id: usize,
}
2903
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // The summary just tracks the greatest id in a subtree.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2911
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    // Path entries are keyed (and therefore ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2919
/// Summary for the id-ordered tree: the greatest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2924
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // Items are ordered by id, so the right-hand summary's max id wins.
        self.max_id = summary.max_id;
    }
}
2932
// Allows seeking through the id-ordered tree by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2938
/// A worktree-relative path, used as the ordering key of the path-ordered
/// entry tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2941
impl Default for PathKey {
    // The empty path sorts before all other paths.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2947
// Allows seeking through the path-ordered tree by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2953
/// Scans the worktree's directory tree in the background, keeping the
/// snapshot up to date as filesystem events arrive.
struct BackgroundScanner {
    // Mutable scanner state, shared between the event loop and scan workers.
    state: Mutex<BackgroundScannerState>,
    fs: Arc<dyn Fs>,
    // Reports scan progress and snapshot updates back to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    // Requests from the worktree to rescan specific paths.
    scan_requests_rx: channel::Receiver<ScanRequest>,
    // Requests from the worktree to eagerly load everything under a prefix.
    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    next_entry_id: Arc<AtomicUsize>,
    // Which stage of scanning the scanner is currently in.
    phase: BackgroundScannerPhase,
}
2964
/// The stage the background scanner is in, which determines how precisely
/// filesystem events can be interpreted.
#[derive(PartialEq)]
enum BackgroundScannerPhase {
    /// Performing the initial recursive scan of the worktree.
    InitialScan,
    /// Processing events that arrived while the initial scan was running.
    EventsReceivedDuringInitialScan,
    /// Steady state: processing events as they arrive.
    Events,
}
2971
2972impl BackgroundScanner {
    /// Creates a scanner for the given snapshot, wiring up the channels it
    /// uses to receive requests and report progress.
    fn new(
        snapshot: LocalSnapshot,
        next_entry_id: Arc<AtomicUsize>,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        scan_requests_rx: channel::Receiver<ScanRequest>,
        path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            scan_requests_rx,
            path_prefixes_to_scan_rx,
            next_entry_id,
            state: Mutex::new(BackgroundScannerState {
                // The previous snapshot starts equal to the current one, so
                // the first update reports no spurious changes.
                prev_snapshot: snapshot.snapshot.clone(),
                snapshot,
                scanned_dirs: Default::default(),
                path_prefixes_to_scan: Default::default(),
                paths_to_scan: Default::default(),
                removed_entry_ids: Default::default(),
                changed_paths: Default::default(),
            }),
            phase: BackgroundScannerPhase::InitialScan,
        }
    }
3001
    /// Main loop of the scanner: performs the initial recursive scan of the
    /// worktree, then processes filesystem events and rescan requests until
    /// the channels close (i.e. the worktree is dropped).
    async fn run(
        &mut self,
        mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = &self.state.lock().snapshot;
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.state
                    .lock()
                    .snapshot
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), false));
            }
        }
        {
            let mut state = self.state.lock();
            state.snapshot.scan_id += 1;
            ignore_stack = state
                .snapshot
                .ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore matches the root itself, mark the
            // root entry as ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    state.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            is_external: false,
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut state = self.state.lock();
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
        }

        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
        if let Poll::Ready(Some(events)) = futures::poll!(fs_events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(fs_events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        // Continue processing events until the worktree is dropped.
        self.phase = BackgroundScannerPhase::Events;
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.scan_requests_rx.recv().fuse() => {
                    let Ok(request) = request else { break };
                    if !self.process_scan_request(request, false).await {
                        return;
                    }
                }

                path_prefix = self.path_prefixes_to_scan_rx.recv().fuse() => {
                    let Ok(path_prefix) = path_prefix else { break };
                    log::trace!("adding path prefix {:?}", path_prefix);

                    let did_scan = self.forcibly_load_paths(&[path_prefix.clone()]).await;
                    if did_scan {
                        let abs_path =
                        {
                            let mut state = self.state.lock();
                            state.path_prefixes_to_scan.insert(path_prefix.clone());
                            state.snapshot.abs_path.join(&path_prefix)
                        };

                        if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
                            self.process_events(vec![abs_path]).await;
                        }
                    }
                }

                events = fs_events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    // Drain any additional queued event batches before
                    // processing, to handle them all in one pass.
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(fs_events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths.clone()).await;
                }
            }
        }
    }
3117
    /// Forcibly rescans the requested paths, then sends a status update.
    /// Returns `false` if the root path can't be canonicalized or the
    /// status-update receiver has been dropped.
    async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
        log::debug!("rescanning paths {:?}", request.relative_paths);

        request.relative_paths.sort_unstable();
        self.forcibly_load_paths(&request.relative_paths).await;

        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
            Ok(path) => path,
            Err(err) => {
                log::error!("failed to canonicalize root path: {}", err);
                return false;
            }
        };
        let abs_paths = request
            .relative_paths
            .iter()
            .map(|path| {
                // An empty relative path refers to the worktree root itself.
                if path.file_name().is_some() {
                    root_canonical_path.join(path)
                } else {
                    root_canonical_path.clone()
                }
            })
            .collect::<Vec<_>>();

        self.reload_entries_for_paths(
            root_path,
            root_canonical_path,
            &request.relative_paths,
            abs_paths,
            None,
        )
        .await;
        self.send_status_update(scanning, Some(request.done))
    }
3154
    /// Handles a batch of filesystem events: reloads the affected entries,
    /// refreshes ignore statuses and git repositories, and sends a status
    /// update to the worktree.
    async fn process_events(&mut self, mut abs_paths: Vec<PathBuf>) {
        log::debug!("received fs events {:?}", abs_paths);

        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
            Ok(path) => path,
            Err(err) => {
                log::error!("failed to canonicalize root path: {}", err);
                return;
            }
        };

        let mut relative_paths = Vec::with_capacity(abs_paths.len());
        let mut unloaded_relative_paths = Vec::new();
        abs_paths.sort_unstable();
        // After sorting, a path that starts with its predecessor is a
        // descendant of it and will be covered by rescanning the ancestor.
        abs_paths.dedup_by(|a, b| a.starts_with(&b));
        abs_paths.retain(|abs_path| {
            // NOTE(review): the state mutex is re-locked per path here;
            // presumably fine for typical batch sizes — confirm if batches
            // can be very large.
            let snapshot = &self.state.lock().snapshot;
            {
                let relative_path: Arc<Path> =
                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                        path.into()
                    } else {
                        log::error!(
                            "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
                        );
                        return false;
                    };

                // Events inside unloaded directories are deferred; the
                // directory will be scanned when it is expanded.
                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
                    snapshot
                        .entry_for_path(parent)
                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
                });
                if !parent_dir_is_loaded {
                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
                    unloaded_relative_paths.push(relative_path);
                    return false;
                }

                relative_paths.push(relative_path);
                true
            }
        });

        if !relative_paths.is_empty() {
            let (scan_job_tx, scan_job_rx) = channel::unbounded();
            self.reload_entries_for_paths(
                root_path,
                root_canonical_path,
                &relative_paths,
                abs_paths,
                Some(scan_job_tx.clone()),
            )
            .await;
            drop(scan_job_tx);
            self.scan_dirs(false, scan_job_rx).await;

            // Re-evaluate ignore statuses, which may queue more directories
            // to scan.
            let (scan_job_tx, scan_job_rx) = channel::unbounded();
            self.update_ignore_statuses(scan_job_tx).await;
            self.scan_dirs(false, scan_job_rx).await;
        }

        {
            let mut state = self.state.lock();
            relative_paths.extend(unloaded_relative_paths);
            state.reload_repositories(&relative_paths, self.fs.as_ref());
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
            for (_, entry_id) in mem::take(&mut state.removed_entry_ids) {
                state.scanned_dirs.remove(&entry_id);
            }
        }

        self.send_status_update(false, None);
    }
3230
    /// Ensures that the given paths are loaded in the snapshot, scanning the
    /// nearest unloaded ancestor directory of each path if necessary.
    /// Returns whether any scanning was actually performed.
    async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
        let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
        {
            let mut state = self.state.lock();
            let root_path = state.snapshot.abs_path.clone();
            for path in paths {
                // Queue a scan for the outermost unloaded ancestor; deeper
                // levels are handled recursively by the scan itself.
                for ancestor in path.ancestors() {
                    if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
                        if entry.kind == EntryKind::UnloadedDir {
                            let abs_path = root_path.join(ancestor);
                            let ignore_stack =
                                state.snapshot.ignore_stack_for_abs_path(&abs_path, true);
                            let ancestor_inodes =
                                state.snapshot.ancestor_inodes_for_path(&ancestor);
                            scan_job_tx
                                .try_send(ScanJob {
                                    abs_path: abs_path.into(),
                                    path: ancestor.into(),
                                    ignore_stack,
                                    scan_queue: scan_job_tx.clone(),
                                    ancestor_inodes,
                                    is_external: entry.is_external,
                                })
                                .unwrap();
                            state.paths_to_scan.insert(path.clone());
                            break;
                        }
                    }
                }
            }
            drop(scan_job_tx);
        }
        // Drain the queue on this task; jobs enqueue their subdirectories.
        while let Some(job) = scan_job_rx.next().await {
            self.scan_dir(&job).await.log_err();
        }

        mem::take(&mut self.state.lock().paths_to_scan).len() > 0
    }
3269
    /// Drains `scan_jobs_rx`, scanning directories on one worker per CPU.
    /// Also services path rescan requests while scanning and, when
    /// `enable_progress_updates` is set, sends periodic progress updates.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.scan_requests_rx.recv().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker won the race; adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
3342
    /// Sends the current snapshot, along with the set of changes since the
    /// previously-sent snapshot, to the worktree. Returns `false` if the
    /// receiving side has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut state = self.state.lock();
        // Skip intermediate updates that would carry no changes.
        if state.changed_paths.is_empty() && scanning {
            return true;
        }

        let new_snapshot = state.snapshot.clone();
        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
        let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
        state.changed_paths.clear();

        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot: new_snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
3363
    /// Scans a single directory on the file system: reads its children, builds
    /// an `Entry` for each, computes ignore and git status, and enqueues
    /// `ScanJob`s for subdirectories. Results are committed to the shared
    /// scanner state at the end, under one lock acquisition.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        log::debug!("scan directory {:?}", job.path);

        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        // Capture everything we need from the shared state up front, so the
        // lock is not held across the file-system operations below.
        let (root_abs_path, root_char_bag, next_entry_id, repository) = {
            let snapshot = &self.state.lock().snapshot;
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                self.next_entry_id.clone(),
                snapshot
                    .local_repo_for_path(&job.path)
                    .map(|(work_dir, repo)| (work_dir, repo.clone())),
            )
        };

        let mut root_canonical_path = None;
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child vanished between the directory read and the
                // metadata call; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed, to reflect the ignore file found in the current
                // directory. Because `.gitignore` starts with a `.`, it is
                // typically encountered early, so the already-processed
                // entries should rarely be numerous. Update the ignore stack
                // associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if job.is_external {
                // Children of an external directory are external as well.
                child_entry.is_external = true;
            } else if child_metadata.is_symlink {
                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
                    Ok(path) => path,
                    Err(err) => {
                        log::error!(
                            "error reading target of symlink {:?}: {:?}",
                            child_abs_path,
                            err
                        );
                        continue;
                    }
                };

                // lazily canonicalize the root path in order to determine if
                // symlinks point outside of the worktree.
                let root_canonical_path = match &root_canonical_path {
                    Some(path) => path,
                    None => match self.fs.canonicalize(&root_abs_path).await {
                        Ok(path) => root_canonical_path.insert(path),
                        Err(err) => {
                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
                            continue;
                        }
                    },
                };

                if !canonical_path.starts_with(root_canonical_path) {
                    child_entry.is_external = true;
                }
            }

            if child_entry.is_dir() {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        is_external: child_entry.is_external,
                        ignore_stack: if child_entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    // A `None` placeholder keeps `new_jobs` aligned one-to-one
                    // with the directory entries of `new_entries`.
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
                if !child_entry.is_ignored {
                    // Fetch the file's git status from the containing repo.
                    if let Some((repo_path, repo)) = &repository {
                        if let Ok(path) = child_path.strip_prefix(&repo_path.0) {
                            child_entry.git_status = repo
                                .repo_ptr
                                .lock()
                                .status(&RepoPath(path.into()))
                                .log_err()
                                .flatten();
                        }
                    }
                }
            }

            new_entries.push(child_entry);
        }

        // Commit the scanned entries to the shared state, enqueueing jobs for
        // the subdirectories that should be scanned now.
        let mut state = self.state.lock();
        let mut new_jobs = new_jobs.into_iter();
        for entry in &mut new_entries {
            state.reuse_entry_id(entry);

            if entry.is_dir() {
                let new_job = new_jobs.next().expect("missing scan job for entry");
                if state.should_scan_directory(&entry) {
                    if let Some(new_job) = new_job {
                        job.scan_queue
                            .try_send(new_job)
                            .expect("channel is unbounded");
                    }
                } else {
                    log::debug!("defer scanning directory {:?}", entry.path);
                    entry.kind = EntryKind::UnloadedDir;
                }
            }
        }
        // Every directory entry must have consumed exactly one queued job.
        assert!(new_jobs.next().is_none());

        state.populate_dir(&job.path, new_entries, new_ignore, self.fs.as_ref());
        Ok(())
    }
3550
    /// Reloads the entries for a set of paths that changed on disk. Paths that
    /// no longer exist are pruned from the snapshot; existing paths get fresh
    /// metadata. When `scan_queue_tx` is provided, changed directories are
    /// re-scanned recursively.
    async fn reload_entries_for_paths(
        &self,
        root_abs_path: Arc<Path>,
        root_canonical_path: PathBuf,
        relative_paths: &[Arc<Path>],
        abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) {
        // Fetch metadata for all paths concurrently, before taking the state
        // lock. `Ok(None)` means the path no longer exists.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| async move {
                    let metadata = self.fs.metadata(&abs_path).await?;
                    if let Some(metadata) = metadata {
                        let canonical_path = self.fs.canonicalize(&abs_path).await?;
                        anyhow::Ok(Some((metadata, canonical_path)))
                    } else {
                        Ok(None)
                    }
                })
                .collect::<Vec<_>>(),
        )
        .await;

        let mut state = self.state.lock();
        let snapshot = &mut state.snapshot;
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        let doing_recursive_update = scan_queue_tx.is_some();
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            // The whole refresh completes synchronously under this lock, so
            // the snapshot stays fully up to date.
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            if matches!(metadata, Ok(None)) || doing_recursive_update {
                log::trace!("remove path {:?}", path);
                state.remove_path(path);
            }
        }

        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
            match metadata {
                Ok(Some((metadata, canonical_path))) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);

                    let mut fs_entry = Entry::new(
                        path.clone(),
                        metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    // An entry is external when its canonical location lies
                    // outside the worktree root (e.g. behind a symlink).
                    fs_entry.is_external = !canonical_path.starts_with(&root_canonical_path);

                    if !fs_entry.is_ignored {
                        if !fs_entry.is_dir() {
                            // Refresh the file's git status from its repository.
                            if let Some((work_dir, repo)) =
                                state.snapshot.local_repo_for_path(&path)
                            {
                                if let Ok(path) = path.strip_prefix(work_dir.0) {
                                    fs_entry.git_status = repo
                                        .repo_ptr
                                        .lock()
                                        .status(&RepoPath(path.into()))
                                        .log_err()
                                        .flatten()
                                }
                            }
                        }
                    }

                    let fs_entry = state.insert_entry(fs_entry, self.fs.as_ref());

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = state.snapshot.ancestor_inodes_for_path(&path);
                        // Guard against recursive symlinks before enqueueing a
                        // recursive scan of this directory.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path: path.clone(),
                                ignore_stack,
                                ancestor_inodes,
                                is_external: fs_entry.is_external,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {
                    self.remove_repo_path(&path, &mut state.snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        // Record which paths changed so the next status update can compute a
        // change set for them.
        util::extend_sorted(
            &mut state.changed_paths,
            relative_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );
    }
3663
3664 fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
3665 if !path
3666 .components()
3667 .any(|component| component.as_os_str() == *DOT_GIT)
3668 {
3669 if let Some(repository) = snapshot.repository_for_work_directory(path) {
3670 let entry = repository.work_directory.0;
3671 snapshot.git_repositories.remove(&entry);
3672 snapshot
3673 .snapshot
3674 .repository_entries
3675 .remove(&RepositoryWorkDirectory(path.into()));
3676 return Some(());
3677 }
3678 }
3679
3680 // TODO statuses
3681 // Track when a .git is removed and iterate over the file system there
3682
3683 Some(())
3684 }
3685
    /// Recomputes ignore statuses after `.gitignore` files have changed.
    /// Determines which cached ignore files were modified or deleted, then
    /// fans the recomputation work out across all CPUs.
    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
        use futures::FutureExt as _;

        let mut snapshot = self.state.lock().snapshot.clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        let abs_path = snapshot.abs_path.clone();
        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
                if *needs_update {
                    *needs_update = false;
                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                        ignores_to_update.push(parent_abs_path.clone());
                    }
                }

                // Drop cached ignores whose `.gitignore` file no longer has an
                // entry in the snapshot.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        // Remove deleted ignores from both the local clone and the shared state.
        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.state
                .lock()
                .snapshot
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of the current path: processing an ancestor
            // directory already recomputes everything beneath it.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
                scan_queue: scan_job_tx.clone(),
            }))
            .unwrap();
        }
        // Close our sender so the worker loop below terminates once the queue
        // (including jobs enqueued by the workers themselves) drains.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.scan_requests_rx.recv().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
3767
    /// Re-evaluates the ignore status of the direct children of `job.abs_path`,
    /// enqueueing follow-up jobs for subdirectories and scan jobs for
    /// directories that just became unignored.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        log::trace!("update ignore status {:?}", job.abs_path);

        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own `.gitignore`, if one is cached.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };

                // Scan any directories that were previously ignored and weren't
                // previously scanned.
                if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
                    let state = self.state.lock();
                    if state.should_scan_directory(&entry) {
                        job.scan_queue
                            .try_send(ScanJob {
                                abs_path: abs_path.clone(),
                                path: entry.path.clone(),
                                ignore_stack: child_ignore_stack.clone(),
                                scan_queue: job.scan_queue.clone(),
                                ancestor_inodes: state
                                    .snapshot
                                    .ancestor_inodes_for_path(&entry.path),
                                is_external: false,
                            })
                            .unwrap();
                    }
                }

                // Recurse into the subdirectory via the ignore queue.
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                        scan_queue: job.scan_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Record an edit only when the ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Apply the edits to the shared state, and mark the affected paths as
        // changed so the next status update reports them.
        let state = &mut self.state.lock();
        for edit in &entries_by_path_edits {
            if let Edit::Insert(entry) = edit {
                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
                    state.changed_paths.insert(ix, entry.path.clone());
                }
            }
        }

        state
            .snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
3845
    /// Computes the set of `PathChange`s between two snapshots, restricted to
    /// the given event paths and their descendants.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> UpdatedEntriesSet {
        use BackgroundScannerPhase::*;
        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

        // Identify which paths have changed. Use the known set of changed
        // parent paths to optimize the search.
        let mut changes = Vec::new();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let mut last_newly_loaded_dir_path = None;
        old_paths.next(&());
        new_paths.next(&());
        for path in event_paths {
            let path = PathKey(path.clone());
            // Advance both cursors up to this event path.
            if old_paths.item().map_or(false, |e| e.path < path.0) {
                old_paths.seek_forward(&path, Bias::Left, &());
            }
            if new_paths.item().map_or(false, |e| e.path < path.0) {
                new_paths.seek_forward(&path, Bias::Left, &());
            }
            // Walk both snapshots in lockstep over the event path and its
            // descendants, classifying each difference.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Both cursors have moved past this event path's
                        // subtree; continue with the next event path.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                old_paths.next(&());
                            }
                            // Present in both snapshots.
                            Ordering::Equal => {
                                if self.phase == EventsReceivedDuringInitialScan {
                                    if old_entry.id != new_entry.id {
                                        changes.push((
                                            old_entry.path.clone(),
                                            old_entry.id,
                                            Removed,
                                        ));
                                    }
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.push((
                                        new_entry.path.clone(),
                                        new_entry.id,
                                        AddedOrUpdated,
                                    ));
                                } else if old_entry.id != new_entry.id {
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
                                } else if old_entry != new_entry {
                                    if old_entry.kind.is_unloaded() {
                                        // A previously-deferred directory was
                                        // loaded; its children are `Loaded` too.
                                        last_newly_loaded_dir_path = Some(&new_entry.path);
                                        changes.push((
                                            new_entry.path.clone(),
                                            new_entry.id,
                                            Loaded,
                                        ));
                                    } else {
                                        changes.push((
                                            new_entry.path.clone(),
                                            new_entry.id,
                                            Updated,
                                        ));
                                    }
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added, or newly
                            // loaded if inside a just-loaded directory.
                            Ordering::Greater => {
                                let is_newly_loaded = self.phase == InitialScan
                                    || last_newly_loaded_dir_path
                                        .as_ref()
                                        .map_or(false, |dir| new_entry.path.starts_with(&dir));
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    if is_newly_loaded { Loaded } else { Added },
                                ));
                                new_paths.next(&());
                            }
                        }
                    }
                    // New snapshot exhausted: everything remaining in the old
                    // snapshot was removed.
                    (Some(old_entry), None) => {
                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
                        old_paths.next(&());
                    }
                    // Old snapshot exhausted: everything remaining is new.
                    (None, Some(new_entry)) => {
                        let is_newly_loaded = self.phase == InitialScan
                            || last_newly_loaded_dir_path
                                .as_ref()
                                .map_or(false, |dir| new_entry.path.starts_with(&dir));
                        changes.push((
                            new_entry.path.clone(),
                            new_entry.id,
                            if is_newly_loaded { Loaded } else { Added },
                        ));
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes.into()
    }
3963
3964 async fn progress_timer(&self, running: bool) {
3965 if !running {
3966 return futures::future::pending().await;
3967 }
3968
3969 #[cfg(any(test, feature = "test-support"))]
3970 if self.fs.is_fake() {
3971 return self.executor.simulate_random_delay().await;
3972 }
3973
3974 smol::Timer::after(Duration::from_millis(100)).await;
3975 }
3976}
3977
3978fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3979 let mut result = root_char_bag;
3980 result.extend(
3981 path.to_string_lossy()
3982 .chars()
3983 .map(|c| c.to_ascii_lowercase()),
3984 );
3985 result
3986}
3987
/// A unit of work for the background scanner: scan the directory at
/// `abs_path`, pushing jobs for its subdirectories onto `scan_queue`.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// The same directory, relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
    /// Whether this directory lies outside the worktree root (via a symlink).
    is_external: bool,
}
3996
/// A unit of work for recomputing ignore statuses: re-evaluate the children of
/// `abs_path`, enqueueing follow-up jobs for subdirectories.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory whose children are re-evaluated.
    abs_path: Arc<Path>,
    /// Gitignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue for follow-up ignore-status jobs (subdirectories).
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
    /// Queue for scanning directories that just became unignored.
    scan_queue: Sender<ScanJob>,
}
4003
pub trait WorktreeHandle {
    /// Test-only helper: waits until all file-system events that predate this
    /// call have been processed by the worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
4011
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for it to appear in the tree...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for the deletion to be observed, which
            // guarantees all earlier FS events have been drained.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
4050
/// A `sum_tree` dimension tracking entry counts (total, non-ignored, files,
/// non-ignored files) and the maximum path seen so far during a traversal.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// The greatest entry path accumulated so far.
    max_path: &'a Path,
    /// Total number of entries.
    count: usize,
    /// Number of entries that are not ignored.
    non_ignored_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of file entries that are not ignored.
    non_ignored_file_count: usize,
}
4059
4060impl<'a> TraversalProgress<'a> {
4061 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
4062 match (include_ignored, include_dirs) {
4063 (true, true) => self.count,
4064 (true, false) => self.file_count,
4065 (false, true) => self.non_ignored_count,
4066 (false, false) => self.non_ignored_file_count,
4067 }
4068 }
4069}
4070
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Accumulates an `EntrySummary` into the running traversal totals.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.non_ignored_count += summary.non_ignored_count;
        self.file_count += summary.file_count;
        self.non_ignored_file_count += summary.non_ignored_file_count;
    }
}
4080
impl<'a> Default for TraversalProgress<'a> {
    /// Zero counts, positioned before any path (the empty path).
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}
4092
/// Running totals of git file statuses within a subtree, accumulated as a
/// `sum_tree` dimension.
#[derive(Clone, Debug, Default, Copy)]
struct GitStatuses {
    added: usize,
    modified: usize,
    conflict: usize,
}
4099
impl AddAssign for GitStatuses {
    /// Accumulates the counts from `rhs` into `self`.
    fn add_assign(&mut self, rhs: Self) {
        self.added += rhs.added;
        self.modified += rhs.modified;
        self.conflict += rhs.conflict;
    }
}
4107
impl Sub for GitStatuses {
    type Output = GitStatuses;

    // NOTE(review): plain subtraction panics on underflow in debug builds;
    // this assumes `rhs` counts never exceed `self`'s (e.g. when subtracting
    // an earlier prefix sum of the same tree) — confirm callers uphold this.
    fn sub(self, rhs: Self) -> Self::Output {
        GitStatuses {
            added: self.added - rhs.added,
            modified: self.modified - rhs.modified,
            conflict: self.conflict - rhs.conflict,
        }
    }
}
4119
impl<'a> sum_tree::Dimension<'a, EntrySummary> for GitStatuses {
    /// Accumulates a summary's status counts into the running totals.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        *self += summary.statuses
    }
}
4125
/// A filtered, in-order traversal over a snapshot's entries, backed by a
/// `sum_tree` cursor.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    /// Whether ignored entries are yielded.
    include_ignored: bool,
    /// Whether directory entries are yielded.
    include_dirs: bool,
}
4131
impl<'a> Traversal<'a> {
    /// Advances the cursor to the next entry matching this traversal's
    /// directory/ignored filters. Returns the cursor's seek result.
    pub fn advance(&mut self) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                // Seek to one past the number of matching entries consumed so far.
                count: self.end_offset() + 1,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Advances past the current entry and all of its descendants, stopping at
    /// the next matching entry not contained within it. Returns `false` when
    /// the tree is exhausted first.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep seeking until the landed-on entry satisfies the filters.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor currently points at, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of matching entries strictly before the current position.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }

    /// Number of matching entries up to and including the current entry.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_dirs, self.include_ignored)
    }
}
4179
4180impl<'a> Iterator for Traversal<'a> {
4181 type Item = &'a Entry;
4182
4183 fn next(&mut self) -> Option<Self::Item> {
4184 if let Some(item) = self.entry() {
4185 self.advance();
4186 Some(item)
4187 } else {
4188 None
4189 }
4190 }
4191}
4192
/// A seek target for a `Traversal`'s underlying cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek to the first position whose max path is neither the given path nor
    /// one of its descendants.
    PathSuccessor(&'a Path),
    /// Seek until the given number of filter-matching entries has been passed.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
4203
4204impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
4205 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
4206 match self {
4207 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
4208 TraversalTarget::PathSuccessor(path) => {
4209 if !cursor_location.max_path.starts_with(path) {
4210 Ordering::Equal
4211 } else {
4212 Ordering::Greater
4213 }
4214 }
4215 TraversalTarget::Count {
4216 count,
4217 include_dirs,
4218 include_ignored,
4219 } => Ord::cmp(
4220 count,
4221 &cursor_location.count(*include_dirs, *include_ignored),
4222 ),
4223 }
4224 }
4225}
4226
impl<'a, 'b> SeekTarget<'a, EntrySummary, (TraversalProgress<'a>, GitStatuses)>
    for TraversalTarget<'b>
{
    /// Delegates to the `TraversalProgress`-only comparison, ignoring the
    /// accumulated `GitStatuses` dimension.
    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitStatuses), _: &()) -> Ordering {
        self.cmp(&cursor_location.0, &())
    }
}
4234
/// Iterator over the direct children of `parent_path` within a snapshot
/// (descendants deeper than one level are skipped).
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
4239
4240impl<'a> Iterator for ChildEntriesIter<'a> {
4241 type Item = &'a Entry;
4242
4243 fn next(&mut self) -> Option<Self::Item> {
4244 if let Some(item) = self.traversal.entry() {
4245 if item.path.starts_with(&self.parent_path) {
4246 self.traversal.advance_to_sibling();
4247 return Some(item);
4248 }
4249 }
4250 None
4251 }
4252}
4253
/// Iterator over all entries whose paths are contained within `parent_path`,
/// in traversal order.
pub struct DescendentEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
4258
4259impl<'a> Iterator for DescendentEntriesIter<'a> {
4260 type Item = &'a Entry;
4261
4262 fn next(&mut self) -> Option<Self::Item> {
4263 if let Some(item) = self.traversal.entry() {
4264 if item.path.starts_with(&self.parent_path) {
4265 self.traversal.advance();
4266 return Some(item);
4267 }
4268 }
4269 None
4270 }
4271}
4272
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a worktree entry into its protobuf representation for
    /// transmission over the wire.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths are transmitted as (potentially lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
            is_external: entry.is_external,
            git_status: entry.git_status.map(|status| status.to_proto()),
        }
    }
}
4288
4289impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
4290 type Error = anyhow::Error;
4291
4292 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
4293 if let Some(mtime) = entry.mtime {
4294 let kind = if entry.is_dir {
4295 EntryKind::Dir
4296 } else {
4297 let mut char_bag = *root_char_bag;
4298 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
4299 EntryKind::File(char_bag)
4300 };
4301 let path: Arc<Path> = PathBuf::from(entry.path).into();
4302 Ok(Entry {
4303 id: ProjectEntryId::from_proto(entry.id),
4304 kind,
4305 path,
4306 inode: entry.inode,
4307 mtime: mtime.into(),
4308 is_symlink: entry.is_symlink,
4309 is_ignored: entry.is_ignored,
4310 is_external: entry.is_external,
4311 git_status: GitFileStatus::from_proto(entry.git_status),
4312 })
4313 } else {
4314 Err(anyhow!(
4315 "missing mtime in remote worktree entry {:?}",
4316 entry.path
4317 ))
4318 }
4319 }
4320}