1use crate::{
2 copy_recursive, ignore::IgnoreStack, project_settings::ProjectSettings, DiagnosticSummary,
3 ProjectEntryId, RemoveOptions,
4};
5use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
6use anyhow::{anyhow, Context as _, Result};
7use client::{proto, Client};
8use clock::ReplicaId;
9use collections::{HashMap, HashSet, VecDeque};
10use fs::{
11 repository::{GitFileStatus, GitRepository, RepoPath},
12 Fs,
13};
14use futures::{
15 channel::{
16 mpsc::{self, UnboundedSender},
17 oneshot,
18 },
19 select_biased,
20 task::Poll,
21 FutureExt as _, Stream, StreamExt,
22};
23use fuzzy::CharBag;
24use git::{DOT_GIT, GITIGNORE};
25use gpui::{
26 AppContext, AsyncAppContext, BackgroundExecutor, Context, EventEmitter, Model, ModelContext,
27 Task,
28};
29use itertools::Itertools;
30use language::{
31 proto::{
32 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
33 serialize_version,
34 },
35 Buffer, Capability, DiagnosticEntry, File as _, LineEnding, PointUtf16, Rope, RopeFingerprint,
36 Unclipped,
37};
38use lsp::LanguageServerId;
39use parking_lot::Mutex;
40use postage::{
41 barrier,
42 prelude::{Sink as _, Stream as _},
43 watch,
44};
45use settings::{Settings, SettingsStore};
46use smol::channel::{self, Sender};
47use std::{
48 any::Any,
49 cmp::{self, Ordering},
50 convert::TryFrom,
51 ffi::OsStr,
52 fmt,
53 future::Future,
54 mem,
55 ops::{AddAssign, Deref, DerefMut, Sub},
56 path::{Path, PathBuf},
57 pin::Pin,
58 sync::{
59 atomic::{AtomicUsize, Ordering::SeqCst},
60 Arc,
61 },
62 time::{Duration, SystemTime},
63};
64use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
65use text::BufferId;
66use util::{
67 paths::{PathMatcher, HOME},
68 ResultExt,
69};
70
/// Stable identifier for a [`Worktree`]. Locally this is derived from the
/// worktree model's entity id; for remote worktrees it is the id assigned by
/// the host (see `Worktree::remote`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
73
/// A directory tree tracked by the project: either one rooted on the local
/// filesystem and kept up to date by a background scanner, or a replica of a
/// collaborator's worktree synchronized over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
78
/// A worktree backed by the local filesystem.
pub struct LocalWorktree {
    // Current view of the tree; replaced wholesale by `set_snapshot` when the
    // background scanner reports changes.
    snapshot: LocalSnapshot,
    // Requests for the background scanner to re-scan specific relative paths.
    scan_requests_tx: channel::Sender<ScanRequest>,
    // Requests for the background scanner to expand additional path prefixes.
    path_prefixes_to_scan_tx: channel::Sender<Arc<Path>>,
    // Watch channel reflecting whether a scan is currently in progress;
    // `scan_complete` awaits this becoming `false`.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    // Handles for the background scanner and scan-state updater tasks; kept
    // alive for the lifetime of the worktree (replaced when settings change).
    _background_scanner_tasks: Vec<Task<()>>,
    // Present while this worktree is shared with collaborators; used to
    // forward snapshots and diagnostics over RPC.
    share: Option<ShareState>,
    // Full diagnostic entries per file path, per language server.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    // Error/warning counts per file path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    // Whether this worktree is shown in the project panel.
    visible: bool,
}
98
/// A request for the background scanner to refresh a set of paths.
struct ScanRequest {
    // Paths to re-scan, relative to the worktree root.
    relative_paths: Vec<Arc<Path>>,
    // Barrier used to signal the requester once the scan has been processed.
    done: barrier::Sender,
}
103
/// A replica of a collaborator's worktree, kept in sync by applying
/// `proto::UpdateWorktree` messages.
pub struct RemoteWorktree {
    // Foreground copy of the snapshot, refreshed from `background_snapshot`
    // whenever the background task applies an update.
    snapshot: Snapshot,
    // Snapshot mutated on a background task as remote updates arrive.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // Channel feeding remote updates to the background task; `None` once
    // updates are no longer accepted.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters to be notified once a given scan id has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
116
/// A point-in-time view of a worktree's entries and git repository state.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root on disk.
    abs_path: Arc<Path>,
    // File name of the root, used for fuzzy matching.
    root_name: String,
    // Lowercased characters of `root_name`, for fuzzy-match scoring.
    root_char_bag: CharBag,
    // The same set of entries indexed two ways: by path and by entry id.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Git repositories found in the tree, keyed by their work directory.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
139
/// A git repository discovered inside the worktree.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    // Project entry of the repository's work directory (the folder that
    // contains the `.git`).
    pub(crate) work_directory: WorkDirectoryEntry,
    // Current branch name, if one has been resolved.
    pub(crate) branch: Option<Arc<str>>,
}
145
146impl RepositoryEntry {
147 pub fn branch(&self) -> Option<Arc<str>> {
148 self.branch.clone()
149 }
150
151 pub fn work_directory_id(&self) -> ProjectEntryId {
152 *self.work_directory
153 }
154
155 pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
156 snapshot
157 .entry_for_id(self.work_directory_id())
158 .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
159 }
160
161 pub fn build_update(&self, _: &Self) -> proto::RepositoryEntry {
162 proto::RepositoryEntry {
163 work_directory_id: self.work_directory_id().to_proto(),
164 branch: self.branch.as_ref().map(|str| str.to_string()),
165 }
166 }
167}
168
169impl From<&RepositoryEntry> for proto::RepositoryEntry {
170 fn from(value: &RepositoryEntry) -> Self {
171 proto::RepositoryEntry {
172 work_directory_id: value.work_directory.to_proto(),
173 branch: value.branch.as_ref().map(|str| str.to_string()),
174 }
175 }
176}
177
/// This path corresponds to the 'content path' (the folder that contains the .git),
/// expressed relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(pub(crate) Arc<Path>);
181
182impl Default for RepositoryWorkDirectory {
183 fn default() -> Self {
184 RepositoryWorkDirectory(Arc::from(Path::new("")))
185 }
186}
187
188impl AsRef<Path> for RepositoryWorkDirectory {
189 fn as_ref(&self) -> &Path {
190 self.0.as_ref()
191 }
192}
193
/// Newtype over the project entry id of a repository's work directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
196
197impl WorkDirectoryEntry {
198 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Result<RepoPath> {
199 let entry = worktree
200 .entry_for_id(self.0)
201 .ok_or_else(|| anyhow!("entry not found"))?;
202 let path = path
203 .strip_prefix(&entry.path)
204 .map_err(|_| anyhow!("could not relativize {:?} against {:?}", path, entry.path))?;
205 Ok(path.into())
206 }
207}
208
209impl Deref for WorkDirectoryEntry {
210 type Target = ProjectEntryId;
211
212 fn deref(&self) -> &Self::Target {
213 &self.0
214 }
215}
216
217impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
218 fn from(value: ProjectEntryId) -> Self {
219 WorkDirectoryEntry(value)
220 }
221}
222
/// A [`Snapshot`] augmented with local-only state: gitignores, git
/// repositories, and the settings-derived path filters.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    // Patterns from the `file_scan_exclusions` setting; matching paths are
    // skipped by the scanner.
    file_scan_exclusions: Vec<PathMatcher>,
    // Patterns from the `private_files` setting; matching paths are treated
    // as private.
    private_files: Vec<PathMatcher>,
}
235
/// Mutable state owned by the background scanner while it walks the
/// filesystem and incrementally updates the snapshot.
struct BackgroundScannerState {
    snapshot: LocalSnapshot,
    // Directories whose contents have already been scanned.
    scanned_dirs: HashSet<ProjectEntryId>,
    // Path prefixes that should be expanded and scanned.
    path_prefixes_to_scan: HashSet<Arc<Path>>,
    // Individual paths queued for scanning.
    paths_to_scan: HashSet<Arc<Path>>,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Paths that changed during the current update batch.
    changed_paths: Vec<Arc<Path>>,
    // The snapshot as of the previous completed update, used for diffing.
    prev_snapshot: Snapshot,
}
249
/// Local-only state for a git repository within the worktree.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    // Scan id at which the `.git` directory was last scanned; used to detect
    // repository-state changes between snapshots.
    pub(crate) git_dir_scan_id: usize,
    // Handle to the underlying git repository implementation.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
258
259impl Deref for LocalSnapshot {
260 type Target = Snapshot;
261
262 fn deref(&self) -> &Self::Target {
263 &self.snapshot
264 }
265}
266
267impl DerefMut for LocalSnapshot {
268 fn deref_mut(&mut self) -> &mut Self::Target {
269 &mut self.snapshot
270 }
271}
272
/// Messages sent from the background scanner to the foreground updater task.
enum ScanState {
    // A scan pass has begun.
    Started,
    // The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        // Entries that changed since the previous snapshot.
        changes: UpdatedEntriesSet,
        // When present, dropped after the update is applied to unblock the
        // originating `ScanRequest`.
        barrier: Option<barrier::Sender>,
        // Whether the scanner is still working after this update.
        scanning: bool,
    },
}
282
/// State maintained while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Receives each new snapshot together with its entry and git-repository
    // changes, for forwarding to remote participants.
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    // Signaled to resume sending updates after a reconnect.
    resume_updates: watch::Sender<()>,
    // Task that streams snapshot updates to the remote side.
    _maintain_remote_snapshot: Task<Option<()>>,
}
290
/// Events emitted by a [`Worktree`] model.
#[derive(Clone)]
pub enum Event {
    // File/directory entries were added, removed, or modified.
    UpdatedEntries(UpdatedEntriesSet),
    // Git repository state changed for some work directories.
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
}

impl EventEmitter<Event> for Worktree {}
298
299impl Worktree {
300 pub async fn local(
301 client: Arc<Client>,
302 path: impl Into<Arc<Path>>,
303 visible: bool,
304 fs: Arc<dyn Fs>,
305 next_entry_id: Arc<AtomicUsize>,
306 cx: &mut AsyncAppContext,
307 ) -> Result<Model<Self>> {
308 // After determining whether the root entry is a file or a directory, populate the
309 // snapshot's "root name", which will be used for the purpose of fuzzy matching.
310 let abs_path = path.into();
311
312 let metadata = fs
313 .metadata(&abs_path)
314 .await
315 .context("failed to stat worktree path")?;
316
317 let closure_fs = Arc::clone(&fs);
318 let closure_next_entry_id = Arc::clone(&next_entry_id);
319 let closure_abs_path = abs_path.to_path_buf();
320 cx.new_model(move |cx: &mut ModelContext<Worktree>| {
321 cx.observe_global::<SettingsStore>(move |this, cx| {
322 if let Self::Local(this) = this {
323 let new_file_scan_exclusions = path_matchers(
324 ProjectSettings::get_global(cx)
325 .file_scan_exclusions
326 .as_deref(),
327 "file_scan_exclusions",
328 );
329 let new_private_files = path_matchers(
330 ProjectSettings::get(Some((cx.handle().entity_id().as_u64() as usize, &Path::new(""))), cx).private_files.as_deref(),
331 "private_files",
332 );
333
334 if new_file_scan_exclusions != this.snapshot.file_scan_exclusions
335 || new_private_files != this.snapshot.private_files
336 {
337 this.snapshot.file_scan_exclusions = new_file_scan_exclusions;
338 this.snapshot.private_files = new_private_files;
339
340 log::info!(
341 "Re-scanning directories, new scan exclude files: {:?}, new dotenv files: {:?}",
342 this.snapshot
343 .file_scan_exclusions
344 .iter()
345 .map(ToString::to_string)
346 .collect::<Vec<_>>(),
347 this.snapshot
348 .private_files
349 .iter()
350 .map(ToString::to_string)
351 .collect::<Vec<_>>()
352 );
353
354 let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
355 let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) =
356 channel::unbounded();
357 this.scan_requests_tx = scan_requests_tx;
358 this.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;
359 this._background_scanner_tasks = start_background_scan_tasks(
360 &closure_abs_path,
361 this.snapshot(),
362 scan_requests_rx,
363 path_prefixes_to_scan_rx,
364 Arc::clone(&closure_next_entry_id),
365 Arc::clone(&closure_fs),
366 cx,
367 );
368 this.is_scanning = watch::channel_with(true);
369 }
370 }
371 })
372 .detach();
373
374 let root_name = abs_path
375 .file_name()
376 .map_or(String::new(), |f| f.to_string_lossy().to_string());
377
378 let mut snapshot = LocalSnapshot {
379 file_scan_exclusions: path_matchers(
380 ProjectSettings::get_global(cx)
381 .file_scan_exclusions
382 .as_deref(),
383 "file_scan_exclusions",
384 ),
385 private_files: path_matchers(
386 ProjectSettings::get(Some((cx.handle().entity_id().as_u64() as usize, &Path::new(""))), cx).private_files.as_deref(),
387 "private_files",
388 ),
389 ignores_by_parent_abs_path: Default::default(),
390 git_repositories: Default::default(),
391 snapshot: Snapshot {
392 id: WorktreeId::from_usize(cx.entity_id().as_u64() as usize),
393 abs_path: abs_path.to_path_buf().into(),
394 root_name: root_name.clone(),
395 root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
396 entries_by_path: Default::default(),
397 entries_by_id: Default::default(),
398 repository_entries: Default::default(),
399 scan_id: 1,
400 completed_scan_id: 0,
401 },
402 };
403
404 if let Some(metadata) = metadata {
405 snapshot.insert_entry(
406 Entry::new(
407 Arc::from(Path::new("")),
408 &metadata,
409 &next_entry_id,
410 snapshot.root_char_bag,
411 ),
412 fs.as_ref(),
413 );
414 }
415
416 let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
417 let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
418 let task_snapshot = snapshot.clone();
419 Worktree::Local(LocalWorktree {
420 snapshot,
421 is_scanning: watch::channel_with(true),
422 share: None,
423 scan_requests_tx,
424 path_prefixes_to_scan_tx,
425 _background_scanner_tasks: start_background_scan_tasks(
426 &abs_path,
427 task_snapshot,
428 scan_requests_rx,
429 path_prefixes_to_scan_rx,
430 Arc::clone(&next_entry_id),
431 Arc::clone(&fs),
432 cx,
433 ),
434 diagnostics: Default::default(),
435 diagnostic_summaries: Default::default(),
436 client,
437 fs,
438 visible,
439 })
440 })
441 }
442
443 pub fn remote(
444 project_remote_id: u64,
445 replica_id: ReplicaId,
446 worktree: proto::WorktreeMetadata,
447 client: Arc<Client>,
448 cx: &mut AppContext,
449 ) -> Model<Self> {
450 cx.new_model(|cx: &mut ModelContext<Self>| {
451 let snapshot = Snapshot {
452 id: WorktreeId(worktree.id as usize),
453 abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
454 root_name: worktree.root_name.clone(),
455 root_char_bag: worktree
456 .root_name
457 .chars()
458 .map(|c| c.to_ascii_lowercase())
459 .collect(),
460 entries_by_path: Default::default(),
461 entries_by_id: Default::default(),
462 repository_entries: Default::default(),
463 scan_id: 1,
464 completed_scan_id: 0,
465 };
466
467 let (updates_tx, mut updates_rx) = mpsc::unbounded();
468 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
469 let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
470
471 cx.background_executor()
472 .spawn({
473 let background_snapshot = background_snapshot.clone();
474 async move {
475 while let Some(update) = updates_rx.next().await {
476 if let Err(error) =
477 background_snapshot.lock().apply_remote_update(update)
478 {
479 log::error!("error applying worktree update: {}", error);
480 }
481 snapshot_updated_tx.send(()).await.ok();
482 }
483 }
484 })
485 .detach();
486
487 cx.spawn(|this, mut cx| async move {
488 while (snapshot_updated_rx.recv().await).is_some() {
489 this.update(&mut cx, |this, cx| {
490 let this = this.as_remote_mut().unwrap();
491 this.snapshot = this.background_snapshot.lock().clone();
492 cx.emit(Event::UpdatedEntries(Arc::from([])));
493 cx.notify();
494 while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
495 if this.observed_snapshot(*scan_id) {
496 let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
497 let _ = tx.send(());
498 } else {
499 break;
500 }
501 }
502 })?;
503 }
504 anyhow::Ok(())
505 })
506 .detach();
507
508 Worktree::Remote(RemoteWorktree {
509 project_id: project_remote_id,
510 replica_id,
511 snapshot: snapshot.clone(),
512 background_snapshot,
513 updates_tx: Some(updates_tx),
514 snapshot_subscriptions: Default::default(),
515 client: client.clone(),
516 diagnostic_summaries: Default::default(),
517 visible: worktree.visible,
518 disconnected: false,
519 })
520 })
521 }
522
523 pub fn as_local(&self) -> Option<&LocalWorktree> {
524 if let Worktree::Local(worktree) = self {
525 Some(worktree)
526 } else {
527 None
528 }
529 }
530
531 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
532 if let Worktree::Remote(worktree) = self {
533 Some(worktree)
534 } else {
535 None
536 }
537 }
538
539 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
540 if let Worktree::Local(worktree) = self {
541 Some(worktree)
542 } else {
543 None
544 }
545 }
546
547 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
548 if let Worktree::Remote(worktree) = self {
549 Some(worktree)
550 } else {
551 None
552 }
553 }
554
555 pub fn is_local(&self) -> bool {
556 matches!(self, Worktree::Local(_))
557 }
558
559 pub fn is_remote(&self) -> bool {
560 !self.is_local()
561 }
562
563 pub fn snapshot(&self) -> Snapshot {
564 match self {
565 Worktree::Local(worktree) => worktree.snapshot().snapshot,
566 Worktree::Remote(worktree) => worktree.snapshot(),
567 }
568 }
569
570 pub fn scan_id(&self) -> usize {
571 match self {
572 Worktree::Local(worktree) => worktree.snapshot.scan_id,
573 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
574 }
575 }
576
577 pub fn completed_scan_id(&self) -> usize {
578 match self {
579 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
580 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
581 }
582 }
583
584 pub fn is_visible(&self) -> bool {
585 match self {
586 Worktree::Local(worktree) => worktree.visible,
587 Worktree::Remote(worktree) => worktree.visible,
588 }
589 }
590
591 pub fn replica_id(&self) -> ReplicaId {
592 match self {
593 Worktree::Local(_) => 0,
594 Worktree::Remote(worktree) => worktree.replica_id,
595 }
596 }
597
598 pub fn diagnostic_summaries(
599 &self,
600 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
601 match self {
602 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
603 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
604 }
605 .iter()
606 .flat_map(|(path, summaries)| {
607 summaries
608 .iter()
609 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
610 })
611 }
612
613 pub fn abs_path(&self) -> Arc<Path> {
614 match self {
615 Worktree::Local(worktree) => worktree.abs_path.clone(),
616 Worktree::Remote(worktree) => worktree.abs_path.clone(),
617 }
618 }
619
620 pub fn root_file(&self, cx: &mut ModelContext<Self>) -> Option<Arc<File>> {
621 let entry = self.root_entry()?;
622 Some(File::for_entry(entry.clone(), cx.handle()))
623 }
624}
625
/// Spawns the two tasks that keep a local worktree up to date: a background
/// scanner that watches the filesystem and produces [`ScanState`] messages,
/// and a foreground updater that applies those messages to the worktree model.
/// Returns both task handles; dropping them stops the scanning.
fn start_background_scan_tasks(
    abs_path: &Path,
    snapshot: LocalSnapshot,
    scan_requests_rx: channel::Receiver<ScanRequest>,
    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    next_entry_id: Arc<AtomicUsize>,
    fs: Arc<dyn Fs>,
    cx: &mut ModelContext<'_, Worktree>,
) -> Vec<Task<()>> {
    let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
    let background_scanner = cx.background_executor().spawn({
        let abs_path = abs_path.to_path_buf();
        let background = cx.background_executor().clone();
        async move {
            // Debounce filesystem events by 100ms before reacting to them.
            let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
            let case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
                log::error!(
                    "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
                );
                true
            });

            BackgroundScanner::new(
                snapshot,
                next_entry_id,
                fs,
                case_sensitive,
                scan_states_tx,
                background,
                scan_requests_rx,
                path_prefixes_to_scan_rx,
            )
            .run(events)
            .await;
        }
    });
    // Applies scanner output to the model on the foreground thread; exits
    // once either the scanner channel closes or the model is dropped.
    let scan_state_updater = cx.spawn(|this, mut cx| async move {
        while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                match state {
                    ScanState::Started => {
                        *this.is_scanning.0.borrow_mut() = true;
                    }
                    ScanState::Updated {
                        snapshot,
                        changes,
                        barrier,
                        scanning,
                    } => {
                        *this.is_scanning.0.borrow_mut() = scanning;
                        this.set_snapshot(snapshot, changes, cx);
                        // Dropping the barrier unblocks the `ScanRequest`
                        // that was waiting for this update.
                        drop(barrier);
                    }
                }
                cx.notify();
            })
            .ok();
        }
    });
    vec![background_scanner, scan_state_updater]
}
688
689fn path_matchers(values: Option<&[String]>, context: &'static str) -> Vec<PathMatcher> {
690 values
691 .unwrap_or(&[])
692 .iter()
693 .sorted()
694 .filter_map(|pattern| {
695 PathMatcher::new(pattern)
696 .map(Some)
697 .unwrap_or_else(|e| {
698 log::error!(
699 "Skipping pattern {pattern} in `{}` project settings due to parsing error: {e:#}", context
700 );
701 None
702 })
703 })
704 .collect()
705}
706
707impl LocalWorktree {
708 pub fn contains_abs_path(&self, path: &Path) -> bool {
709 path.starts_with(&self.abs_path)
710 }
711
    /// Loads the file at `path` from disk and constructs a [`Buffer`] model
    /// for it, using the git index text (if any) as the buffer's diff base.
    pub(crate) fn load_buffer(
        &mut self,
        id: BufferId,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Model<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            // `load` yields the file handle, its text, and the diff base.
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))?
                .await?;
            // Build the text buffer off the main thread.
            let text_buffer = cx
                .background_executor()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            cx.new_model(|_| {
                Buffer::build(
                    text_buffer,
                    diff_base,
                    Some(Arc::new(file)),
                    Capability::ReadWrite,
                )
            })
        })
    }
737
738 pub fn diagnostics_for_path(
739 &self,
740 path: &Path,
741 ) -> Vec<(
742 LanguageServerId,
743 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
744 )> {
745 self.diagnostics.get(path).cloned().unwrap_or_default()
746 }
747
    /// Removes all diagnostics and summaries reported by `server_id`. When the
    /// worktree is shared, sends zeroed summaries so collaborators clear their
    /// copies too.
    pub fn clear_diagnostics_for_language_server(
        &mut self,
        server_id: LanguageServerId,
        _: &mut ModelContext<Worktree>,
    ) {
        let worktree_id = self.id().to_proto();
        // Drop this server's summary for every path, notifying collaborators,
        // and remove paths left with no summaries at all.
        self.diagnostic_summaries
            .retain(|path, summaries_by_server_id| {
                if summaries_by_server_id.remove(&server_id).is_some() {
                    if let Some(share) = self.share.as_ref() {
                        self.client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id: share.project_id,
                                worktree_id,
                                // Zero counts signal "cleared" to the remote side.
                                summary: Some(proto::DiagnosticSummary {
                                    path: path.to_string_lossy().to_string(),
                                    language_server_id: server_id.0 as u64,
                                    error_count: 0,
                                    warning_count: 0,
                                }),
                            })
                            .log_err();
                    }
                    !summaries_by_server_id.is_empty()
                } else {
                    true
                }
            });

        // Drop this server's full diagnostic entries as well; the per-path
        // vectors are kept sorted by server id, so binary search applies.
        self.diagnostics.retain(|_, diagnostics_by_server_id| {
            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                diagnostics_by_server_id.remove(ix);
                !diagnostics_by_server_id.is_empty()
            } else {
                true
            }
        });
    }
786
    /// Replaces the diagnostics reported by `server_id` for `worktree_path`,
    /// updating the per-path summary and broadcasting it when shared.
    ///
    /// Returns `Ok(true)` if the summary changed in a way observers should
    /// react to (i.e. the old or new summary is non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: remove its entry, and
            // drop the path entirely once no server has diagnostics for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            // Insert or replace, keeping the vector sorted by server id.
            summaries_by_server_id.insert(server_id, new_summary);
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        // Broadcast the new summary to collaborators when anything changed.
        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
846
    /// Installs `new_snapshot` as the current snapshot, forwards it (with the
    /// entry and repository changes) to collaborators when shared, and emits
    /// the corresponding events.
    fn set_snapshot(
        &mut self,
        new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut ModelContext<Worktree>,
    ) {
        // Diff repository state against the outgoing snapshot before
        // replacing it.
        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);

        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((
                    self.snapshot.clone(),
                    entry_changes.clone(),
                    repo_changes.clone(),
                ))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }
875
    /// Computes which git repositories changed between two snapshots by
    /// merge-joining the two `git_repositories` maps, which iterate in
    /// ascending entry-id order. Added repositories are reported with
    /// `old_repository: None`; modified (different `git_dir_scan_id`) and
    /// removed ones carry the previous `RepositoryEntry`.
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        // Present only in the new snapshot: added.
                        Ordering::Less => {
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        // Present in both: changed only if the .git dir was
                        // re-scanned since the old snapshot.
                        Ordering::Equal => {
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repository_entries
                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
                                        .cloned();
                                    changes.push((
                                        entry.path.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        // Present only in the old snapshot: removed.
                        Ordering::Greater => {
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repository_entries
                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
                                    .cloned();
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                // Old side exhausted: remaining new repositories were added.
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                // New side exhausted: remaining old repositories were removed.
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repository_entries
                            .get(&RepositoryWorkDirectory(entry.path.clone()))
                            .cloned();
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Helper to turn the peeked `(&K, &V)` into an owned `(K, V)`.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
970
971 pub fn scan_complete(&self) -> impl Future<Output = ()> {
972 let mut is_scanning_rx = self.is_scanning.1.clone();
973 async move {
974 let mut is_scanning = is_scanning_rx.borrow().clone();
975 while is_scanning {
976 if let Some(value) = is_scanning_rx.recv().await {
977 is_scanning = value;
978 } else {
979 break;
980 }
981 }
982 }
983 }
984
985 pub fn snapshot(&self) -> LocalSnapshot {
986 self.snapshot.clone()
987 }
988
989 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
990 proto::WorktreeMetadata {
991 id: self.id().to_proto(),
992 root_name: self.root_name().to_string(),
993 visible: self.visible,
994 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
995 }
996 }
997
    /// Reads the file at `path` from disk, returning its [`File`] handle, its
    /// text, and the git index text (diff base) when the file is inside a
    /// repository work directory. Also refreshes the file's worktree entry;
    /// files excluded from scanning yield a `File` with no `entry_id`.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Kick off the entry refresh concurrently with reading the file.
        let entry = self.refresh_entry(path.clone(), None, cx);

        cx.spawn(|this, mut cx| async move {
            let abs_path = abs_path?;
            let text = fs.load(&abs_path).await?;
            let mut index_task = None;
            let snapshot = this.update(&mut cx, |this, _| this.as_local().unwrap().snapshot())?;
            // If the file belongs to a git repository, load its index text on
            // a background thread to use as the diff base.
            if let Some(repo) = snapshot.repository_for_path(&path) {
                if let Some(repo_path) = repo.work_directory.relativize(&snapshot, &path).log_err()
                {
                    if let Some(git_repo) = snapshot.git_repositories.get(&*repo.work_directory) {
                        let git_repo = git_repo.repo_ptr.clone();
                        index_task = Some(
                            cx.background_executor()
                                .spawn(async move { git_repo.lock().load_index_text(&repo_path) }),
                        );
                    }
                }
            }

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            let worktree = this
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            match entry.await? {
                // The path is tracked by the worktree: build the file from
                // its refreshed entry.
                Some(entry) => Ok((
                    File {
                        entry_id: Some(entry.id),
                        worktree,
                        path: entry.path,
                        mtime: entry.mtime,
                        is_local: true,
                        is_deleted: false,
                        is_private: entry.is_private,
                    },
                    text,
                    diff_base,
                )),
                // The path is excluded from scanning: stat it directly and
                // return an entry-less file.
                None => {
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    let is_private = snapshot.is_path_private(path.as_ref());
                    Ok((
                        File {
                            entry_id: None,
                            worktree,
                            path,
                            mtime: metadata.mtime,
                            is_local: true,
                            is_deleted: false,
                            is_private,
                        },
                        text,
                        diff_base,
                    ))
                }
            }
        })
    }
1077
1078 pub fn save_buffer(
1079 &self,
1080 buffer_handle: Model<Buffer>,
1081 path: Arc<Path>,
1082 has_changed_file: bool,
1083 cx: &mut ModelContext<Worktree>,
1084 ) -> Task<Result<()>> {
1085 let buffer = buffer_handle.read(cx);
1086
1087 let rpc = self.client.clone();
1088 let buffer_id: u64 = buffer.remote_id().into();
1089 let project_id = self.share.as_ref().map(|share| share.project_id);
1090
1091 let text = buffer.as_rope().clone();
1092 let fingerprint = text.fingerprint();
1093 let version = buffer.version();
1094 let save = self.write_file(path.as_ref(), text, buffer.line_ending(), cx);
1095 let fs = Arc::clone(&self.fs);
1096 let abs_path = self.absolutize(&path);
1097 let is_private = self.snapshot.is_path_private(&path);
1098
1099 cx.spawn(move |this, mut cx| async move {
1100 let entry = save.await?;
1101 let abs_path = abs_path?;
1102 let this = this.upgrade().context("worktree dropped")?;
1103
1104 let (entry_id, mtime, path, is_dotenv) = match entry {
1105 Some(entry) => (Some(entry.id), entry.mtime, entry.path, entry.is_private),
1106 None => {
1107 let metadata = fs
1108 .metadata(&abs_path)
1109 .await
1110 .with_context(|| {
1111 format!(
1112 "Fetching metadata after saving the excluded buffer {abs_path:?}"
1113 )
1114 })?
1115 .with_context(|| {
1116 format!("Excluded buffer {path:?} got removed during saving")
1117 })?;
1118 (None, metadata.mtime, path, is_private)
1119 }
1120 };
1121
1122 if has_changed_file {
1123 let new_file = Arc::new(File {
1124 entry_id,
1125 worktree: this,
1126 path,
1127 mtime,
1128 is_local: true,
1129 is_deleted: false,
1130 is_private: is_dotenv,
1131 });
1132
1133 if let Some(project_id) = project_id {
1134 rpc.send(proto::UpdateBufferFile {
1135 project_id,
1136 buffer_id,
1137 file: Some(new_file.to_proto()),
1138 })
1139 .log_err();
1140 }
1141
1142 buffer_handle.update(&mut cx, |buffer, cx| {
1143 if has_changed_file {
1144 buffer.file_updated(new_file, cx);
1145 }
1146 })?;
1147 }
1148
1149 if let Some(project_id) = project_id {
1150 rpc.send(proto::BufferSaved {
1151 project_id,
1152 buffer_id,
1153 version: serialize_version(&version),
1154 mtime: Some(mtime.into()),
1155 fingerprint: serialize_fingerprint(fingerprint),
1156 })?;
1157 }
1158
1159 buffer_handle.update(&mut cx, |buffer, cx| {
1160 buffer.did_save(version.clone(), fingerprint, mtime, cx);
1161 })?;
1162
1163 Ok(())
1164 })
1165 }
1166
1167 /// Find the lowest path in the worktree's datastructures that is an ancestor
1168 fn lowest_ancestor(&self, path: &Path) -> PathBuf {
1169 let mut lowest_ancestor = None;
1170 for path in path.ancestors() {
1171 if self.entry_for_path(path).is_some() {
1172 lowest_ancestor = Some(path.to_path_buf());
1173 break;
1174 }
1175 }
1176
1177 lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
1178 }
1179
    /// Creates a new file or directory at `path` (worktree-relative) on disk,
    /// then refreshes the created path and each intermediate path below the
    /// deepest already-tracked ancestor so the snapshot picks up the new
    /// entries. Returns the refreshed entry for `path`, if any.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let path = path.into();
        // Deepest ancestor that already exists in the snapshot; everything
        // between it and `path` is new.
        let lowest_ancestor = self.lowest_ancestor(&path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Perform the filesystem write off the main thread.
        let write = cx.background_executor().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path?).await
            } else {
                // Create an empty file with default contents and line ending.
                fs.save(&abs_path?, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            let (result, refreshes) = this.update(&mut cx, |this, cx| {
                let mut refreshes = Vec::new();
                // `path` descends from `lowest_ancestor`, so this can't fail.
                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
                // Kick off a refresh for each newly-created path component.
                for refresh_path in refresh_paths.ancestors() {
                    if refresh_path == Path::new("") {
                        continue;
                    }
                    let refresh_full_path = lowest_ancestor.join(refresh_path);

                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
                        refresh_full_path.into(),
                        None,
                        cx,
                    ));
                }
                (
                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
                    refreshes,
                )
            })?;
            // Intermediate refresh failures are logged but don't fail the task.
            for refresh in refreshes {
                refresh.await.log_err();
            }

            result.await
        })
    }
1228
1229 pub(crate) fn write_file(
1230 &self,
1231 path: impl Into<Arc<Path>>,
1232 text: Rope,
1233 line_ending: LineEnding,
1234 cx: &mut ModelContext<Worktree>,
1235 ) -> Task<Result<Option<Entry>>> {
1236 let path: Arc<Path> = path.into();
1237 let abs_path = self.absolutize(&path);
1238 let fs = self.fs.clone();
1239 let write = cx
1240 .background_executor()
1241 .spawn(async move { fs.save(&abs_path?, &text, line_ending).await });
1242
1243 cx.spawn(|this, mut cx| async move {
1244 write.await?;
1245 this.update(&mut cx, |this, cx| {
1246 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1247 })?
1248 .await
1249 })
1250 }
1251
1252 pub fn delete_entry(
1253 &self,
1254 entry_id: ProjectEntryId,
1255 cx: &mut ModelContext<Worktree>,
1256 ) -> Option<Task<Result<()>>> {
1257 let entry = self.entry_for_id(entry_id)?.clone();
1258 let abs_path = self.absolutize(&entry.path);
1259 let fs = self.fs.clone();
1260
1261 let delete = cx.background_executor().spawn(async move {
1262 if entry.is_file() {
1263 fs.remove_file(&abs_path?, Default::default()).await?;
1264 } else {
1265 fs.remove_dir(
1266 &abs_path?,
1267 RemoveOptions {
1268 recursive: true,
1269 ignore_if_not_exists: false,
1270 },
1271 )
1272 .await?;
1273 }
1274 anyhow::Ok(entry.path)
1275 });
1276
1277 Some(cx.spawn(|this, mut cx| async move {
1278 let path = delete.await?;
1279 this.update(&mut cx, |this, _| {
1280 this.as_local_mut()
1281 .unwrap()
1282 .refresh_entries_for_paths(vec![path])
1283 })?
1284 .recv()
1285 .await;
1286 Ok(())
1287 }))
1288 }
1289
1290 pub fn rename_entry(
1291 &self,
1292 entry_id: ProjectEntryId,
1293 new_path: impl Into<Arc<Path>>,
1294 cx: &mut ModelContext<Worktree>,
1295 ) -> Task<Result<Option<Entry>>> {
1296 let old_path = match self.entry_for_id(entry_id) {
1297 Some(entry) => entry.path.clone(),
1298 None => return Task::ready(Ok(None)),
1299 };
1300 let new_path = new_path.into();
1301 let abs_old_path = self.absolutize(&old_path);
1302 let abs_new_path = self.absolutize(&new_path);
1303 let fs = self.fs.clone();
1304 let rename = cx.background_executor().spawn(async move {
1305 fs.rename(&abs_old_path?, &abs_new_path?, Default::default())
1306 .await
1307 });
1308
1309 cx.spawn(|this, mut cx| async move {
1310 rename.await?;
1311 this.update(&mut cx, |this, cx| {
1312 this.as_local_mut()
1313 .unwrap()
1314 .refresh_entry(new_path.clone(), Some(old_path), cx)
1315 })?
1316 .await
1317 })
1318 }
1319
1320 pub fn copy_entry(
1321 &self,
1322 entry_id: ProjectEntryId,
1323 new_path: impl Into<Arc<Path>>,
1324 cx: &mut ModelContext<Worktree>,
1325 ) -> Task<Result<Option<Entry>>> {
1326 let old_path = match self.entry_for_id(entry_id) {
1327 Some(entry) => entry.path.clone(),
1328 None => return Task::ready(Ok(None)),
1329 };
1330 let new_path = new_path.into();
1331 let abs_old_path = self.absolutize(&old_path);
1332 let abs_new_path = self.absolutize(&new_path);
1333 let fs = self.fs.clone();
1334 let copy = cx.background_executor().spawn(async move {
1335 copy_recursive(
1336 fs.as_ref(),
1337 &abs_old_path?,
1338 &abs_new_path?,
1339 Default::default(),
1340 )
1341 .await
1342 });
1343
1344 cx.spawn(|this, mut cx| async move {
1345 copy.await?;
1346 this.update(&mut cx, |this, cx| {
1347 this.as_local_mut()
1348 .unwrap()
1349 .refresh_entry(new_path.clone(), None, cx)
1350 })?
1351 .await
1352 })
1353 }
1354
1355 pub fn expand_entry(
1356 &mut self,
1357 entry_id: ProjectEntryId,
1358 cx: &mut ModelContext<Worktree>,
1359 ) -> Option<Task<Result<()>>> {
1360 let path = self.entry_for_id(entry_id)?.path.clone();
1361 let mut refresh = self.refresh_entries_for_paths(vec![path]);
1362 Some(cx.background_executor().spawn(async move {
1363 refresh.next().await;
1364 Ok(())
1365 }))
1366 }
1367
1368 pub fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
1369 let (tx, rx) = barrier::channel();
1370 self.scan_requests_tx
1371 .try_send(ScanRequest {
1372 relative_paths: paths,
1373 done: tx,
1374 })
1375 .ok();
1376 rx
1377 }
1378
1379 pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) {
1380 self.path_prefixes_to_scan_tx.try_send(path_prefix).ok();
1381 }
1382
1383 fn refresh_entry(
1384 &self,
1385 path: Arc<Path>,
1386 old_path: Option<Arc<Path>>,
1387 cx: &mut ModelContext<Worktree>,
1388 ) -> Task<Result<Option<Entry>>> {
1389 if self.is_path_excluded(path.to_path_buf()) {
1390 return Task::ready(Ok(None));
1391 }
1392 let paths = if let Some(old_path) = old_path.as_ref() {
1393 vec![old_path.clone(), path.clone()]
1394 } else {
1395 vec![path.clone()]
1396 };
1397 let mut refresh = self.refresh_entries_for_paths(paths);
1398 cx.spawn(move |this, mut cx| async move {
1399 refresh.recv().await;
1400 let new_entry = this.update(&mut cx, |this, _| {
1401 this.entry_for_path(path)
1402 .cloned()
1403 .ok_or_else(|| anyhow!("failed to read path after update"))
1404 })??;
1405 Ok(Some(new_entry))
1406 })
1407 }
1408
1409 pub fn observe_updates<F, Fut>(
1410 &mut self,
1411 project_id: u64,
1412 cx: &mut ModelContext<Worktree>,
1413 callback: F,
1414 ) -> oneshot::Receiver<()>
1415 where
1416 F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
1417 Fut: Send + Future<Output = bool>,
1418 {
1419 #[cfg(any(test, feature = "test-support"))]
1420 const MAX_CHUNK_SIZE: usize = 2;
1421 #[cfg(not(any(test, feature = "test-support")))]
1422 const MAX_CHUNK_SIZE: usize = 256;
1423
1424 let (share_tx, share_rx) = oneshot::channel();
1425
1426 if let Some(share) = self.share.as_mut() {
1427 share_tx.send(()).ok();
1428 *share.resume_updates.borrow_mut() = ();
1429 return share_rx;
1430 }
1431
1432 let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
1433 let (snapshots_tx, mut snapshots_rx) =
1434 mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
1435 snapshots_tx
1436 .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
1437 .ok();
1438
1439 let worktree_id = cx.entity_id().as_u64();
1440 let _maintain_remote_snapshot = cx.background_executor().spawn(async move {
1441 let mut is_first = true;
1442 while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
1443 let update;
1444 if is_first {
1445 update = snapshot.build_initial_update(project_id, worktree_id);
1446 is_first = false;
1447 } else {
1448 update =
1449 snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes);
1450 }
1451
1452 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1453 let _ = resume_updates_rx.try_recv();
1454 loop {
1455 let result = callback(update.clone());
1456 if result.await {
1457 break;
1458 } else {
1459 log::info!("waiting to resume updates");
1460 if resume_updates_rx.next().await.is_none() {
1461 return Some(());
1462 }
1463 }
1464 }
1465 }
1466 }
1467 share_tx.send(()).ok();
1468 Some(())
1469 });
1470
1471 self.share = Some(ShareState {
1472 project_id,
1473 snapshots_tx,
1474 resume_updates: resume_updates_tx,
1475 _maintain_remote_snapshot,
1476 });
1477 share_rx
1478 }
1479
    /// Begins sharing this worktree for the given project: first replays all
    /// known diagnostic summaries to the server, then streams worktree
    /// updates until sharing ends.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let client = self.client.clone();

        // Replay existing diagnostic summaries so the remote side starts with
        // complete diagnostics state; abort on the first failed send.
        for (path, summaries) in &self.diagnostic_summaries {
            for (&server_id, summary) in summaries {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id: cx.entity_id().as_u64(),
                    summary: Some(summary.to_proto(server_id, path)),
                }) {
                    return Task::ready(Err(e));
                }
            }
        }

        // Stream snapshot updates; each chunk is acknowledged via an RPC
        // request, and the stream pauses when a request fails.
        let rx = self.observe_updates(project_id, cx, move |update| {
            client.request(update).map(|result| result.is_ok())
        });
        cx.background_executor()
            .spawn(async move { rx.await.map_err(|_| anyhow!("share ended")) })
    }
1501
1502 pub fn unshare(&mut self) {
1503 self.share.take();
1504 }
1505
    /// Whether this worktree is currently being shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1509}
1510
impl RemoteWorktree {
    /// Returns a clone of the current snapshot of this remote worktree.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Called when the connection to the host is lost: stops accepting
    /// updates, drops all pending snapshot subscriptions, and marks the
    /// worktree as disconnected.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the given buffer, then applies the version,
    /// fingerprint, and mtime from the response to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: Model<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id().into();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.spawn(move |_, mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            })?;

            Ok(())
        })
    }

    /// Enqueues a worktree update received from the host. Updates arriving
    /// after `disconnected_from_host` has dropped the channel are ignored.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot with at least the given scan id has been applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once a snapshot with at least the given
    /// scan id has been observed, or fails if the worktree disconnects first.
    pub(crate) fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` below return an error.
            drop(tx);
        } else {
            // Keep subscriptions sorted by scan id so they can be completed
            // in order as snapshots arrive.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Applies a diagnostic summary received from the host. An empty summary
    /// removes the server's entry for the path (and the path itself once no
    /// servers remain); otherwise the summary is inserted or replaced.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry received from the host once the snapshot with
    /// `scan_id` has been observed, keeping the foreground snapshot in sync
    /// with the background one.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                // Mirror the mutated background snapshot into the foreground.
                worktree.snapshot = snapshot.clone();
                entry
            })?
        })
    }

    /// Deletes the entry with the given id once the snapshot with `scan_id`
    /// has been observed, keeping the foreground snapshot in sync with the
    /// background one.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(move |this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                // Mirror the mutated background snapshot into the foreground.
                worktree.snapshot = snapshot.clone();
            })?;
            Ok(())
        })
    }
}
1652
1653impl Snapshot {
1654 pub fn id(&self) -> WorktreeId {
1655 self.id
1656 }
1657
1658 pub fn abs_path(&self) -> &Arc<Path> {
1659 &self.abs_path
1660 }
1661
1662 pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
1663 if path
1664 .components()
1665 .any(|component| !matches!(component, std::path::Component::Normal(_)))
1666 {
1667 return Err(anyhow!("invalid path"));
1668 }
1669 if path.file_name().is_some() {
1670 Ok(self.abs_path.join(path))
1671 } else {
1672 Ok(self.abs_path.to_path_buf())
1673 }
1674 }
1675
1676 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1677 self.entries_by_id.get(&entry_id, &()).is_some()
1678 }
1679
1680 fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1681 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1682 let old_entry = self.entries_by_id.insert_or_replace(
1683 PathEntry {
1684 id: entry.id,
1685 path: entry.path.clone(),
1686 is_ignored: entry.is_ignored,
1687 scan_id: 0,
1688 },
1689 &(),
1690 );
1691 if let Some(old_entry) = old_entry {
1692 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1693 }
1694 self.entries_by_path.insert_or_replace(entry.clone(), &());
1695 Ok(entry)
1696 }
1697
1698 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1699 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1700 self.entries_by_path = {
1701 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1702 let mut new_entries_by_path =
1703 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1704 while let Some(entry) = cursor.item() {
1705 if entry.path.starts_with(&removed_entry.path) {
1706 self.entries_by_id.remove(&entry.id, &());
1707 cursor.next(&());
1708 } else {
1709 break;
1710 }
1711 }
1712 new_entries_by_path.append(cursor.suffix(&()), &());
1713 new_entries_by_path
1714 };
1715
1716 Some(removed_entry.path)
1717 }
1718
1719 #[cfg(any(test, feature = "test-support"))]
1720 pub fn status_for_file(&self, path: impl Into<PathBuf>) -> Option<GitFileStatus> {
1721 let path = path.into();
1722 self.entries_by_path
1723 .get(&PathKey(Arc::from(path)), &())
1724 .and_then(|entry| entry.git_status)
1725 }
1726
1727 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1728 let mut entries_by_path_edits = Vec::new();
1729 let mut entries_by_id_edits = Vec::new();
1730
1731 for entry_id in update.removed_entries {
1732 let entry_id = ProjectEntryId::from_proto(entry_id);
1733 entries_by_id_edits.push(Edit::Remove(entry_id));
1734 if let Some(entry) = self.entry_for_id(entry_id) {
1735 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1736 }
1737 }
1738
1739 for entry in update.updated_entries {
1740 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1741 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1742 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1743 }
1744 if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
1745 if old_entry.id != entry.id {
1746 entries_by_id_edits.push(Edit::Remove(old_entry.id));
1747 }
1748 }
1749 entries_by_id_edits.push(Edit::Insert(PathEntry {
1750 id: entry.id,
1751 path: entry.path.clone(),
1752 is_ignored: entry.is_ignored,
1753 scan_id: 0,
1754 }));
1755 entries_by_path_edits.push(Edit::Insert(entry));
1756 }
1757
1758 self.entries_by_path.edit(entries_by_path_edits, &());
1759 self.entries_by_id.edit(entries_by_id_edits, &());
1760
1761 update.removed_repositories.sort_unstable();
1762 self.repository_entries.retain(|_, entry| {
1763 if let Ok(_) = update
1764 .removed_repositories
1765 .binary_search(&entry.work_directory.to_proto())
1766 {
1767 false
1768 } else {
1769 true
1770 }
1771 });
1772
1773 for repository in update.updated_repositories {
1774 let work_directory_entry: WorkDirectoryEntry =
1775 ProjectEntryId::from_proto(repository.work_directory_id).into();
1776
1777 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1778 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1779 if self.repository_entries.get(&work_directory).is_some() {
1780 self.repository_entries.update(&work_directory, |repo| {
1781 repo.branch = repository.branch.map(Into::into);
1782 });
1783 } else {
1784 self.repository_entries.insert(
1785 work_directory,
1786 RepositoryEntry {
1787 work_directory: work_directory_entry,
1788 branch: repository.branch.map(Into::into),
1789 },
1790 )
1791 }
1792 } else {
1793 log::error!("no work directory entry for repository {:?}", repository)
1794 }
1795 }
1796
1797 self.scan_id = update.scan_id as usize;
1798 if update.is_last_update {
1799 self.completed_scan_id = update.scan_id as usize;
1800 }
1801
1802 Ok(())
1803 }
1804
1805 pub fn file_count(&self) -> usize {
1806 self.entries_by_path.summary().file_count
1807 }
1808
1809 pub fn visible_file_count(&self) -> usize {
1810 self.entries_by_path.summary().non_ignored_file_count
1811 }
1812
1813 fn traverse_from_offset(
1814 &self,
1815 include_dirs: bool,
1816 include_ignored: bool,
1817 start_offset: usize,
1818 ) -> Traversal {
1819 let mut cursor = self.entries_by_path.cursor();
1820 cursor.seek(
1821 &TraversalTarget::Count {
1822 count: start_offset,
1823 include_dirs,
1824 include_ignored,
1825 },
1826 Bias::Right,
1827 &(),
1828 );
1829 Traversal {
1830 cursor,
1831 include_dirs,
1832 include_ignored,
1833 }
1834 }
1835
1836 fn traverse_from_path(
1837 &self,
1838 include_dirs: bool,
1839 include_ignored: bool,
1840 path: &Path,
1841 ) -> Traversal {
1842 let mut cursor = self.entries_by_path.cursor();
1843 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1844 Traversal {
1845 cursor,
1846 include_dirs,
1847 include_ignored,
1848 }
1849 }
1850
1851 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1852 self.traverse_from_offset(false, include_ignored, start)
1853 }
1854
1855 pub fn entries(&self, include_ignored: bool) -> Traversal {
1856 self.traverse_from_offset(true, include_ignored, 0)
1857 }
1858
1859 pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
1860 self.repository_entries
1861 .iter()
1862 .map(|(path, entry)| (&path.0, entry))
1863 }
1864
1865 /// Get the repository whose work directory contains the given path.
1866 pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
1867 self.repository_entries
1868 .get(&RepositoryWorkDirectory(path.into()))
1869 .cloned()
1870 }
1871
1872 /// Get the repository whose work directory contains the given path.
1873 pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1874 self.repository_and_work_directory_for_path(path)
1875 .map(|e| e.1)
1876 }
1877
1878 pub fn repository_and_work_directory_for_path(
1879 &self,
1880 path: &Path,
1881 ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
1882 self.repository_entries
1883 .iter()
1884 .filter(|(workdir_path, _)| path.starts_with(workdir_path))
1885 .last()
1886 .map(|(path, repo)| (path.clone(), repo.clone()))
1887 }
1888
1889 /// Given an ordered iterator of entries, returns an iterator of those entries,
1890 /// along with their containing git repository.
1891 pub fn entries_with_repositories<'a>(
1892 &'a self,
1893 entries: impl 'a + Iterator<Item = &'a Entry>,
1894 ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
1895 let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
1896 let mut repositories = self.repositories().peekable();
1897 entries.map(move |entry| {
1898 while let Some((repo_path, _)) = containing_repos.last() {
1899 if !entry.path.starts_with(repo_path) {
1900 containing_repos.pop();
1901 } else {
1902 break;
1903 }
1904 }
1905 while let Some((repo_path, _)) = repositories.peek() {
1906 if entry.path.starts_with(repo_path) {
1907 containing_repos.push(repositories.next().unwrap());
1908 } else {
1909 break;
1910 }
1911 }
1912 let repo = containing_repos.last().map(|(_, repo)| *repo);
1913 (entry, repo)
1914 })
1915 }
1916
1917 /// Updates the `git_status` of the given entries such that files'
1918 /// statuses bubble up to their ancestor directories.
1919 pub fn propagate_git_statuses(&self, result: &mut [Entry]) {
1920 let mut cursor = self
1921 .entries_by_path
1922 .cursor::<(TraversalProgress, GitStatuses)>();
1923 let mut entry_stack = Vec::<(usize, GitStatuses)>::new();
1924
1925 let mut result_ix = 0;
1926 loop {
1927 let next_entry = result.get(result_ix);
1928 let containing_entry = entry_stack.last().map(|(ix, _)| &result[*ix]);
1929
1930 let entry_to_finish = match (containing_entry, next_entry) {
1931 (Some(_), None) => entry_stack.pop(),
1932 (Some(containing_entry), Some(next_path)) => {
1933 if !next_path.path.starts_with(&containing_entry.path) {
1934 entry_stack.pop()
1935 } else {
1936 None
1937 }
1938 }
1939 (None, Some(_)) => None,
1940 (None, None) => break,
1941 };
1942
1943 if let Some((entry_ix, prev_statuses)) = entry_to_finish {
1944 cursor.seek_forward(
1945 &TraversalTarget::PathSuccessor(&result[entry_ix].path),
1946 Bias::Left,
1947 &(),
1948 );
1949
1950 let statuses = cursor.start().1 - prev_statuses;
1951
1952 result[entry_ix].git_status = if statuses.conflict > 0 {
1953 Some(GitFileStatus::Conflict)
1954 } else if statuses.modified > 0 {
1955 Some(GitFileStatus::Modified)
1956 } else if statuses.added > 0 {
1957 Some(GitFileStatus::Added)
1958 } else {
1959 None
1960 };
1961 } else {
1962 if result[result_ix].is_dir() {
1963 cursor.seek_forward(
1964 &TraversalTarget::Path(&result[result_ix].path),
1965 Bias::Left,
1966 &(),
1967 );
1968 entry_stack.push((result_ix, cursor.start().1));
1969 }
1970 result_ix += 1;
1971 }
1972 }
1973 }
1974
1975 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1976 let empty_path = Path::new("");
1977 self.entries_by_path
1978 .cursor::<()>()
1979 .filter(move |entry| entry.path.as_ref() != empty_path)
1980 .map(|entry| &entry.path)
1981 }
1982
1983 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1984 let mut cursor = self.entries_by_path.cursor();
1985 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1986 let traversal = Traversal {
1987 cursor,
1988 include_dirs: true,
1989 include_ignored: true,
1990 };
1991 ChildEntriesIter {
1992 traversal,
1993 parent_path,
1994 }
1995 }
1996
1997 pub fn descendent_entries<'a>(
1998 &'a self,
1999 include_dirs: bool,
2000 include_ignored: bool,
2001 parent_path: &'a Path,
2002 ) -> DescendentEntriesIter<'a> {
2003 let mut cursor = self.entries_by_path.cursor();
2004 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
2005 let mut traversal = Traversal {
2006 cursor,
2007 include_dirs,
2008 include_ignored,
2009 };
2010
2011 if traversal.end_offset() == traversal.start_offset() {
2012 traversal.advance();
2013 }
2014
2015 DescendentEntriesIter {
2016 traversal,
2017 parent_path,
2018 }
2019 }
2020
2021 pub fn root_entry(&self) -> Option<&Entry> {
2022 self.entry_for_path("")
2023 }
2024
2025 pub fn root_name(&self) -> &str {
2026 &self.root_name
2027 }
2028
2029 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
2030 self.repository_entries
2031 .get(&RepositoryWorkDirectory(Path::new("").into()))
2032 .map(|entry| entry.to_owned())
2033 }
2034
2035 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
2036 self.repository_entries.values()
2037 }
2038
2039 pub fn scan_id(&self) -> usize {
2040 self.scan_id
2041 }
2042
2043 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
2044 let path = path.as_ref();
2045 self.traverse_from_path(true, true, path)
2046 .entry()
2047 .and_then(|entry| {
2048 if entry.path.as_ref() == path {
2049 Some(entry)
2050 } else {
2051 None
2052 }
2053 })
2054 }
2055
2056 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
2057 let entry = self.entries_by_id.get(&id, &())?;
2058 self.entry_for_path(&entry.path)
2059 }
2060
2061 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
2062 self.entry_for_path(path.as_ref()).map(|e| e.inode)
2063 }
2064}
2065
2066impl LocalSnapshot {
    /// Returns the local on-disk repository state for the given repository
    /// entry, if its work directory is known.
    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }
2070
2071 pub(crate) fn local_repo_for_path(
2072 &self,
2073 path: &Path,
2074 ) -> Option<(RepositoryWorkDirectory, &LocalRepositoryEntry)> {
2075 let (path, repo) = self.repository_and_work_directory_for_path(path)?;
2076 Some((path, self.git_repositories.get(&repo.work_directory_id())?))
2077 }
2078
    /// Builds an incremental `UpdateWorktree` message from the given entry and
    /// git-repository change sets, for streaming to remote collaborators.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updated_repositories = Vec::new();
        let mut removed_repositories = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        for (work_dir_path, change) in repo_changes.iter() {
            let new_repo = self
                .repository_entries
                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
            match (&change.old_repository, new_repo) {
                (Some(old_repo), Some(new_repo)) => {
                    // Repository still exists: send a diff-based update.
                    updated_repositories.push(new_repo.build_update(old_repo));
                }
                (None, Some(new_repo)) => {
                    // Newly discovered repository: send it in full.
                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
                }
                (Some(old_repo), None) => {
                    removed_repositories.push(old_repo.work_directory.0.to_proto());
                }
                _ => {}
            }
        }

        // Sorting enables the binary search below and on the receiving side.
        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);
        removed_repositories.sort_unstable();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        // Drop removals that are superseded by an update for the same id.
        // TODO - optimize, knowing that removed_entries are sorted.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }
2138
2139 fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
2140 let mut updated_entries = self
2141 .entries_by_path
2142 .iter()
2143 .map(proto::Entry::from)
2144 .collect::<Vec<_>>();
2145 updated_entries.sort_unstable_by_key(|e| e.id);
2146
2147 let mut updated_repositories = self
2148 .repository_entries
2149 .values()
2150 .map(proto::RepositoryEntry::from)
2151 .collect::<Vec<_>>();
2152 updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
2153
2154 proto::UpdateWorktree {
2155 project_id,
2156 worktree_id,
2157 abs_path: self.abs_path().to_string_lossy().into(),
2158 root_name: self.root_name().to_string(),
2159 updated_entries,
2160 removed_entries: Vec::new(),
2161 scan_id: self.scan_id as u64,
2162 is_last_update: self.completed_scan_id == self.scan_id,
2163 updated_repositories,
2164 removed_repositories: Vec::new(),
2165 }
2166 }
2167
    /// Inserts (or replaces) an entry in the local snapshot, keeping the
    /// by-path and by-id trees consistent. `.gitignore` files are additionally
    /// parsed and cached, keyed by their parent directory.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    // Cache the parsed gitignore under its parent directory.
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    // A broken .gitignore shouldn't abort the insertion.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        if entry.kind == EntryKind::PendingDir {
            // Preserve the kind of an existing entry at this path so a
            // pending directory doesn't clobber an already-known one.
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            if removed.id != entry.id {
                // The path now maps to a different entry; drop the stale id.
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
2213
2214 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2215 let mut inodes = TreeSet::default();
2216 for ancestor in path.ancestors().skip(1) {
2217 if let Some(entry) = self.entry_for_path(ancestor) {
2218 inodes.insert(entry.inode);
2219 }
2220 }
2221 inodes
2222 }
2223
    /// Builds the stack of cached `.gitignore` files that apply to `abs_path`.
    /// Returns `IgnoreStack::all()` if the path, or any ancestor directory,
    /// is itself ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        // Gather cached ignores for each ancestor, innermost first.
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        // Build the stack outermost-first (hence `rev`); once any ancestor
        // directory is ignored, everything beneath it is ignored too.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
2250
2251 #[cfg(test)]
2252 pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
2253 self.entries_by_path
2254 .cursor::<()>()
2255 .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
2256 }
2257
    #[cfg(test)]
    /// Test-only consistency check over the snapshot's internal indices.
    /// When `git_state` is true, also verifies the tracked `.gitignore`
    /// bookkeeping against the entry tree.
    pub fn check_invariants(&self, git_state: bool) {
        use pretty_assertions::assert_eq;

        // The two entry trees must index exactly the same (path, id) pairs.
        assert_eq!(
            self.entries_by_path
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        // The file traversals must yield exactly the file entries, in order.
        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>() {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if !entry.is_ignored && !entry.is_external {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // Walking the tree via `child_entries` must visit the same paths, in
        // the same order, as the flat cursor over `entries_by_path`.
        let mut bfs_paths = Vec::new();
        let mut stack = self
            .root_entry()
            .map(|e| e.path.as_ref())
            .into_iter()
            .collect::<Vec<_>>();
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>()
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        // ...and so must the public `entries` traversal.
        let dfs_paths_via_traversal = self
            .entries(true)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        if git_state {
            // Every tracked .gitignore parent must still have an entry, and
            // the .gitignore file itself must exist in the snapshot.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }
    }
2328
2329 #[cfg(test)]
2330 pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
2331 let mut paths = Vec::new();
2332 for entry in self.entries_by_path.cursor::<()>() {
2333 if include_ignored || !entry.is_ignored {
2334 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
2335 }
2336 }
2337 paths.sort_by(|a, b| a.0.cmp(b.0));
2338 paths
2339 }
2340
2341 pub fn is_path_private(&self, path: &Path) -> bool {
2342 path.ancestors().any(|ancestor| {
2343 self.private_files
2344 .iter()
2345 .any(|exclude_matcher| exclude_matcher.is_match(&ancestor))
2346 })
2347 }
2348
2349 pub fn is_path_excluded(&self, mut path: PathBuf) -> bool {
2350 loop {
2351 if self
2352 .file_scan_exclusions
2353 .iter()
2354 .any(|exclude_matcher| exclude_matcher.is_match(&path))
2355 {
2356 return true;
2357 }
2358 if !path.pop() {
2359 return false;
2360 }
2361 }
2362 }
2363}
2364
impl BackgroundScannerState {
    /// Whether the scanner should descend into the directory `entry`.
    ///
    /// Non-ignored, non-external directories are always scanned; otherwise a
    /// directory is scanned only if it is a `.git` dir, was scanned before, or
    /// was explicitly requested via `paths_to_scan`/`path_prefixes_to_scan`.
    fn should_scan_directory(&self, entry: &Entry) -> bool {
        (!entry.is_external && !entry.is_ignored)
            || entry.path.file_name() == Some(*DOT_GIT)
            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
            || self
                .paths_to_scan
                .iter()
                .any(|p| p.starts_with(&entry.path))
            || self
                .path_prefixes_to_scan
                .iter()
                .any(|p| entry.path.starts_with(p))
    }

    /// Queues a `ScanJob` for the directory `entry`, unless doing so would
    /// revisit an ancestor inode (symlink-cycle protection).
    fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
        let path = entry.path.clone();
        let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
        let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);
        let mut containing_repository = None;
        // Only resolve git state for non-ignored directories; ignored dirs
        // don't get statuses.
        if !ignore_stack.is_abs_path_ignored(&abs_path, true) {
            if let Some((workdir_path, repo)) = self.snapshot.local_repo_for_path(&path) {
                if let Ok(repo_path) = path.strip_prefix(&workdir_path.0) {
                    containing_repository = Some((
                        workdir_path,
                        repo.repo_ptr.clone(),
                        repo.repo_ptr.lock().staged_statuses(repo_path),
                    ));
                }
            }
        }
        // If this inode is already one of our ancestors, we'd loop forever
        // following the symlink — skip the job in that case.
        if !ancestor_inodes.contains(&entry.inode) {
            ancestor_inodes.insert(entry.inode);
            scan_job_tx
                .try_send(ScanJob {
                    abs_path,
                    path,
                    ignore_stack,
                    scan_queue: scan_job_tx.clone(),
                    ancestor_inodes,
                    is_external: entry.is_external,
                    containing_repository,
                })
                .unwrap();
        }
    }

    /// Keeps entry ids stable across rescans: reuse the id of a removed entry
    /// with the same inode, or of the existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Inserts `entry` into the snapshot, reusing ids where possible, and
    /// registers a git repository if the entry is a `.git` directory.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        self.reuse_entry_id(&mut entry);
        let entry = self.snapshot.insert_entry(entry, fs);
        if entry.path.file_name() == Some(&DOT_GIT) {
            self.build_git_repository(entry.path.clone(), fs);
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);

        entry
    }

    /// Records the children of a just-scanned directory, marking the parent
    /// as loaded and registering its `.gitignore` matcher, if any.
    fn populate_dir(
        &mut self,
        parent_path: &Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = if let Some(parent_entry) = self
            .snapshot
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            // The parent may have been deleted between scheduling the scan
            // and finishing it; nothing to populate in that case.
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
            EntryKind::Dir => {}
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
            self.snapshot
                .ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        let parent_entry_id = parent_entry.id;
        self.scanned_dirs.insert(parent_entry_id);
        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for entry in entries {
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.snapshot.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // Track the parent as changed (changed_paths is kept sorted).
        if let Err(ix) = self.changed_paths.binary_search(parent_path) {
            self.changed_paths.insert(ix, parent_path.clone());
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }

    /// Removes `path` and its entire subtree from both entry indices,
    /// remembering removed ids (by inode) so they can be reused, and flags
    /// the parent's ignore matcher for refresh if a `.gitignore` was removed.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path tree into [before path] + [path subtree] + [after],
            // keeping the outer pieces.
            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.append(cursor.suffix(&()), &());
        }
        self.snapshot.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the highest removed id per inode for later reuse by
            // `reuse_entry_id`.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Don't drop the matcher immediately; mark it as needing an
            // update so ignore state is recomputed on the next pass.
            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
            if let Some((_, needs_update)) = self
                .snapshot
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *needs_update = true;
            }
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }

    /// Reloads git state for the given `.git` directories and prunes
    /// repositories whose `.git` entry no longer exists.
    fn reload_repositories(&mut self, dot_git_dirs_to_reload: &HashSet<PathBuf>, fs: &dyn Fs) {
        let scan_id = self.snapshot.scan_id;

        for dot_git_dir in dot_git_dirs_to_reload {
            // If there is already a repository for this .git directory, reload
            // the status for all of its files.
            let repository = self
                .snapshot
                .git_repositories
                .iter()
                .find_map(|(entry_id, repo)| {
                    (repo.git_dir_path.as_ref() == dot_git_dir).then(|| (*entry_id, repo.clone()))
                });
            match repository {
                None => {
                    self.build_git_repository(Arc::from(dot_git_dir.as_path()), fs);
                }
                Some((entry_id, repository)) => {
                    // Already reloaded during this scan; skip duplicate work.
                    if repository.git_dir_scan_id == scan_id {
                        continue;
                    }
                    let Some(work_dir) = self
                        .snapshot
                        .entry_for_id(entry_id)
                        .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
                    else {
                        continue;
                    };

                    log::info!("reload git repository {dot_git_dir:?}");
                    let repository = repository.repo_ptr.lock();
                    let branch = repository.branch_name();
                    repository.reload_index();

                    self.snapshot
                        .git_repositories
                        .update(&entry_id, |entry| entry.git_dir_scan_id = scan_id);
                    self.snapshot
                        .snapshot
                        .repository_entries
                        .update(&work_dir, |entry| entry.branch = branch.map(Into::into));

                    self.update_git_statuses(&work_dir, &*repository);
                }
            }
        }

        // Remove any git repositories whose .git entry no longer exists.
        let snapshot = &mut self.snapshot;
        let mut ids_to_preserve = HashSet::default();
        for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
            let exists_in_snapshot = snapshot
                .entry_for_id(work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                });
            if exists_in_snapshot {
                ids_to_preserve.insert(work_directory_id);
            } else {
                // An excluded .git dir has no snapshot entry but may still
                // exist on disk; keep its repository unless the metadata
                // lookup confirms it is gone.
                let git_dir_abs_path = snapshot.abs_path().join(&entry.git_dir_path);
                let git_dir_excluded = snapshot.is_path_excluded(entry.git_dir_path.to_path_buf());
                if git_dir_excluded
                    && !matches!(smol::block_on(fs.metadata(&git_dir_abs_path)), Ok(None))
                {
                    ids_to_preserve.insert(work_directory_id);
                }
            }
        }
        snapshot
            .git_repositories
            .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
        snapshot
            .repository_entries
            .retain(|_, entry| ids_to_preserve.contains(&entry.work_directory.0));
    }

    /// Opens the repository at `dot_git_path`, registers it in the snapshot,
    /// and computes initial git statuses.
    ///
    /// Returns `None` if the path is inside git metadata, the work directory
    /// has no entry, the repository is already registered, or it cannot be
    /// opened.
    fn build_git_repository(
        &mut self,
        dot_git_path: Arc<Path>,
        fs: &dyn Fs,
    ) -> Option<(
        RepositoryWorkDirectory,
        Arc<Mutex<dyn GitRepository>>,
        TreeMap<RepoPath, GitFileStatus>,
    )> {
        log::info!("build git repository {:?}", dot_git_path);

        let work_dir_path: Arc<Path> = dot_git_path.parent().unwrap().into();

        // Guard against repositories inside the repository metadata
        if work_dir_path.iter().any(|component| component == *DOT_GIT) {
            return None;
        };

        let work_dir_id = self
            .snapshot
            .entry_for_path(work_dir_path.clone())
            .map(|entry| entry.id)?;

        if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
            return None;
        }

        let abs_path = self.snapshot.abs_path.join(&dot_git_path);
        let repository = fs.open_repo(abs_path.as_path())?;
        let work_directory = RepositoryWorkDirectory(work_dir_path.clone());

        let repo_lock = repository.lock();
        self.snapshot.repository_entries.insert(
            work_directory.clone(),
            RepositoryEntry {
                work_directory: work_dir_id.into(),
                branch: repo_lock.branch_name().map(Into::into),
            },
        );

        let staged_statuses = self.update_git_statuses(&work_directory, &*repo_lock);
        drop(repo_lock);

        self.snapshot.git_repositories.insert(
            work_dir_id,
            LocalRepositoryEntry {
                git_dir_scan_id: 0,
                repo_ptr: repository.clone(),
                git_dir_path: dot_git_path.clone(),
            },
        );

        Some((work_directory, repository, staged_statuses))
    }

    /// Recomputes the git status of every file under `work_directory`,
    /// editing changed entries in place and recording them in
    /// `changed_paths`. Returns the staged statuses for the whole repo.
    fn update_git_statuses(
        &mut self,
        work_directory: &RepositoryWorkDirectory,
        repo: &dyn GitRepository,
    ) -> TreeMap<RepoPath, GitFileStatus> {
        let staged_statuses = repo.staged_statuses(Path::new(""));

        let mut changes = vec![];
        let mut edits = vec![];

        for mut entry in self
            .snapshot
            .descendent_entries(false, false, &work_directory.0)
            .cloned()
        {
            let Ok(repo_path) = entry.path.strip_prefix(&work_directory.0) else {
                continue;
            };
            let repo_path = RepoPath(repo_path.to_path_buf());
            let git_file_status = combine_git_statuses(
                staged_statuses.get(&repo_path).copied(),
                repo.unstaged_status(&repo_path, entry.mtime),
            );
            // Only touch entries whose status actually changed.
            if entry.git_status != git_file_status {
                entry.git_status = git_file_status;
                changes.push(entry.path.clone());
                edits.push(Edit::Insert(entry));
            }
        }

        self.snapshot.entries_by_path.edit(edits, &());
        util::extend_sorted(&mut self.changed_paths, changes, usize::MAX, Ord::cmp);
        staged_statuses
    }
}
2697
2698async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2699 let contents = fs.load(abs_path).await?;
2700 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2701 let mut builder = GitignoreBuilder::new(parent);
2702 for line in contents.lines() {
2703 builder.add_line(Some(abs_path.into()), line)?;
2704 }
2705 Ok(builder.build()?)
2706}
2707
2708impl WorktreeId {
2709 pub fn from_usize(handle_id: usize) -> Self {
2710 Self(handle_id)
2711 }
2712
2713 pub(crate) fn from_proto(id: u64) -> Self {
2714 Self(id as usize)
2715 }
2716
2717 pub fn to_proto(&self) -> u64 {
2718 self.0 as u64
2719 }
2720
2721 pub fn to_usize(&self) -> usize {
2722 self.0
2723 }
2724}
2725
2726impl fmt::Display for WorktreeId {
2727 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2728 self.0.fmt(f)
2729 }
2730}
2731
2732impl Deref for Worktree {
2733 type Target = Snapshot;
2734
2735 fn deref(&self) -> &Self::Target {
2736 match self {
2737 Worktree::Local(worktree) => &worktree.snapshot,
2738 Worktree::Remote(worktree) => &worktree.snapshot,
2739 }
2740 }
2741}
2742
2743impl Deref for LocalWorktree {
2744 type Target = LocalSnapshot;
2745
2746 fn deref(&self) -> &Self::Target {
2747 &self.snapshot
2748 }
2749}
2750
2751impl Deref for RemoteWorktree {
2752 type Target = Snapshot;
2753
2754 fn deref(&self) -> &Self::Target {
2755 &self.snapshot
2756 }
2757}
2758
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render as the underlying snapshot's debug representation.
        self.snapshot.fmt(f)
    }
}
2764
2765impl fmt::Debug for Snapshot {
2766 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2767 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2768 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2769
2770 impl<'a> fmt::Debug for EntriesByPath<'a> {
2771 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2772 f.debug_map()
2773 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2774 .finish()
2775 }
2776 }
2777
2778 impl<'a> fmt::Debug for EntriesById<'a> {
2779 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2780 f.debug_list().entries(self.0.iter()).finish()
2781 }
2782 }
2783
2784 f.debug_struct("Snapshot")
2785 .field("id", &self.id)
2786 .field("root_name", &self.root_name)
2787 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2788 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2789 .finish()
2790 }
2791}
2792
/// A handle to a file within a worktree, used by the `language` crate to tie
/// buffers back to their on-disk (or remote) location.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: Model<Worktree>,
    /// The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time.
    pub mtime: SystemTime,
    /// The worktree entry id, if the file currently has an entry.
    pub(crate) entry_id: Option<ProjectEntryId>,
    /// Whether this file belongs to a local (vs. remote) worktree.
    pub(crate) is_local: bool,
    /// Whether the file has been deleted from disk.
    pub(crate) is_deleted: bool,
    /// Whether the file is considered private (see `is_path_private`).
    pub(crate) is_private: bool,
}
2803
impl language::File for File {
    /// Local files expose the `LocalFile` interface; remote files don't.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// The path to display to the user: worktree name (or abbreviated
    /// absolute path for invisible worktrees) joined with the relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            // Abbreviate paths under the user's home directory with "~".
            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means the worktree root itself.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn worktree_id(&self) -> usize {
        self.worktree.entity_id().as_u64() as usize
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file for RPC. Note that `is_private` is intentionally
    /// not part of the wire format here.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.entity_id().as_u64(),
            entry_id: self.entry_id.map(|id| id.to_proto()),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }

    fn is_private(&self) -> bool {
        self.is_private
    }
}
2879
impl language::LocalFile for File {
    /// The file's absolute path on disk: the worktree root joined with the
    /// relative path (or the root itself for an empty relative path).
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
        if self.path.as_ref() == Path::new("") {
            worktree_path.to_path_buf()
        } else {
            worktree_path.join(&self.path)
        }
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background_executor()
            .spawn(async move { fs.load(&abs_path?).await })
    }

    /// Notifies collaborators that the buffer was reloaded from disk, if
    /// this worktree is currently being shared.
    fn buffer_reloaded(
        &self,
        buffer_id: BufferId,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id: buffer_id.into(),
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2923
2924impl File {
2925 pub fn for_entry(entry: Entry, worktree: Model<Worktree>) -> Arc<Self> {
2926 Arc::new(Self {
2927 worktree,
2928 path: entry.path.clone(),
2929 mtime: entry.mtime,
2930 entry_id: Some(entry.id),
2931 is_local: true,
2932 is_deleted: false,
2933 is_private: entry.is_private,
2934 })
2935 }
2936
2937 pub fn from_proto(
2938 proto: rpc::proto::File,
2939 worktree: Model<Worktree>,
2940 cx: &AppContext,
2941 ) -> Result<Self> {
2942 let worktree_id = worktree
2943 .read(cx)
2944 .as_remote()
2945 .ok_or_else(|| anyhow!("not remote"))?
2946 .id();
2947
2948 if worktree_id.to_proto() != proto.worktree_id {
2949 return Err(anyhow!("worktree id does not match file"));
2950 }
2951
2952 Ok(Self {
2953 worktree,
2954 path: Path::new(&proto.path).into(),
2955 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2956 entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
2957 is_local: false,
2958 is_deleted: proto.is_deleted,
2959 is_private: false,
2960 })
2961 }
2962
2963 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2964 file.and_then(|f| f.as_any().downcast_ref())
2965 }
2966
2967 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2968 self.worktree.read(cx).id()
2969 }
2970
2971 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2972 if self.is_deleted {
2973 None
2974 } else {
2975 self.entry_id
2976 }
2977 }
2978}
2979
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable, project-wide identifier for this entry.
    pub id: ProjectEntryId,
    /// Whether this is a file or a (possibly not-yet-loaded) directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode number.
    pub inode: u64,
    /// Modification time reported by the filesystem.
    pub mtime: SystemTime,
    /// Whether the filesystem entry is a symbolic link.
    pub is_symlink: bool,

    /// Whether this entry is ignored by Git.
    ///
    /// We only scan ignored entries once the directory is expanded and
    /// exclude them from searches.
    pub is_ignored: bool,

    /// Whether this entry's canonical path is outside of the worktree.
    /// This means the entry is only accessible from the worktree root via a
    /// symlink.
    ///
    /// We only scan entries outside of the worktree once the symlinked
    /// directory is expanded. External entries are treated like gitignored
    /// entries in that they are not included in searches.
    pub is_external: bool,
    /// The entry's Git status, if it lies within a repository work directory.
    pub git_status: Option<GitFileStatus>,
    /// Whether this entry is considered to be a `.env` file.
    pub is_private: bool,
}
3007
/// The kind of a worktree [`Entry`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has not been scanned yet.
    UnloadedDir,
    /// A directory whose scan is scheduled but not yet complete.
    PendingDir,
    /// A fully-loaded directory.
    Dir,
    /// A file, carrying the character bag used for fuzzy matching.
    File(CharBag),
}
3015
/// How a path changed on disk, as reported in entry-update events.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
3031
/// Describes a change to a Git repository tracked by the worktree.
pub struct GitRepositoryChange {
    /// The previous state of the repository, if it already existed.
    pub old_repository: Option<RepositoryEntry>,
}
3036
/// A batch of entry changes: (path, entry id, kind of change).
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// A batch of repository changes: (work directory path, change).
pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
3039
3040impl Entry {
3041 fn new(
3042 path: Arc<Path>,
3043 metadata: &fs::Metadata,
3044 next_entry_id: &AtomicUsize,
3045 root_char_bag: CharBag,
3046 ) -> Self {
3047 Self {
3048 id: ProjectEntryId::new(next_entry_id),
3049 kind: if metadata.is_dir {
3050 EntryKind::PendingDir
3051 } else {
3052 EntryKind::File(char_bag_for_path(root_char_bag, &path))
3053 },
3054 path,
3055 inode: metadata.inode,
3056 mtime: metadata.mtime,
3057 is_symlink: metadata.is_symlink,
3058 is_ignored: false,
3059 is_external: false,
3060 is_private: false,
3061 git_status: None,
3062 }
3063 }
3064
3065 pub fn is_dir(&self) -> bool {
3066 self.kind.is_dir()
3067 }
3068
3069 pub fn is_file(&self) -> bool {
3070 self.kind.is_file()
3071 }
3072
3073 pub fn git_status(&self) -> Option<GitFileStatus> {
3074 self.git_status
3075 }
3076}
3077
3078impl EntryKind {
3079 pub fn is_dir(&self) -> bool {
3080 matches!(
3081 self,
3082 EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
3083 )
3084 }
3085
3086 pub fn is_unloaded(&self) -> bool {
3087 matches!(self, EntryKind::UnloadedDir)
3088 }
3089
3090 pub fn is_file(&self) -> bool {
3091 matches!(self, EntryKind::File(_))
3092 }
3093}
3094
3095impl sum_tree::Item for Entry {
3096 type Summary = EntrySummary;
3097
3098 fn summary(&self) -> Self::Summary {
3099 let non_ignored_count = if self.is_ignored || self.is_external {
3100 0
3101 } else {
3102 1
3103 };
3104 let file_count;
3105 let non_ignored_file_count;
3106 if self.is_file() {
3107 file_count = 1;
3108 non_ignored_file_count = non_ignored_count;
3109 } else {
3110 file_count = 0;
3111 non_ignored_file_count = 0;
3112 }
3113
3114 let mut statuses = GitStatuses::default();
3115 match self.git_status {
3116 Some(status) => match status {
3117 GitFileStatus::Added => statuses.added = 1,
3118 GitFileStatus::Modified => statuses.modified = 1,
3119 GitFileStatus::Conflict => statuses.conflict = 1,
3120 },
3121 None => {}
3122 }
3123
3124 EntrySummary {
3125 max_path: self.path.clone(),
3126 count: 1,
3127 non_ignored_count,
3128 file_count,
3129 non_ignored_file_count,
3130 statuses,
3131 }
3132 }
3133}
3134
3135impl sum_tree::KeyedItem for Entry {
3136 type Key = PathKey;
3137
3138 fn key(&self) -> Self::Key {
3139 PathKey(self.path.clone())
3140 }
3141}
3142
/// Aggregated data over a subtree of [`Entry`] items in `entries_by_path`.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest path in the subtree (paths are the sort key).
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries that are neither ignored nor external.
    non_ignored_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// File entries that are neither ignored nor external.
    non_ignored_file_count: usize,
    /// Aggregated Git status counts.
    statuses: GitStatuses,
}
3152
3153impl Default for EntrySummary {
3154 fn default() -> Self {
3155 Self {
3156 max_path: Arc::from(Path::new("")),
3157 count: 0,
3158 non_ignored_count: 0,
3159 file_count: 0,
3160 non_ignored_file_count: 0,
3161 statuses: Default::default(),
3162 }
3163 }
3164}
3165
3166impl sum_tree::Summary for EntrySummary {
3167 type Context = ();
3168
3169 fn add_summary(&mut self, rhs: &Self, _: &()) {
3170 self.max_path = rhs.max_path.clone();
3171 self.count += rhs.count;
3172 self.non_ignored_count += rhs.non_ignored_count;
3173 self.file_count += rhs.file_count;
3174 self.non_ignored_file_count += rhs.non_ignored_file_count;
3175 self.statuses += rhs.statuses;
3176 }
3177}
3178
/// An item of `entries_by_id`: maps a stable entry id back to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan during which this entry was last updated.
    scan_id: usize,
}
3186
3187impl sum_tree::Item for PathEntry {
3188 type Summary = PathEntrySummary;
3189
3190 fn summary(&self) -> Self::Summary {
3191 PathEntrySummary { max_id: self.id }
3192 }
3193}
3194
3195impl sum_tree::KeyedItem for PathEntry {
3196 type Key = ProjectEntryId;
3197
3198 fn key(&self) -> Self::Key {
3199 self.id
3200 }
3201}
3202
/// Summary for the `entries_by_id` tree: the greatest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
3207
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // Ids ascend across the tree, so the right-hand max is the new max.
        self.max_id = summary.max_id;
    }
}
3215
/// Allows seeking the `entries_by_id` tree by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
3221
/// Sort key for the `entries_by_path` tree: a worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
3224
3225impl Default for PathKey {
3226 fn default() -> Self {
3227 Self(Path::new("").into())
3228 }
3229}
3230
/// Allows seeking the `entries_by_path` tree by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
3236
/// The background task that scans a local worktree and keeps its snapshot in
/// sync with the filesystem.
struct BackgroundScanner {
    /// Mutable scan state shared between scan jobs.
    state: Mutex<BackgroundScannerState>,
    fs: Arc<dyn Fs>,
    /// Whether the underlying filesystem is case-sensitive.
    fs_case_sensitive: bool,
    /// Channel over which scan progress is reported to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: BackgroundExecutor,
    /// Incoming requests to rescan specific paths.
    scan_requests_rx: channel::Receiver<ScanRequest>,
    /// Incoming requests to eagerly load everything under a path prefix.
    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    next_entry_id: Arc<AtomicUsize>,
    /// Which lifecycle phase the scanner is currently in.
    phase: BackgroundScannerPhase,
}
3248
/// Lifecycle phase of the background scanner; affects how precisely file
/// events can be classified (see `PathChange::AddedOrUpdated`).
#[derive(PartialEq)]
enum BackgroundScannerPhase {
    /// Performing the initial full scan of the worktree.
    InitialScan,
    /// Draining events that arrived while the initial scan was running.
    EventsReceivedDuringInitialScan,
    /// Steady state: processing live filesystem events.
    Events,
}
3255
3256impl BackgroundScanner {
    /// Creates a scanner for the given snapshot. The scanner does nothing
    /// until `run` is called.
    fn new(
        snapshot: LocalSnapshot,
        next_entry_id: Arc<AtomicUsize>,
        fs: Arc<dyn Fs>,
        fs_case_sensitive: bool,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: BackgroundExecutor,
        scan_requests_rx: channel::Receiver<ScanRequest>,
        path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    ) -> Self {
        Self {
            fs,
            fs_case_sensitive,
            status_updates_tx,
            executor,
            scan_requests_rx,
            path_prefixes_to_scan_rx,
            next_entry_id,
            state: Mutex::new(BackgroundScannerState {
                // Keep a copy of the initial snapshot to diff against when
                // producing change sets.
                prev_snapshot: snapshot.snapshot.clone(),
                snapshot,
                scanned_dirs: Default::default(),
                path_prefixes_to_scan: Default::default(),
                paths_to_scan: Default::default(),
                removed_entry_ids: Default::default(),
                changed_paths: Default::default(),
            }),
            phase: BackgroundScannerPhase::InitialScan,
        }
    }
3287
    /// Main loop of the scanner: performs the initial scan, then services
    /// rescan requests, path-prefix requests, and filesystem events until the
    /// worktree is dropped.
    async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fs::Event>>>>) {
        use futures::FutureExt as _;

        // Populate ignores above the root.
        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.state
                    .lock()
                    .snapshot
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), false));
            }
        }

        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        {
            let mut state = self.state.lock();
            state.snapshot.scan_id += 1;
            if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
                let ignore_stack = state
                    .snapshot
                    .ignore_stack_for_abs_path(&root_abs_path, true);
                if ignore_stack.is_abs_path_ignored(&root_abs_path, true) {
                    root_entry.is_ignored = true;
                    state.insert_entry(root_entry.clone(), self.fs.as_ref());
                }
                state.enqueue_scan_dir(root_abs_path, &root_entry, &scan_job_tx);
            }
        };

        // Perform an initial scan of the directory.
        // Dropping our sender lets `scan_dirs` terminate once all queued
        // jobs (which hold clones of it) are finished.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut state = self.state.lock();
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
        }

        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
        if let Poll::Ready(Some(events)) = futures::poll!(fs_events_rx.next()) {
            let mut paths = fs::fs_events_paths(events);

            // Coalesce any further already-queued batches into one pass.
            while let Poll::Ready(Some(more_events)) = futures::poll!(fs_events_rx.next()) {
                paths.extend(fs::fs_events_paths(more_events));
            }
            self.process_events(paths).await;
        }

        // Continue processing events until the worktree is dropped.
        self.phase = BackgroundScannerPhase::Events;
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.scan_requests_rx.recv().fuse() => {
                    let Ok(request) = request else { break };
                    if !self.process_scan_request(request, false).await {
                        return;
                    }
                }

                path_prefix = self.path_prefixes_to_scan_rx.recv().fuse() => {
                    let Ok(path_prefix) = path_prefix else { break };
                    log::trace!("adding path prefix {:?}", path_prefix);

                    let did_scan = self.forcibly_load_paths(&[path_prefix.clone()]).await;
                    if did_scan {
                        let abs_path =
                        {
                            let mut state = self.state.lock();
                            state.path_prefixes_to_scan.insert(path_prefix.clone());
                            state.snapshot.abs_path.join(&path_prefix)
                        };

                        if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
                            self.process_events(vec![abs_path]).await;
                        }
                    }
                }

                events = fs_events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = fs::fs_events_paths(events);

                    // Coalesce already-queued batches into one pass.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(fs_events_rx.next()) {
                        paths.extend(fs::fs_events_paths(more_events));
                    }
                    self.process_events(paths.clone()).await;
                }
            }
        }
    }
3387
3388 async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
3389 log::debug!("rescanning paths {:?}", request.relative_paths);
3390
3391 request.relative_paths.sort_unstable();
3392 self.forcibly_load_paths(&request.relative_paths).await;
3393
3394 let root_path = self.state.lock().snapshot.abs_path.clone();
3395 let root_canonical_path = match self.fs.canonicalize(&root_path).await {
3396 Ok(path) => path,
3397 Err(err) => {
3398 log::error!("failed to canonicalize root path: {}", err);
3399 return false;
3400 }
3401 };
3402 let abs_paths = request
3403 .relative_paths
3404 .iter()
3405 .map(|path| {
3406 if path.file_name().is_some() {
3407 root_canonical_path.join(path)
3408 } else {
3409 root_canonical_path.clone()
3410 }
3411 })
3412 .collect::<Vec<_>>();
3413
3414 self.reload_entries_for_paths(
3415 root_path,
3416 root_canonical_path,
3417 &request.relative_paths,
3418 abs_paths,
3419 None,
3420 )
3421 .await;
3422 self.send_status_update(scanning, Some(request.done))
3423 }
3424
3425 async fn process_events(&mut self, mut abs_paths: Vec<PathBuf>) {
3426 let root_path = self.state.lock().snapshot.abs_path.clone();
3427 let root_canonical_path = match self.fs.canonicalize(&root_path).await {
3428 Ok(path) => path,
3429 Err(err) => {
3430 log::error!("failed to canonicalize root path: {}", err);
3431 return;
3432 }
3433 };
3434
3435 let mut relative_paths = Vec::with_capacity(abs_paths.len());
3436 let mut dot_git_paths_to_reload = HashSet::default();
3437 abs_paths.sort_unstable();
3438 abs_paths.dedup_by(|a, b| a.starts_with(&b));
3439 abs_paths.retain(|abs_path| {
3440 let snapshot = &self.state.lock().snapshot;
3441 {
3442 let mut is_git_related = false;
3443 if let Some(dot_git_dir) = abs_path
3444 .ancestors()
3445 .find(|ancestor| ancestor.file_name() == Some(*DOT_GIT))
3446 {
3447 let dot_git_path = dot_git_dir
3448 .strip_prefix(&root_canonical_path)
3449 .ok()
3450 .map(|path| path.to_path_buf())
3451 .unwrap_or_else(|| dot_git_dir.to_path_buf());
3452 dot_git_paths_to_reload.insert(dot_git_path.to_path_buf());
3453 is_git_related = true;
3454 }
3455
3456 let relative_path: Arc<Path> =
3457 if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
3458 path.into()
3459 } else {
3460 log::error!(
3461 "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
3462 );
3463 return false;
3464 };
3465
3466 let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
3467 snapshot
3468 .entry_for_path(parent)
3469 .map_or(false, |entry| entry.kind == EntryKind::Dir)
3470 });
3471 if !parent_dir_is_loaded {
3472 log::debug!("ignoring event {relative_path:?} within unloaded directory");
3473 return false;
3474 }
3475
3476 if snapshot.is_path_excluded(relative_path.to_path_buf()) {
3477 if !is_git_related {
3478 log::debug!("ignoring FS event for excluded path {relative_path:?}");
3479 }
3480 return false;
3481 }
3482
3483 relative_paths.push(relative_path);
3484 true
3485 }
3486 });
3487
3488 if dot_git_paths_to_reload.is_empty() && relative_paths.is_empty() {
3489 return;
3490 }
3491
3492 if !relative_paths.is_empty() {
3493 log::debug!("received fs events {:?}", relative_paths);
3494
3495 let (scan_job_tx, scan_job_rx) = channel::unbounded();
3496 self.reload_entries_for_paths(
3497 root_path,
3498 root_canonical_path,
3499 &relative_paths,
3500 abs_paths,
3501 Some(scan_job_tx.clone()),
3502 )
3503 .await;
3504 drop(scan_job_tx);
3505 self.scan_dirs(false, scan_job_rx).await;
3506
3507 let (scan_job_tx, scan_job_rx) = channel::unbounded();
3508 self.update_ignore_statuses(scan_job_tx).await;
3509 self.scan_dirs(false, scan_job_rx).await;
3510 }
3511
3512 {
3513 let mut state = self.state.lock();
3514 if !dot_git_paths_to_reload.is_empty() {
3515 if relative_paths.is_empty() {
3516 state.snapshot.scan_id += 1;
3517 }
3518 log::debug!("reloading repositories: {dot_git_paths_to_reload:?}");
3519 state.reload_repositories(&dot_git_paths_to_reload, self.fs.as_ref());
3520 }
3521 state.snapshot.completed_scan_id = state.snapshot.scan_id;
3522 for (_, entry_id) in mem::take(&mut state.removed_entry_ids) {
3523 state.scanned_dirs.remove(&entry_id);
3524 }
3525 }
3526
3527 self.send_status_update(false, None);
3528 }
3529
    /// Ensures that every path in `paths` is loaded into the snapshot by
    /// scanning the nearest unloaded ancestor directory of each one.
    /// Returns true if any scanning was actually performed.
    async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
        let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
        {
            let mut state = self.state.lock();
            let root_path = state.snapshot.abs_path.clone();
            for path in paths {
                // `ancestors()` yields the path itself first, so this finds
                // the deepest unloaded directory at or above `path` that is
                // present in the snapshot.
                for ancestor in path.ancestors() {
                    if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
                        if entry.kind == EntryKind::UnloadedDir {
                            let abs_path = root_path.join(ancestor);
                            state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
                            state.paths_to_scan.insert(path.clone());
                            break;
                        }
                    }
                }
            }
            // Close our sender so the loop below terminates once every queued
            // job (including jobs those jobs enqueue) has been drained.
            drop(scan_job_tx);
        }
        while let Some(job) = scan_job_rx.next().await {
            self.scan_dir(&job).await.log_err();
        }

        // `paths_to_scan` is non-empty exactly when at least one scan was enqueued.
        mem::take(&mut self.state.lock().paths_to_scan).len() > 0
    }
3555
    /// Drains `scan_jobs_rx` using one worker task per CPU, recursively
    /// scanning directories. Emits `ScanState::Started` first and, while
    /// `enable_progress_updates` is true, periodic progress updates.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the receiving side is gone, the worktree has been dropped.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.scan_requests_rx.recv().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race; it sends the update.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent one; adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // NOTE(review): errors on the root path ("") are
                                        // deliberately not logged — presumably expected in
                                        // some configurations; confirm before changing.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
3628
3629 fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
3630 let mut state = self.state.lock();
3631 if state.changed_paths.is_empty() && scanning {
3632 return true;
3633 }
3634
3635 let new_snapshot = state.snapshot.clone();
3636 let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
3637 let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
3638 state.changed_paths.clear();
3639
3640 self.status_updates_tx
3641 .unbounded_send(ScanState::Updated {
3642 snapshot: new_snapshot,
3643 changes,
3644 scanning,
3645 barrier,
3646 })
3647 .is_ok()
3648 }
3649
    /// Scans the single directory described by `job`: reads its children from
    /// the filesystem, builds an `Entry` for each, applies gitignore and git
    /// status information, and enqueues `ScanJob`s for subdirectories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let root_abs_path;
        let mut ignore_stack;
        let mut new_ignore;
        let root_char_bag;
        let next_entry_id;
        {
            // Copy out what we need so the lock is not held across the
            // filesystem reads below.
            let state = self.state.lock();
            let snapshot = &state.snapshot;
            root_abs_path = snapshot.abs_path().clone();
            if snapshot.is_path_excluded(job.path.to_path_buf()) {
                log::error!("skipping excluded directory {:?}", job.path);
                return Ok(());
            }
            log::debug!("scanning directory {:?}", job.path);
            ignore_stack = job.ignore_stack.clone();
            new_ignore = None;
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = self.next_entry_id.clone();
            drop(state);
        }

        let mut dotgit_path = None;
        let mut root_canonical_path = None;
        let mut new_entries: Vec<Entry> = Vec::new();
        // One optional job per directory entry, in the same relative order as
        // the directories in `new_entries`; `None` marks a recursive symlink.
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in the current directory.
                // Because `.gitignore` starts with a `.`, it is typically
                // encountered early in the listing, so there should rarely be
                // many earlier entries to fix up. Update the ignore stack
                // associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }
            // If we find a .git, we'll need to load the repository.
            else if child_name == *DOT_GIT {
                dotgit_path = Some(child_path.clone());
            }

            {
                // Excluded children are removed from the snapshot rather than scanned.
                let relative_path = job.path.join(child_name);
                let mut state = self.state.lock();
                if state.snapshot.is_path_excluded(relative_path.clone()) {
                    log::debug!("skipping excluded child entry {relative_path:?}");
                    state.remove_path(&relative_path);
                    continue;
                }
                drop(state);
            }

            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {child_abs_path:?}: {err:?}");
                    continue;
                }
            };

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if job.is_external {
                // Everything beneath an external directory is itself external.
                child_entry.is_external = true;
            } else if child_metadata.is_symlink {
                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
                    Ok(path) => path,
                    Err(err) => {
                        log::error!(
                            "error reading target of symlink {:?}: {:?}",
                            child_abs_path,
                            err
                        );
                        continue;
                    }
                };

                // lazily canonicalize the root path in order to determine if
                // symlinks point outside of the worktree.
                let root_canonical_path = match &root_canonical_path {
                    Some(path) => path,
                    None => match self.fs.canonicalize(&root_abs_path).await {
                        Ok(path) => root_canonical_path.insert(path),
                        Err(err) => {
                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
                            continue;
                        }
                    },
                };

                if !canonical_path.starts_with(root_canonical_path) {
                    child_entry.is_external = true;
                }
            }

            if child_entry.is_dir() {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path.clone(),
                        path: child_path,
                        is_external: child_entry.is_external,
                        ignore_stack: if child_entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                        containing_repository: job.containing_repository.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
                if !child_entry.is_ignored {
                    if let Some((repository_dir, repository, staged_statuses)) =
                        &job.containing_repository
                    {
                        if let Ok(repo_path) = child_entry.path.strip_prefix(&repository_dir.0) {
                            let repo_path = RepoPath(repo_path.into());
                            // Combine the cached staged status with a freshly
                            // computed unstaged status for this file.
                            child_entry.git_status = combine_git_statuses(
                                staged_statuses.get(&repo_path).copied(),
                                repository
                                    .lock()
                                    .unstaged_status(&repo_path, child_entry.mtime),
                            );
                        }
                    }
                }
            }

            {
                let relative_path = job.path.join(child_name);
                let state = self.state.lock();
                if state.snapshot.is_path_private(&relative_path) {
                    log::debug!("detected private file: {relative_path:?}");
                    child_entry.is_private = true;
                }
                drop(state)
            }

            new_entries.push(child_entry);
        }

        let mut state = self.state.lock();

        // Identify any subdirectories that should not be scanned.
        let mut job_ix = 0;
        for entry in &mut new_entries {
            state.reuse_entry_id(entry);
            if entry.is_dir() {
                if state.should_scan_directory(entry) {
                    job_ix += 1;
                } else {
                    log::debug!("defer scanning directory {:?}", entry.path);
                    entry.kind = EntryKind::UnloadedDir;
                    // Drop the job that corresponds to this directory.
                    new_jobs.remove(job_ix);
                }
            }
        }

        state.populate_dir(&job.path, new_entries, new_ignore);

        let repository =
            dotgit_path.and_then(|path| state.build_git_repository(path, self.fs.as_ref()));

        for new_job in new_jobs {
            if let Some(mut new_job) = new_job {
                // A repository discovered in this directory contains all of its
                // subdirectories.
                if let Some(containing_repository) = &repository {
                    new_job.containing_repository = Some(containing_repository.clone());
                }

                job.scan_queue
                    .try_send(new_job)
                    .expect("channel is unbounded");
            }
        }

        Ok(())
    }
3882
    /// Re-reads metadata for the given paths and updates their entries in the
    /// snapshot: present entries are re-inserted, missing ones removed, and —
    /// when `scan_queue_tx` is provided — rediscovered directories are queued
    /// for a recursive rescan.
    async fn reload_entries_for_paths(
        &self,
        root_abs_path: Arc<Path>,
        root_canonical_path: PathBuf,
        relative_paths: &[Arc<Path>],
        abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) {
        // Fetch metadata for all paths concurrently; each result is
        // `Ok(None)` when the path no longer exists (or should be treated
        // as removed).
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| async move {
                    let metadata = self.fs.metadata(abs_path).await?;
                    if let Some(metadata) = metadata {
                        let canonical_path = self.fs.canonicalize(abs_path).await?;

                        // If we're on a case-insensitive filesystem (default on macOS), we want
                        // to only ignore metadata for non-symlink files if their absolute-path matches
                        // the canonical-path.
                        // Because if not, this might be a case-only-renaming (`mv test.txt TEST.TXT`)
                        // and we want to ignore the metadata for the old path (`test.txt`) so it's
                        // treated as removed.
                        if !self.fs_case_sensitive && !metadata.is_symlink {
                            let canonical_file_name = canonical_path.file_name();
                            let file_name = abs_path.file_name();
                            if canonical_file_name != file_name {
                                return Ok(None);
                            }
                        }

                        anyhow::Ok(Some((metadata, canonical_path)))
                    } else {
                        Ok(None)
                    }
                })
                .collect::<Vec<_>>(),
        )
        .await;

        let mut state = self.state.lock();
        let snapshot = &mut state.snapshot;
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        let doing_recursive_update = scan_queue_tx.is_some();
        snapshot.scan_id += 1;
        // A non-recursive refresh completes within this call, so the scan can
        // be marked complete immediately.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            if matches!(metadata, Ok(None)) || doing_recursive_update {
                log::trace!("remove path {:?}", path);
                state.remove_path(path);
            }
        }

        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
            match metadata {
                Ok(Some((metadata, canonical_path))) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);

                    let mut fs_entry = Entry::new(
                        path.clone(),
                        metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                    );
                    let is_dir = fs_entry.is_dir();
                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
                    fs_entry.is_external = !canonical_path.starts_with(&root_canonical_path);
                    fs_entry.is_private = state.snapshot.is_path_private(path);

                    // Refresh the git status of non-ignored, non-external files
                    // that belong to a repository within the worktree.
                    if !is_dir && !fs_entry.is_ignored && !fs_entry.is_external {
                        if let Some((work_dir, repo)) = state.snapshot.local_repo_for_path(path) {
                            if let Ok(repo_path) = path.strip_prefix(work_dir.0) {
                                let repo_path = RepoPath(repo_path.into());
                                let repo = repo.repo_ptr.lock();
                                fs_entry.git_status = repo.status(&repo_path, fs_entry.mtime);
                            }
                        }
                    }

                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, fs_entry.is_dir()) {
                        if state.should_scan_directory(&fs_entry) {
                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
                        } else {
                            fs_entry.kind = EntryKind::UnloadedDir;
                        }
                    }

                    state.insert_entry(fs_entry, self.fs.as_ref());
                }
                Ok(None) => {
                    self.remove_repo_path(path, &mut state.snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file {abs_path:?} on event: {err:#}");
                }
            }
        }

        // Record which paths changed so the next status update can compute a
        // precise change set.
        util::extend_sorted(
            &mut state.changed_paths,
            relative_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );
    }
3997
3998 fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
3999 if !path
4000 .components()
4001 .any(|component| component.as_os_str() == *DOT_GIT)
4002 {
4003 if let Some(repository) = snapshot.repository_for_work_directory(path) {
4004 let entry = repository.work_directory.0;
4005 snapshot.git_repositories.remove(&entry);
4006 snapshot
4007 .snapshot
4008 .repository_entries
4009 .remove(&RepositoryWorkDirectory(path.into()));
4010 return Some(());
4011 }
4012 }
4013
4014 // TODO statuses
4015 // Track when a .git is removed and iterate over the file system there
4016
4017 Some(())
4018 }
4019
4020 async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
4021 use futures::FutureExt as _;
4022
4023 let mut snapshot = self.state.lock().snapshot.clone();
4024 let mut ignores_to_update = Vec::new();
4025 let mut ignores_to_delete = Vec::new();
4026 let abs_path = snapshot.abs_path.clone();
4027 for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
4028 if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
4029 if *needs_update {
4030 *needs_update = false;
4031 if snapshot.snapshot.entry_for_path(parent_path).is_some() {
4032 ignores_to_update.push(parent_abs_path.clone());
4033 }
4034 }
4035
4036 let ignore_path = parent_path.join(&*GITIGNORE);
4037 if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
4038 ignores_to_delete.push(parent_abs_path.clone());
4039 }
4040 }
4041 }
4042
4043 for parent_abs_path in ignores_to_delete {
4044 snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
4045 self.state
4046 .lock()
4047 .snapshot
4048 .ignores_by_parent_abs_path
4049 .remove(&parent_abs_path);
4050 }
4051
4052 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
4053 ignores_to_update.sort_unstable();
4054 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
4055 while let Some(parent_abs_path) = ignores_to_update.next() {
4056 while ignores_to_update
4057 .peek()
4058 .map_or(false, |p| p.starts_with(&parent_abs_path))
4059 {
4060 ignores_to_update.next().unwrap();
4061 }
4062
4063 let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
4064 smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
4065 abs_path: parent_abs_path,
4066 ignore_stack,
4067 ignore_queue: ignore_queue_tx.clone(),
4068 scan_queue: scan_job_tx.clone(),
4069 }))
4070 .unwrap();
4071 }
4072 drop(ignore_queue_tx);
4073
4074 self.executor
4075 .scoped(|scope| {
4076 for _ in 0..self.executor.num_cpus() {
4077 scope.spawn(async {
4078 loop {
4079 select_biased! {
4080 // Process any path refresh requests before moving on to process
4081 // the queue of ignore statuses.
4082 request = self.scan_requests_rx.recv().fuse() => {
4083 let Ok(request) = request else { break };
4084 if !self.process_scan_request(request, true).await {
4085 return;
4086 }
4087 }
4088
4089 // Recursively process directories whose ignores have changed.
4090 job = ignore_queue_rx.recv().fuse() => {
4091 let Ok(job) = job else { break };
4092 self.update_ignore_status(job, &snapshot).await;
4093 }
4094 }
4095 }
4096 });
4097 }
4098 })
4099 .await;
4100 }
4101
    /// Recomputes the ignore status of the entries directly inside
    /// `job.abs_path`, queueing child jobs for subdirectories and scan jobs
    /// for directories that just became unignored.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        log::trace!("update ignore status {:?}", job.abs_path);

        // Extend the inherited ignore stack with this directory's own
        // `.gitignore`, if it has one.
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };

                // Scan any directories that were previously ignored and weren't previously scanned.
                if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
                    let state = self.state.lock();
                    if state.should_scan_directory(&entry) {
                        state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
                    }
                }

                // Recurse into the subdirectory via the ignore queue.
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                        scan_queue: job.scan_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Record edits only for entries whose ignore status actually flipped.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let state = &mut self.state.lock();
        // Track the flipped paths so they are included in the next change set.
        for edit in &entries_by_path_edits {
            if let Edit::Insert(entry) = edit {
                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
                    state.changed_paths.insert(ix, entry.path.clone());
                }
            }
        }

        state
            .snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
4167
    /// Computes the set of `(path, entry id, change kind)` tuples describing
    /// how `new_snapshot` differs from `old_snapshot`, restricted to the
    /// subtrees rooted at `event_paths`.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> UpdatedEntriesSet {
        use BackgroundScannerPhase::*;
        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

        // Identify which paths have changed. Use the known set of changed
        // parent paths to optimize the search.
        let mut changes = Vec::new();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let mut last_newly_loaded_dir_path = None;
        old_paths.next(&());
        new_paths.next(&());
        for path in event_paths {
            let path = PathKey(path.clone());
            // Advance both cursors to the first entry at or after `path`.
            if old_paths.item().map_or(false, |e| e.path < path.0) {
                old_paths.seek_forward(&path, Bias::Left, &());
            }
            if new_paths.item().map_or(false, |e| e.path < path.0) {
                new_paths.seek_forward(&path, Bias::Left, &());
            }
            // Walk both snapshots in lockstep over the subtree rooted at `path`.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have left this subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                // Present only in the old snapshot.
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if self.phase == EventsReceivedDuringInitialScan {
                                    if old_entry.id != new_entry.id {
                                        changes.push((
                                            old_entry.path.clone(),
                                            old_entry.id,
                                            Removed,
                                        ));
                                    }
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.push((
                                        new_entry.path.clone(),
                                        new_entry.id,
                                        AddedOrUpdated,
                                    ));
                                } else if old_entry.id != new_entry.id {
                                    // Same path but a new id: the entry was replaced.
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
                                } else if old_entry != new_entry {
                                    if old_entry.kind.is_unloaded() {
                                        // A previously deferred directory was just
                                        // scanned; its children count as Loaded too.
                                        last_newly_loaded_dir_path = Some(&new_entry.path);
                                        changes.push((
                                            new_entry.path.clone(),
                                            new_entry.id,
                                            Loaded,
                                        ));
                                    } else {
                                        changes.push((
                                            new_entry.path.clone(),
                                            new_entry.id,
                                            Updated,
                                        ));
                                    }
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                // Present only in the new snapshot.
                                let is_newly_loaded = self.phase == InitialScan
                                    || last_newly_loaded_dir_path
                                        .as_ref()
                                        .map_or(false, |dir| new_entry.path.starts_with(&dir));
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    if is_newly_loaded { Loaded } else { Added },
                                ));
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        let is_newly_loaded = self.phase == InitialScan
                            || last_newly_loaded_dir_path
                                .as_ref()
                                .map_or(false, |dir| new_entry.path.starts_with(&dir));
                        changes.push((
                            new_entry.path.clone(),
                            new_entry.id,
                            if is_newly_loaded { Loaded } else { Added },
                        ));
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes.into()
    }
4285
4286 async fn progress_timer(&self, running: bool) {
4287 if !running {
4288 return futures::future::pending().await;
4289 }
4290
4291 #[cfg(any(test, feature = "test-support"))]
4292 if self.fs.is_fake() {
4293 return self.executor.simulate_random_delay().await;
4294 }
4295
4296 smol::Timer::after(Duration::from_millis(100)).await;
4297 }
4298}
4299
4300fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
4301 let mut result = root_char_bag;
4302 result.extend(
4303 path.to_string_lossy()
4304 .chars()
4305 .map(|c| c.to_ascii_lowercase()),
4306 );
4307 result
4308}
4309
/// A unit of work for the background scanner: load the contents of a single
/// directory.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// The same directory, relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which jobs for subdirectories are pushed.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
    /// Whether this directory lies outside the worktree (reached via symlink).
    is_external: bool,
    /// The containing git repository, if any: its work directory, a handle to
    /// the repository, and the cached staged statuses of its files.
    containing_repository: Option<(
        RepositoryWorkDirectory,
        Arc<Mutex<dyn GitRepository>>,
        TreeMap<RepoPath, GitFileStatus>,
    )>,
}
4323
/// A unit of work for recomputing ignore statuses beneath one directory.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory whose children are reconsidered.
    abs_path: Arc<Path>,
    /// Gitignore rules in effect at this directory.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue for follow-up jobs covering subdirectories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
    /// Queue used to scan directories that have just become unignored.
    scan_queue: Sender<ScanJob>,
}
4330
/// Test-support extension methods for worktree models.
pub trait WorktreeModelHandle {
    /// Drains any pending FS events so that subsequent assertions observe a
    /// settled worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
4338
impl WorktreeModelHandle for Model<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        // Create and then delete a sentinel file; observing both transitions in
        // the worktree proves the event stream has caught up to the present.
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_some())
                .await;

            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();
            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_none())
                .await;

            // Finally wait for any in-flight scan to finish.
            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
4379
/// A `sum_tree` dimension accumulating entry counts for a traversal up to
/// `max_path`.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// The greatest path covered by the accumulated summaries.
    max_path: &'a Path,
    /// All entries, including directories and ignored entries.
    count: usize,
    /// Entries (files and directories) that are not ignored.
    non_ignored_count: usize,
    /// Files only, including ignored ones.
    file_count: usize,
    /// Files that are not ignored.
    non_ignored_file_count: usize,
}
4388
4389impl<'a> TraversalProgress<'a> {
4390 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
4391 match (include_ignored, include_dirs) {
4392 (true, true) => self.count,
4393 (true, false) => self.file_count,
4394 (false, true) => self.non_ignored_count,
4395 (false, false) => self.non_ignored_file_count,
4396 }
4397 }
4398}
4399
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Accumulates the counts from a subtree summary. `max_path` is replaced
    // rather than summed: it tracks the rightmost path covered so far.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.non_ignored_count += summary.non_ignored_count;
        self.file_count += summary.file_count;
        self.non_ignored_file_count += summary.non_ignored_file_count;
    }
}
4409
impl<'a> Default for TraversalProgress<'a> {
    // Initial progress: zero counts, positioned before all paths (the empty
    // path compares less than or equal to every other path).
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}
4421
/// Per-subtree tallies of entries with git file statuses, used as a
/// `sum_tree` dimension alongside `TraversalProgress`.
#[derive(Clone, Debug, Default, Copy)]
struct GitStatuses {
    // Number of entries with `GitFileStatus::Added`.
    added: usize,
    // Number of entries with `GitFileStatus::Modified`.
    modified: usize,
    // Number of entries with `GitFileStatus::Conflict`.
    conflict: usize,
}
4428
4429impl AddAssign for GitStatuses {
4430 fn add_assign(&mut self, rhs: Self) {
4431 self.added += rhs.added;
4432 self.modified += rhs.modified;
4433 self.conflict += rhs.conflict;
4434 }
4435}
4436
4437impl Sub for GitStatuses {
4438 type Output = GitStatuses;
4439
4440 fn sub(self, rhs: Self) -> Self::Output {
4441 GitStatuses {
4442 added: self.added - rhs.added,
4443 modified: self.modified - rhs.modified,
4444 conflict: self.conflict - rhs.conflict,
4445 }
4446 }
4447}
4448
impl<'a> sum_tree::Dimension<'a, EntrySummary> for GitStatuses {
    // Accumulates the git status tallies carried by each subtree summary.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        *self += summary.statuses
    }
}
4454
/// A cursor-backed iterator over a worktree snapshot's entries, with
/// optional filtering of directory and git-ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // When false, git-ignored entries are skipped.
    include_ignored: bool,
    // When false, directory entries are skipped.
    include_dirs: bool,
}
4460
impl<'a> Traversal<'a> {
    /// Advances the cursor to the next entry that matches this traversal's
    /// dir/ignored filters, returning the result of the underlying seek.
    pub fn advance(&mut self) -> bool {
        // Seeking to `end_offset() + 1` in the *filtered* count skips over
        // entries excluded by the filters, since those don't contribute to it.
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + 1,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skips the current entry and all of its descendants, stopping at the
    /// next filter-matching entry that is not beneath the current entry's
    /// path. Returns false if the traversal is exhausted first.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek past every entry whose path starts with the current
            // entry's path (see `TraversalTarget::PathSuccessor`).
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep seeking if the landed entry is excluded by the filters.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the cursor's current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Filtered count of entries strictly before the current position.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }

    /// Filtered count of entries through the end of the current entry.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_dirs, self.include_ignored)
    }
}
4508
4509impl<'a> Iterator for Traversal<'a> {
4510 type Item = &'a Entry;
4511
4512 fn next(&mut self) -> Option<Self::Item> {
4513 if let Some(item) = self.entry() {
4514 self.advance();
4515 Some(item)
4516 } else {
4517 None
4518 }
4519 }
4520}
4521
/// A position to seek a `Traversal`'s cursor to.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path itself.
    Path(&'a Path),
    // Seek to the first entry whose path does NOT start with the given path,
    // i.e. just past the path and all of its descendants.
    PathSuccessor(&'a Path),
    // Seek to the given position in the filtered entry count.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
4532
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    // Orders this target relative to the cursor's accumulated progress, so
    // the sum-tree cursor can binary-search its way to the target.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Plain path target: ordinary lexicographic path comparison.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // Treat the first position whose max path is no longer a
                // descendant of `path` as "equal", i.e. the seek destination;
                // anything still under `path` sorts before the target.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            // Count target: compare against the filtered entry count.
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
4555
// When seeking with the combined (progress, git-statuses) dimension, only
// the traversal progress determines ordering; the statuses just ride along.
impl<'a, 'b> SeekTarget<'a, EntrySummary, (TraversalProgress<'a>, GitStatuses)>
    for TraversalTarget<'b>
{
    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitStatuses), _: &()) -> Ordering {
        // Delegate to the `TraversalProgress`-only comparison above.
        self.cmp(&cursor_location.0, &())
    }
}
4563
/// Iterates over the direct children of `parent_path`, skipping each
/// child's own descendants.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
4568
4569impl<'a> Iterator for ChildEntriesIter<'a> {
4570 type Item = &'a Entry;
4571
4572 fn next(&mut self) -> Option<Self::Item> {
4573 if let Some(item) = self.traversal.entry() {
4574 if item.path.starts_with(&self.parent_path) {
4575 self.traversal.advance_to_sibling();
4576 return Some(item);
4577 }
4578 }
4579 None
4580 }
4581}
4582
/// Iterates over every entry whose path is beneath `parent_path`,
/// including nested descendants.
pub struct DescendentEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
4587
4588impl<'a> Iterator for DescendentEntriesIter<'a> {
4589 type Item = &'a Entry;
4590
4591 fn next(&mut self) -> Option<Self::Item> {
4592 if let Some(item) = self.traversal.entry() {
4593 if item.path.starts_with(&self.parent_path) {
4594 self.traversal.advance();
4595 return Some(item);
4596 }
4597 }
4598 None
4599 }
4600}
4601
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a worktree entry into its wire representation for the
    /// collaboration protocol. The path is sent lossily as UTF-8.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
            is_external: entry.is_external,
            git_status: entry.git_status.map(git_status_to_proto),
        }
    }
}
4617
4618impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
4619 type Error = anyhow::Error;
4620
4621 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
4622 if let Some(mtime) = entry.mtime {
4623 let kind = if entry.is_dir {
4624 EntryKind::Dir
4625 } else {
4626 let mut char_bag = *root_char_bag;
4627 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
4628 EntryKind::File(char_bag)
4629 };
4630 let path: Arc<Path> = PathBuf::from(entry.path).into();
4631 Ok(Entry {
4632 id: ProjectEntryId::from_proto(entry.id),
4633 kind,
4634 path,
4635 inode: entry.inode,
4636 mtime: mtime.into(),
4637 is_symlink: entry.is_symlink,
4638 is_ignored: entry.is_ignored,
4639 is_external: entry.is_external,
4640 git_status: git_status_from_proto(entry.git_status),
4641 is_private: false,
4642 })
4643 } else {
4644 Err(anyhow!(
4645 "missing mtime in remote worktree entry {:?}",
4646 entry.path
4647 ))
4648 }
4649 }
4650}
4651
4652fn combine_git_statuses(
4653 staged: Option<GitFileStatus>,
4654 unstaged: Option<GitFileStatus>,
4655) -> Option<GitFileStatus> {
4656 if let Some(staged) = staged {
4657 if let Some(unstaged) = unstaged {
4658 if unstaged != staged {
4659 Some(GitFileStatus::Modified)
4660 } else {
4661 Some(staged)
4662 }
4663 } else {
4664 Some(staged)
4665 }
4666 } else {
4667 unstaged
4668 }
4669}
4670
4671fn git_status_from_proto(git_status: Option<i32>) -> Option<GitFileStatus> {
4672 git_status.and_then(|status| {
4673 proto::GitStatus::from_i32(status).map(|status| match status {
4674 proto::GitStatus::Added => GitFileStatus::Added,
4675 proto::GitStatus::Modified => GitFileStatus::Modified,
4676 proto::GitStatus::Conflict => GitFileStatus::Conflict,
4677 })
4678 })
4679}
4680
4681fn git_status_to_proto(status: GitFileStatus) -> i32 {
4682 match status {
4683 GitFileStatus::Added => proto::GitStatus::Added as i32,
4684 GitFileStatus::Modified => proto::GitStatus::Modified as i32,
4685 GitFileStatus::Conflict => proto::GitStatus::Conflict as i32,
4686 }
4687}