1mod ignore;
2mod worktree_settings;
3#[cfg(test)]
4mod worktree_tests;
5
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{Context as _, Result, anyhow};
8use clock::ReplicaId;
9use collections::{HashMap, HashSet, VecDeque};
10use fs::{Fs, MTime, PathEvent, RemoveOptions, Watcher, copy_recursive};
11use futures::{
12 FutureExt as _, Stream, StreamExt,
13 channel::{
14 mpsc::{self, UnboundedSender},
15 oneshot,
16 },
17 select_biased,
18 task::Poll,
19};
20use fuzzy::CharBag;
21use git::{
22 COMMIT_MESSAGE, DOT_GIT, FSMONITOR_DAEMON, GITIGNORE, INDEX_LOCK, LFS_DIR,
23 repository::RepoPath, status::GitSummary,
24};
25use gpui::{
26 App, AppContext as _, AsyncApp, BackgroundExecutor, Context, Entity, EventEmitter, Task,
27};
28use ignore::IgnoreStack;
29use language::DiskState;
30
31use parking_lot::Mutex;
32use paths::local_settings_folder_relative_path;
33use postage::{
34 barrier,
35 prelude::{Sink as _, Stream as _},
36 watch,
37};
38use rpc::{
39 AnyProtoClient,
40 proto::{self, FromProto, ToProto, split_worktree_update},
41};
42pub use settings::WorktreeId;
43use settings::{Settings, SettingsLocation, SettingsStore};
44use smallvec::{SmallVec, smallvec};
45use smol::channel::{self, Sender};
46use std::{
47 any::Any,
48 cmp::Ordering,
49 collections::hash_map,
50 convert::TryFrom,
51 ffi::OsStr,
52 fmt,
53 future::Future,
54 mem::{self},
55 ops::{Deref, DerefMut},
56 path::{Component, Path, PathBuf},
57 pin::Pin,
58 sync::{
59 Arc,
60 atomic::{AtomicUsize, Ordering::SeqCst},
61 },
62 time::{Duration, Instant},
63};
64use sum_tree::{Bias, Edit, KeyedItem, SeekTarget, SumTree, Summary, TreeMap, TreeSet, Unit};
65use text::{LineEnding, Rope};
66use util::{
67 ResultExt,
68 paths::{PathMatcher, SanitizedPath, home_dir},
69};
70pub use worktree_settings::WorktreeSettings;
71
/// Debounce latency applied to filesystem watcher events before they are processed.
pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
73
/// A set of local or remote files that are being opened as part of a project.
/// Responsible for tracking related FS (for local)/collab (for remote) events and corresponding updates.
/// Stores git repositories data and the diagnostics for the file(s).
///
/// Has an absolute path, and may be set to be visible in Zed UI or not.
/// May correspond to a directory or a single file.
/// Possible examples:
/// * a drag and dropped file — may be added as an invisible, "ephemeral" entry to the current worktree
/// * a directory opened in Zed — may be added as a visible entry to the current worktree
///
/// Uses [`Entry`] to track the state of each file/directory, can look up absolute paths for entries.
pub enum Worktree {
    /// A worktree backed directly by the local filesystem.
    Local(LocalWorktree),
    /// A worktree replicated from a remote host over the collaboration protocol.
    Remote(RemoteWorktree),
}
89
/// The outcome of creating an entry in the worktree.
#[derive(Debug)]
pub enum CreatedEntry {
    /// Got created and indexed by the worktree, receiving a corresponding entry.
    Included(Entry),
    /// Got created, but not indexed due to falling under exclusion filters.
    /// Only the absolute path of the created file is available.
    Excluded { abs_path: PathBuf },
}
98
/// A file loaded from a worktree along with its text contents.
pub struct LoadedFile {
    pub file: Arc<File>,
    pub text: String,
}
103
/// A file loaded from a worktree along with its raw binary contents.
pub struct LoadedBinaryFile {
    pub file: Arc<File>,
    pub content: Vec<u8>,
}
108
/// A worktree backed by the local filesystem, kept up to date by background
/// scanner tasks that watch for FS events.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Channel used to request rescans of specific relative paths.
    scan_requests_tx: channel::Sender<ScanRequest>,
    // Channel used to request scanning of everything under a path prefix.
    path_prefixes_to_scan_tx: channel::Sender<PathPrefixScanRequest>,
    // Watch pair reporting whether a scan is currently in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_tasks: Vec<Task<()>>,
    // Present while something is observing snapshot updates (e.g. a collab session).
    update_observer: Option<UpdateObservationState>,
    fs: Arc<dyn Fs>,
    fs_case_sensitive: bool,
    visible: bool,
    next_entry_id: Arc<AtomicUsize>,
    settings: WorktreeSettings,
    share_private_files: bool,
}
123
/// Request to scan all entries under a path prefix; `done` is signaled on completion.
pub struct PathPrefixScanRequest {
    path: Arc<Path>,
    done: SmallVec<[barrier::Sender; 1]>,
}
128
/// Request to rescan a specific set of relative paths; `done` is signaled on completion.
struct ScanRequest {
    relative_paths: Vec<Arc<Path>>,
    done: SmallVec<[barrier::Sender; 1]>,
}
133
/// A worktree mirrored from a remote host; its snapshot is kept in sync by
/// applying `proto::UpdateWorktree` messages received over RPC.
pub struct RemoteWorktree {
    snapshot: Snapshot,
    // Snapshot updated on a background task, plus the queue of updates not yet
    // applied to the foreground `snapshot`.
    background_snapshot: Arc<Mutex<(Snapshot, Vec<proto::UpdateWorktree>)>>,
    project_id: u64,
    client: AnyProtoClient,
    file_scan_inclusions: PathMatcher,
    // Sender feeding the background task; `None` once updates are finished.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Present while something is observing snapshot updates.
    update_observer: Option<mpsc::UnboundedSender<proto::UpdateWorktree>>,
    // Waiters to notify once a given scan id has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    visible: bool,
    disconnected: bool,
}
147
/// An immutable view of the worktree's entries at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: SanitizedPath,
    root_name: String,
    root_char_bag: CharBag,
    // Entries indexed by relative path, and by entry id, respectively.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    always_included_entries: Vec<Arc<Path>>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
170
/// This path corresponds to the 'content path' of a repository in relation
/// to Zed's project root.
/// In the majority of the cases, this is the folder that contains the .git folder.
/// But if a sub-folder of a git repository is opened, this corresponds to the
/// project root and the .git folder is located in a parent directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub enum WorkDirectory {
    /// The repository root lives inside the project, at `relative_path`.
    InProject {
        relative_path: Arc<Path>,
    },
    /// The repository root is a parent of the project root.
    AboveProject {
        /// Absolute path of the repository's work directory.
        absolute_path: Arc<Path>,
        /// The project root's location, relative to the repository root.
        location_in_repo: Arc<Path>,
    },
}
186
187impl WorkDirectory {
188 #[cfg(test)]
189 fn in_project(path: &str) -> Self {
190 let path = Path::new(path);
191 Self::InProject {
192 relative_path: path.into(),
193 }
194 }
195
196 //#[cfg(test)]
197 //fn canonicalize(&self) -> Self {
198 // match self {
199 // WorkDirectory::InProject { relative_path } => WorkDirectory::InProject {
200 // relative_path: relative_path.clone(),
201 // },
202 // WorkDirectory::AboveProject {
203 // absolute_path,
204 // location_in_repo,
205 // } => WorkDirectory::AboveProject {
206 // absolute_path: absolute_path.canonicalize().unwrap().into(),
207 // location_in_repo: location_in_repo.clone(),
208 // },
209 // }
210 //}
211
212 fn path_key(&self) -> PathKey {
213 match self {
214 WorkDirectory::InProject { relative_path } => PathKey(relative_path.clone()),
215 WorkDirectory::AboveProject { .. } => PathKey(Path::new("").into()),
216 }
217 }
218
219 /// Returns true if the given path is a child of the work directory.
220 ///
221 /// Note that the path may not be a member of this repository, if there
222 /// is a repository in a directory between these two paths
223 /// external .git folder in a parent folder of the project root.
224 #[track_caller]
225 pub fn directory_contains(&self, path: impl AsRef<Path>) -> bool {
226 let path = path.as_ref();
227 debug_assert!(path.is_relative());
228 match self {
229 WorkDirectory::InProject { relative_path } => path.starts_with(relative_path),
230 WorkDirectory::AboveProject { .. } => true,
231 }
232 }
233
234 /// relativize returns the given project path relative to the root folder of the
235 /// repository.
236 /// If the root of the repository (and its .git folder) are located in a parent folder
237 /// of the project root folder, then the returned RepoPath is relative to the root
238 /// of the repository and not a valid path inside the project.
239 pub fn relativize(&self, path: &Path) -> Result<RepoPath> {
240 // path is assumed to be relative to worktree root.
241 debug_assert!(path.is_relative());
242 match self {
243 WorkDirectory::InProject { relative_path } => Ok(path
244 .strip_prefix(relative_path)
245 .map_err(|_| {
246 anyhow!(
247 "could not relativize {:?} against {:?}",
248 path,
249 relative_path
250 )
251 })?
252 .into()),
253 WorkDirectory::AboveProject {
254 location_in_repo, ..
255 } => {
256 // Avoid joining a `/` to location_in_repo in the case of a single-file worktree.
257 if path == Path::new("") {
258 Ok(RepoPath(location_in_repo.clone()))
259 } else {
260 Ok(location_in_repo.join(path).into())
261 }
262 }
263 }
264 }
265
266 /// This is the opposite operation to `relativize` above
267 pub fn try_unrelativize(&self, path: &RepoPath) -> Option<Arc<Path>> {
268 match self {
269 WorkDirectory::InProject { relative_path } => Some(relative_path.join(path).into()),
270 WorkDirectory::AboveProject {
271 location_in_repo, ..
272 } => {
273 // If we fail to strip the prefix, that means this status entry is
274 // external to this worktree, and we definitely won't have an entry_id
275 path.strip_prefix(location_in_repo).ok().map(Into::into)
276 }
277 }
278 }
279
    /// Converts a repository-relative path back into a worktree-relative path,
    /// even when the repo path falls outside the worktree (in which case the
    /// result contains leading `..` components).
    pub fn unrelativize(&self, path: &RepoPath) -> Arc<Path> {
        match self {
            WorkDirectory::InProject { relative_path } => relative_path.join(path).into(),
            WorkDirectory::AboveProject {
                location_in_repo, ..
            } => {
                if &path.0 == location_in_repo {
                    // Single-file worktree
                    return location_in_repo
                        .file_name()
                        .map(Path::new)
                        .unwrap_or(Path::new(""))
                        .into();
                }
                // Walk up the ancestors of `location_in_repo`, prepending one
                // `..` per step, until `path` falls under the remaining prefix.
                // Terminates because stripping the empty prefix always succeeds
                // for a relative path.
                let mut location_in_repo = &**location_in_repo;
                let mut parents = PathBuf::new();
                loop {
                    if let Ok(segment) = path.strip_prefix(location_in_repo) {
                        return parents.join(segment).into();
                    }
                    location_in_repo = location_in_repo.parent().unwrap_or(Path::new(""));
                    parents.push(Component::ParentDir);
                }
            }
        }
    }
306
307 pub fn display_name(&self) -> String {
308 match self {
309 WorkDirectory::InProject { relative_path } => relative_path.display().to_string(),
310 WorkDirectory::AboveProject {
311 absolute_path,
312 location_in_repo,
313 } => {
314 let num_of_dots = location_in_repo.components().count();
315
316 "../".repeat(num_of_dots)
317 + &absolute_path
318 .file_name()
319 .map(|s| s.to_string_lossy())
320 .unwrap_or_default()
321 + "/"
322 }
323 }
324 }
325}
326
327impl Default for WorkDirectory {
328 fn default() -> Self {
329 Self::InProject {
330 relative_path: Arc::from(Path::new("")),
331 }
332 }
333}
334
335#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
336pub struct WorkDirectoryEntry(ProjectEntryId);
337
338impl Deref for WorkDirectoryEntry {
339 type Target = ProjectEntryId;
340
341 fn deref(&self) -> &Self::Target {
342 &self.0
343 }
344}
345
346impl From<ProjectEntryId> for WorkDirectoryEntry {
347 fn from(value: ProjectEntryId) -> Self {
348 WorkDirectoryEntry(value)
349 }
350}
351
/// A [`Snapshot`] augmented with local-only state: gitignores, git
/// repositories, and a handle to the root directory.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    /// The file handle of the root dir
    /// (so we can find it after it's been moved)
    root_file_handle: Option<Arc<dyn fs::FileHandle>>,
}
365
/// Mutable state owned by the background scanner while it processes FS events.
struct BackgroundScannerState {
    snapshot: LocalSnapshot,
    // Directories whose contents have already been scanned.
    scanned_dirs: HashSet<ProjectEntryId>,
    path_prefixes_to_scan: HashSet<Arc<Path>>,
    paths_to_scan: HashSet<Arc<Path>>,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entries: HashMap<u64, Entry>,
    changed_paths: Vec<Arc<Path>>,
    prev_snapshot: Snapshot,
}
379
/// Metadata describing one git repository discovered inside a local worktree.
#[derive(Debug, Clone)]
struct LocalRepositoryEntry {
    // Project entry id of the repository's work directory.
    work_directory_id: ProjectEntryId,
    work_directory: WorkDirectory,
    work_directory_abs_path: Arc<Path>,
    // Scan id at which the .git directory was last scanned.
    git_dir_scan_id: usize,
    original_dot_git_abs_path: Arc<Path>,
    /// Absolute path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    dot_git_dir_abs_path: Arc<Path>,
    /// Absolute path to the .git file, if we're in a git worktree.
    dot_git_worktree_abs_path: Option<Arc<Path>>,
}
393
// Store repositories in a SumTree keyed/summarized by their work directory path.
impl sum_tree::Item for LocalRepositoryEntry {
    type Summary = PathSummary<Unit>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.work_directory.path_key().0,
            item_summary: Unit,
        }
    }
}
404
// Repositories are keyed by the path of their work directory.
impl KeyedItem for LocalRepositoryEntry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        self.work_directory.path_key()
    }
}
412
413//impl LocalRepositoryEntry {
414// pub fn repo(&self) -> &Arc<dyn GitRepository> {
415// &self.repo_ptr
416// }
417//}
418
// Convenience: expose the `WorkDirectory` methods directly on the entry.
impl Deref for LocalRepositoryEntry {
    type Target = WorkDirectory;

    fn deref(&self) -> &Self::Target {
        &self.work_directory
    }
}
426
// A `LocalSnapshot` can be used anywhere a plain `Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
440
/// Progress messages sent by the background scanner to the worktree.
#[derive(Debug)]
enum ScanState {
    /// A scan pass has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        changes: UpdatedEntriesSet,
        // Barriers to release once this update has been processed.
        barrier: SmallVec<[barrier::Sender; 1]>,
        // Whether the scanner is still working after this update.
        scanning: bool,
    },
    /// The root directory itself moved; `None` means it could not be located.
    RootUpdated {
        new_path: Option<SanitizedPath>,
    },
}
454
/// State held while an observer (e.g. a collab session) consumes snapshot updates.
struct UpdateObservationState {
    snapshots_tx: mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet)>,
    // Signaled to resume sending after the observer reports readiness.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
460
/// Events emitted by a [`Worktree`] entity.
#[derive(Clone)]
pub enum Event {
    /// Entries were added, removed, or modified.
    UpdatedEntries(UpdatedEntriesSet),
    /// Git repository state changed.
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
    /// An entry (or one of its ancestors) was deleted.
    DeletedEntry(ProjectEntryId),
}
467
// Relative path denoting the worktree root in settings lookups.
const EMPTY_PATH: &str = "";
469
// Allows `Worktree` entities to emit `Event`s to GPUI subscribers.
impl EventEmitter<Event> for Worktree {}
471
472impl Worktree {
    /// Creates a worktree backed by the local filesystem, rooted at `path`.
    ///
    /// Stats the root path up front, builds the initial snapshot (seeding a
    /// root entry when the path exists), observes settings changes so the
    /// scanners can be restarted, and starts the background scanner.
    pub async fn local(
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncApp,
    ) -> Result<Entity<Self>> {
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        // Fall back to case-sensitive semantics when the probe fails.
        let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });

        // Keep a handle to the root dir so it can be found again if it moves.
        let root_file_handle = fs.open_handle(&abs_path).await.log_err();

        cx.new(move |cx: &mut Context<Worktree>| {
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot::new(
                    cx.entity_id().as_u64(),
                    abs_path
                        .file_name()
                        .map_or(String::new(), |f| f.to_string_lossy().to_string()),
                    abs_path.clone(),
                ),
                root_file_handle,
            };

            let worktree_id = snapshot.id();
            let settings_location = Some(SettingsLocation {
                worktree_id,
                path: Path::new(EMPTY_PATH),
            });

            let settings = WorktreeSettings::get(settings_location, cx).clone();
            // Restart the background scanners whenever this worktree's
            // settings change.
            cx.observe_global::<SettingsStore>(move |this, cx| {
                if let Self::Local(this) = this {
                    let settings = WorktreeSettings::get(settings_location, cx).clone();
                    if this.settings != settings {
                        this.settings = settings;
                        this.restart_background_scanners(cx);
                    }
                }
            })
            .detach();

            let share_private_files = false;
            if let Some(metadata) = metadata {
                // Seed the snapshot with an entry for the worktree root.
                let mut entry = Entry::new(
                    Arc::from(Path::new("")),
                    &metadata,
                    &next_entry_id,
                    snapshot.root_char_bag,
                    None,
                );
                if !metadata.is_dir {
                    // NOTE(review): assumes a single-file worktree path always
                    // has a file name; a filesystem-root path would panic here.
                    entry.is_private = !share_private_files
                        && settings.is_path_private(abs_path.file_name().unwrap().as_ref());
                }
                snapshot.insert_entry(entry, fs.as_ref());
            }

            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
            let mut worktree = LocalWorktree {
                share_private_files,
                next_entry_id,
                snapshot,
                is_scanning: watch::channel_with(true),
                update_observer: None,
                scan_requests_tx,
                path_prefixes_to_scan_tx,
                _background_scanner_tasks: Vec::new(),
                fs,
                fs_case_sensitive,
                visible,
                settings,
            };
            worktree.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
            Worktree::Local(worktree)
        })
    }
563
    /// Creates a worktree that mirrors one hosted remotely, identified by
    /// `project_id` and described by `worktree` metadata.
    ///
    /// Updates arrive over RPC, are applied to a background snapshot, and are
    /// then drained into the foreground entity which notifies observers.
    pub fn remote(
        project_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: AnyProtoClient,
        cx: &mut App,
    ) -> Entity<Self> {
        cx.new(|cx: &mut Context<Self>| {
            let snapshot = Snapshot::new(
                worktree.id,
                worktree.root_name,
                Arc::<Path>::from_proto(worktree.abs_path),
            );

            let background_snapshot = Arc::new(Mutex::new((
                snapshot.clone(),
                Vec::<proto::UpdateWorktree>::new(),
            )));
            let (background_updates_tx, mut background_updates_rx) =
                mpsc::unbounded::<proto::UpdateWorktree>();
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            let worktree_id = snapshot.id();
            let settings_location = Some(SettingsLocation {
                worktree_id,
                path: Path::new(EMPTY_PATH),
            });

            let settings = WorktreeSettings::get(settings_location, cx).clone();
            let worktree = RemoteWorktree {
                client,
                project_id,
                replica_id,
                snapshot,
                file_scan_inclusions: settings.file_scan_inclusions.clone(),
                background_snapshot: background_snapshot.clone(),
                updates_tx: Some(background_updates_tx),
                update_observer: None,
                snapshot_subscriptions: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            };

            // Apply updates to a separate snapshot in a background task, then
            // send them to a foreground task which updates the model.
            cx.background_spawn(async move {
                while let Some(update) = background_updates_rx.next().await {
                    {
                        let mut lock = background_snapshot.lock();
                        lock.0
                            .apply_remote_update(update.clone(), &settings.file_scan_inclusions)
                            .log_err();
                        lock.1.push(update);
                    }
                    snapshot_updated_tx.send(()).await.ok();
                }
            })
            .detach();

            // On the foreground task, update to the latest snapshot and notify
            // any update observer of all updates that led to that snapshot.
            cx.spawn(async move |this, cx| {
                while (snapshot_updated_rx.recv().await).is_some() {
                    this.update(cx, |this, cx| {
                        let mut entries_changed = false;
                        let this = this.as_remote_mut().unwrap();
                        {
                            let mut lock = this.background_snapshot.lock();
                            this.snapshot = lock.0.clone();
                            for update in lock.1.drain(..) {
                                entries_changed |= !update.updated_entries.is_empty()
                                    || !update.removed_entries.is_empty();
                                if let Some(tx) = &this.update_observer {
                                    tx.unbounded_send(update).ok();
                                }
                            }
                        };

                        if entries_changed {
                            cx.emit(Event::UpdatedEntries(Arc::default()));
                        }
                        cx.notify();
                        // Release any waiters whose scan id has now been observed.
                        while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                            if this.observed_snapshot(*scan_id) {
                                let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                let _ = tx.send(());
                            } else {
                                break;
                            }
                        }
                    })?;
                }
                anyhow::Ok(())
            })
            .detach();

            Worktree::Remote(worktree)
        })
    }
663
664 pub fn as_local(&self) -> Option<&LocalWorktree> {
665 if let Worktree::Local(worktree) = self {
666 Some(worktree)
667 } else {
668 None
669 }
670 }
671
672 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
673 if let Worktree::Remote(worktree) = self {
674 Some(worktree)
675 } else {
676 None
677 }
678 }
679
680 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
681 if let Worktree::Local(worktree) = self {
682 Some(worktree)
683 } else {
684 None
685 }
686 }
687
688 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
689 if let Worktree::Remote(worktree) = self {
690 Some(worktree)
691 } else {
692 None
693 }
694 }
695
696 pub fn is_local(&self) -> bool {
697 matches!(self, Worktree::Local(_))
698 }
699
700 pub fn is_remote(&self) -> bool {
701 !self.is_local()
702 }
703
704 pub fn settings_location(&self, _: &Context<Self>) -> SettingsLocation<'static> {
705 SettingsLocation {
706 worktree_id: self.id(),
707 path: Path::new(EMPTY_PATH),
708 }
709 }
710
    /// Returns a clone of the current snapshot (local-only state is dropped).
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.snapshot.clone(),
            Worktree::Remote(worktree) => worktree.snapshot.clone(),
        }
    }
717
    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
724
725 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
726 proto::WorktreeMetadata {
727 id: self.id().to_proto(),
728 root_name: self.root_name().to_string(),
729 visible: self.is_visible(),
730 abs_path: self.abs_path().to_proto(),
731 }
732 }
733
    /// The latest scan id whose scan (and all preceding scans) have completed.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
740
    /// Whether this worktree is shown in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
747
    /// The replica id of this worktree; a local worktree is always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
754
    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone().into(),
            Worktree::Remote(worktree) => worktree.abs_path.clone().into(),
        }
    }
761
762 pub fn root_file(&self, cx: &Context<Self>) -> Option<Arc<File>> {
763 let entry = self.root_entry()?;
764 Some(File::for_entry(entry.clone(), cx.entity()))
765 }
766
    /// Begins streaming snapshot updates for `project_id` to `callback`.
    /// The callback's returned future resolves to whether sending succeeded.
    pub fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        match self {
            Worktree::Local(this) => this.observe_updates(project_id, cx, callback),
            Worktree::Remote(this) => this.observe_updates(project_id, cx, callback),
        }
    }
777
778 pub fn stop_observing_updates(&mut self) {
779 match self {
780 Worktree::Local(this) => {
781 this.update_observer.take();
782 }
783 Worktree::Remote(this) => {
784 this.update_observer.take();
785 }
786 }
787 }
788
    /// Whether something is currently observing snapshot updates (test helper).
    #[cfg(any(test, feature = "test-support"))]
    pub fn has_update_observer(&self) -> bool {
        match self {
            Worktree::Local(this) => this.update_observer.is_some(),
            Worktree::Remote(this) => this.update_observer.is_some(),
        }
    }
796
    /// Loads the text contents of the file at `path`.
    /// Currently only supported for local worktrees.
    pub fn load_file(&self, path: &Path, cx: &Context<Worktree>) -> Task<Result<LoadedFile>> {
        match self {
            Worktree::Local(this) => this.load_file(path, cx),
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktrees can't yet load files")))
            }
        }
    }
805
    /// Loads the raw bytes of the file at `path`.
    /// Currently only supported for local worktrees.
    pub fn load_binary_file(
        &self,
        path: &Path,
        cx: &Context<Worktree>,
    ) -> Task<Result<LoadedBinaryFile>> {
        match self {
            Worktree::Local(this) => this.load_binary_file(path, cx),
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktrees can't yet load binary files")))
            }
        }
    }
818
    /// Writes `text` to the file at `path` using the given line ending.
    /// Currently only supported for local worktrees.
    pub fn write_file(
        &self,
        path: &Path,
        text: Rope,
        line_ending: LineEnding,
        cx: &Context<Worktree>,
    ) -> Task<Result<Arc<File>>> {
        match self {
            Worktree::Local(this) => this.write_file(path, text, line_ending, cx),
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktree can't yet write files")))
            }
        }
    }
833
    /// Creates a file or directory at `path` within this worktree.
    ///
    /// Local worktrees perform the operation directly. Remote worktrees send a
    /// `CreateProjectEntry` request to the host and wait for the resulting
    /// entry to appear in the local replica of the snapshot.
    pub fn create_entry(
        &mut self,
        path: impl Into<Arc<Path>>,
        is_directory: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let path: Arc<Path> = path.into();
        let worktree_id = self.id();
        match self {
            Worktree::Local(this) => this.create_entry(path, is_directory, cx),
            Worktree::Remote(this) => {
                let project_id = this.project_id;
                let request = this.client.request(proto::CreateProjectEntry {
                    worktree_id: worktree_id.to_proto(),
                    project_id,
                    path: path.as_ref().to_proto(),
                    is_directory,
                });
                cx.spawn(async move |this, cx| {
                    let response = request.await?;
                    match response.entry {
                        Some(entry) => this
                            .update(cx, |worktree, cx| {
                                worktree.as_remote_mut().unwrap().insert_entry(
                                    entry,
                                    response.worktree_scan_id as usize,
                                    cx,
                                )
                            })?
                            .await
                            .map(CreatedEntry::Included),
                        // The host created the file but excluded it from
                        // indexing; report only its absolute path.
                        None => {
                            let abs_path = this.update(cx, |worktree, _| {
                                worktree
                                    .absolutize(&path)
                                    .with_context(|| format!("absolutizing {path:?}"))
                            })??;
                            Ok(CreatedEntry::Excluded { abs_path })
                        }
                    }
                })
            }
        }
    }
878
    /// Deletes (or moves to trash, when `trash` is true) the entry with `entry_id`.
    ///
    /// Emits [`Event::DeletedEntry`] for the entry and all of its descendants.
    /// Returns `None` if the entry does not exist.
    pub fn delete_entry(
        &mut self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &mut Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let task = match self {
            Worktree::Local(this) => this.delete_entry(entry_id, trash, cx),
            Worktree::Remote(this) => this.delete_entry(entry_id, trash, cx),
        }?;

        let entry = match &*self {
            Worktree::Local(this) => this.entry_for_id(entry_id),
            Worktree::Remote(this) => this.entry_for_id(entry_id),
        }?;

        // Collect the deleted entry plus every descendant, then notify.
        let mut ids = vec![entry_id];
        let path = &*entry.path;

        self.get_children_ids_recursive(path, &mut ids);

        for id in ids {
            cx.emit(Event::DeletedEntry(id));
        }
        Some(task)
    }
905
906 fn get_children_ids_recursive(&self, path: &Path, ids: &mut Vec<ProjectEntryId>) {
907 let children_iter = self.child_entries(path);
908 for child in children_iter {
909 ids.push(child.id);
910 self.get_children_ids_recursive(&child.path, ids);
911 }
912 }
913
    /// Renames the entry with `entry_id` to `new_path` (worktree-relative).
    pub fn rename_entry(
        &mut self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Self>,
    ) -> Task<Result<CreatedEntry>> {
        let new_path = new_path.into();
        match self {
            Worktree::Local(this) => this.rename_entry(entry_id, new_path, cx),
            Worktree::Remote(this) => this.rename_entry(entry_id, new_path, cx),
        }
    }
926
    /// Copies the entry with `entry_id` to `new_path`.
    ///
    /// Remote worktrees forward a `CopyProjectEntry` request to the host and
    /// wait for the copied entry to appear in the local snapshot replica.
    /// Resolves to `None` when the host reports no resulting entry.
    pub fn copy_entry(
        &mut self,
        entry_id: ProjectEntryId,
        relative_worktree_source_path: Option<PathBuf>,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Self>,
    ) -> Task<Result<Option<Entry>>> {
        let new_path: Arc<Path> = new_path.into();
        match self {
            Worktree::Local(this) => {
                this.copy_entry(entry_id, relative_worktree_source_path, new_path, cx)
            }
            Worktree::Remote(this) => {
                let relative_worktree_source_path = relative_worktree_source_path
                    .map(|relative_worktree_source_path| relative_worktree_source_path.to_proto());
                let response = this.client.request(proto::CopyProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                    relative_worktree_source_path,
                    new_path: new_path.to_proto(),
                });
                cx.spawn(async move |this, cx| {
                    let response = response.await?;
                    match response.entry {
                        Some(entry) => this
                            .update(cx, |worktree, cx| {
                                worktree.as_remote_mut().unwrap().insert_entry(
                                    entry,
                                    response.worktree_scan_id as usize,
                                    cx,
                                )
                            })?
                            .await
                            .map(Some),
                        None => Ok(None),
                    }
                })
            }
        }
    }
967
    /// Copies files from outside the worktree into `target_directory`.
    /// Only supported for local worktrees.
    pub fn copy_external_entries(
        &mut self,
        target_directory: PathBuf,
        paths: Vec<Arc<Path>>,
        overwrite_existing_files: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<Vec<ProjectEntryId>>> {
        match self {
            Worktree::Local(this) => {
                this.copy_external_entries(target_directory, paths, overwrite_existing_files, cx)
            }
            _ => Task::ready(Err(anyhow!(
                "Copying external entries is not supported for remote worktrees"
            ))),
        }
    }
984
    /// Ensures the directory entry with `entry_id` has been scanned.
    ///
    /// Remote worktrees ask the host to expand the entry and then wait until
    /// the corresponding scan id has been observed locally.
    /// Returns `None` if the entry does not exist.
    pub fn expand_entry(
        &mut self,
        entry_id: ProjectEntryId,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        match self {
            Worktree::Local(this) => this.expand_entry(entry_id, cx),
            Worktree::Remote(this) => {
                let response = this.client.request(proto::ExpandProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                });
                Some(cx.spawn(async move |this, cx| {
                    let response = response.await?;
                    this.update(cx, |this, _| {
                        this.as_remote_mut()
                            .unwrap()
                            .wait_for_snapshot(response.worktree_scan_id as usize)
                    })?
                    .await?;
                    Ok(())
                }))
            }
        }
    }
1010
    /// Recursively expands the entry with `entry_id` and all of its descendants.
    ///
    /// Remote worktrees ask the host to expand and then wait until the
    /// corresponding scan id has been observed locally.
    /// Returns `None` if the entry does not exist.
    pub fn expand_all_for_entry(
        &mut self,
        entry_id: ProjectEntryId,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        match self {
            Worktree::Local(this) => this.expand_all_for_entry(entry_id, cx),
            Worktree::Remote(this) => {
                let response = this.client.request(proto::ExpandAllForProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                });
                Some(cx.spawn(async move |this, cx| {
                    let response = response.await?;
                    this.update(cx, |this, _| {
                        this.as_remote_mut()
                            .unwrap()
                            .wait_for_snapshot(response.worktree_scan_id as usize)
                    })?
                    .await?;
                    Ok(())
                }))
            }
        }
    }
1036
    /// RPC handler for `CreateProjectEntry`: creates the entry and reports the
    /// scan id at which the request was received.
    pub async fn handle_create_entry(
        this: Entity<Self>,
        request: proto::CreateProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, entry) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.create_entry(
                    Arc::<Path>::from_proto(request.path),
                    request.is_directory,
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            // An excluded file was created but is not tracked by the worktree.
            entry: match &entry.await? {
                CreatedEntry::Included(entry) => Some(entry.into()),
                CreatedEntry::Excluded { .. } => None,
            },
            worktree_scan_id: scan_id as u64,
        })
    }
1060
    /// RPC handler for `DeleteProjectEntry`: deletes (or trashes) the entry and
    /// reports the scan id at which the request was received.
    pub async fn handle_delete_entry(
        this: Entity<Self>,
        request: proto::DeleteProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.delete_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    request.use_trash,
                    cx,
                ),
            )
        })?;
        task.ok_or_else(|| anyhow!("invalid entry"))?.await?;
        Ok(proto::ProjectEntryResponse {
            entry: None,
            worktree_scan_id: scan_id as u64,
        })
    }
1082
    /// RPC handler for `ExpandProjectEntry`: expands the entry and reports the
    /// scan id observed after expansion completed.
    pub async fn handle_expand_entry(
        this: Entity<Self>,
        request: proto::ExpandProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ExpandProjectEntryResponse> {
        let task = this.update(&mut cx, |this, cx| {
            this.expand_entry(ProjectEntryId::from_proto(request.entry_id), cx)
        })?;
        task.ok_or_else(|| anyhow!("no such entry"))?.await?;
        let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
        Ok(proto::ExpandProjectEntryResponse {
            worktree_scan_id: scan_id as u64,
        })
    }
1097
    /// RPC handler for `ExpandAllForProjectEntry`: recursively expands the
    /// entry and reports the scan id observed after expansion completed.
    pub async fn handle_expand_all_for_entry(
        this: Entity<Self>,
        request: proto::ExpandAllForProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ExpandAllForProjectEntryResponse> {
        let task = this.update(&mut cx, |this, cx| {
            this.expand_all_for_entry(ProjectEntryId::from_proto(request.entry_id), cx)
        })?;
        task.ok_or_else(|| anyhow!("no such entry"))?.await?;
        let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
        Ok(proto::ExpandAllForProjectEntryResponse {
            worktree_scan_id: scan_id as u64,
        })
    }
1112
    /// RPC handler: renames an entry to the requested path and returns the
    /// refreshed entry, or `None` when the destination is excluded from the
    /// worktree.
    pub async fn handle_rename_entry(
        this: Entity<Self>,
        request: proto::RenameProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        // Capture the scan id and start the rename in one entity update so
        // both reflect the same worktree state.
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.rename_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    Arc::<Path>::from_proto(request.new_path),
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            entry: match &task.await? {
                CreatedEntry::Included(entry) => Some(entry.into()),
                CreatedEntry::Excluded { .. } => None,
            },
            worktree_scan_id: scan_id as u64,
        })
    }
1136
    /// RPC handler: copies an existing entry to `new_path`, optionally
    /// resolving the source from a caller-provided worktree-relative path.
    pub async fn handle_copy_entry(
        this: Entity<Self>,
        request: proto::CopyProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            let relative_worktree_source_path = request
                .relative_worktree_source_path
                .map(PathBuf::from_proto);
            (
                this.scan_id(),
                this.copy_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    relative_worktree_source_path,
                    PathBuf::from_proto(request.new_path),
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            // None when the source entry no longer exists or the destination
            // was excluded (see `copy_entry`).
            entry: task.await?.as_ref().map(|e| e.into()),
            worktree_scan_id: scan_id as u64,
        })
    }
1161
1162 pub fn dot_git_abs_path(&self, work_directory: &WorkDirectory) -> PathBuf {
1163 let mut path = match work_directory {
1164 WorkDirectory::InProject { relative_path } => self.abs_path().join(relative_path),
1165 WorkDirectory::AboveProject { absolute_path, .. } => absolute_path.as_ref().to_owned(),
1166 };
1167 path.push(".git");
1168 path
1169 }
1170
    /// Returns true when this worktree is rooted at a single file rather than
    /// a directory (in which case there is no root directory).
    pub fn is_single_file(&self) -> bool {
        self.root_dir().is_none()
    }
1174}
1175
1176impl LocalWorktree {
    /// Returns the filesystem implementation backing this local worktree.
    pub fn fs(&self) -> &Arc<dyn Fs> {
        &self.fs
    }
1180
1181 pub fn is_path_private(&self, path: &Path) -> bool {
1182 !self.share_private_files && self.settings.is_path_private(path)
1183 }
1184
    /// Tears down the current background scanner tasks and starts fresh ones,
    /// e.g. after the worktree root moved or private-file sharing was toggled.
    fn restart_background_scanners(&mut self, cx: &Context<Worktree>) {
        let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
        let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
        // Swap in fresh request channels; the old scanner's receivers go away
        // when its tasks are replaced in `start_background_scanner`.
        self.scan_requests_tx = scan_requests_tx;
        self.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;

        self.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
        let always_included_entries = mem::take(&mut self.snapshot.always_included_entries);
        log::debug!(
            "refreshing entries for the following always included paths: {:?}",
            always_included_entries
        );

        // Cleans up old always included entries to ensure they get updated properly. Otherwise,
        // nested always included entries may not get updated and will result in out-of-date info.
        self.refresh_entries_for_paths(always_included_entries);
    }
1202
    /// Spawns the two long-lived tasks that keep this worktree in sync with
    /// the filesystem: the background scanner itself, and a foreground task
    /// that applies the scanner's state updates to this entity.
    fn start_background_scanner(
        &mut self,
        scan_requests_rx: channel::Receiver<ScanRequest>,
        path_prefixes_to_scan_rx: channel::Receiver<PathPrefixScanRequest>,
        cx: &Context<Worktree>,
    ) {
        let snapshot = self.snapshot();
        let share_private_files = self.share_private_files;
        let next_entry_id = self.next_entry_id.clone();
        let fs = self.fs.clone();
        let settings = self.settings.clone();
        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
        let background_scanner = cx.background_spawn({
            let abs_path = snapshot.abs_path.as_path().to_path_buf();
            let background = cx.background_executor().clone();
            async move {
                let (events, watcher) = fs.watch(&abs_path, FS_WATCH_LATENCY).await;
                // If probing the filesystem fails, assume case sensitivity.
                let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
                    log::error!("Failed to determine whether filesystem is case sensitive: {e:#}");
                    true
                });

                let mut scanner = BackgroundScanner {
                    fs,
                    fs_case_sensitive,
                    status_updates_tx: scan_states_tx,
                    executor: background,
                    scan_requests_rx,
                    path_prefixes_to_scan_rx,
                    next_entry_id,
                    state: Mutex::new(BackgroundScannerState {
                        prev_snapshot: snapshot.snapshot.clone(),
                        snapshot,
                        scanned_dirs: Default::default(),
                        path_prefixes_to_scan: Default::default(),
                        paths_to_scan: Default::default(),
                        removed_entries: Default::default(),
                        changed_paths: Default::default(),
                    }),
                    phase: BackgroundScannerPhase::InitialScan,
                    share_private_files,
                    settings,
                    watcher,
                };

                scanner
                    .run(Box::pin(events.map(|events| events.into_iter().collect())))
                    .await;
            }
        });
        // Forward scanner state changes into this entity on the foreground,
        // stopping when either the channel closes or the entity is dropped.
        let scan_state_updater = cx.spawn(async move |this, cx| {
            while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
                this.update(cx, |this, cx| {
                    let this = this.as_local_mut().unwrap();
                    match state {
                        ScanState::Started => {
                            *this.is_scanning.0.borrow_mut() = true;
                        }
                        ScanState::Updated {
                            snapshot,
                            changes,
                            barrier,
                            scanning,
                        } => {
                            *this.is_scanning.0.borrow_mut() = scanning;
                            this.set_snapshot(snapshot, changes, cx);
                            // Dropping the barrier wakes callers waiting on
                            // `refresh_entries_for_paths`.
                            drop(barrier);
                        }
                        ScanState::RootUpdated { new_path } => {
                            this.update_abs_path_and_refresh(new_path, cx);
                        }
                    }
                })
                .ok();
            }
        });
        // Replacing the previous tasks drops (and thereby cancels) them.
        self._background_scanner_tasks = vec![background_scanner, scan_state_updater];
        self.is_scanning = watch::channel_with(true);
    }
1282
    /// Installs a new snapshot produced by the background scanner, forwards it
    /// to any remote update observer, and emits change events for entries and
    /// git repositories.
    fn set_snapshot(
        &mut self,
        mut new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut Context<Worktree>,
    ) {
        // Diff git repository state before the old snapshot is replaced.
        let repo_changes = self.changed_repos(&self.snapshot, &mut new_snapshot);
        self.snapshot = new_snapshot;

        if let Some(share) = self.update_observer.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((self.snapshot.clone(), entry_changes.clone()))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }
1306
    /// Computes which git repositories were added, removed, or modified
    /// between the two snapshots by merge-joining the two `git_repositories`
    /// maps, which iterate in order of their work-directory entry id.
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &mut LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let new_repos = new_snapshot.git_repositories.clone();
        let mut new_repos = new_repos.iter().peekable();

        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        // Entry id only in the new snapshot: repository added.
                        Ordering::Less => {
                            changes.push(UpdatedGitRepository {
                                work_directory_id: new_entry_id,
                                old_work_directory_abs_path: None,
                                new_work_directory_abs_path: Some(
                                    new_repo.work_directory_abs_path.clone(),
                                ),
                                dot_git_abs_path: Some(new_repo.original_dot_git_abs_path.clone()),
                            });
                            new_repos.next();
                        }
                        // Present in both: report only if it was rescanned or
                        // its work directory moved.
                        Ordering::Equal => {
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id
                                || new_repo.work_directory_abs_path
                                    != old_repo.work_directory_abs_path
                            {
                                changes.push(UpdatedGitRepository {
                                    work_directory_id: new_entry_id,
                                    old_work_directory_abs_path: Some(
                                        old_repo.work_directory_abs_path.clone(),
                                    ),
                                    new_work_directory_abs_path: Some(
                                        new_repo.work_directory_abs_path.clone(),
                                    ),
                                    dot_git_abs_path: Some(
                                        new_repo.original_dot_git_abs_path.clone(),
                                    ),
                                });
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        // Entry id only in the old snapshot: repository removed.
                        Ordering::Greater => {
                            changes.push(UpdatedGitRepository {
                                work_directory_id: old_entry_id,
                                old_work_directory_abs_path: Some(
                                    old_repo.work_directory_abs_path.clone(),
                                ),
                                new_work_directory_abs_path: None,
                                dot_git_abs_path: None,
                            });
                            old_repos.next();
                        }
                    }
                }
                // Remaining new entries are all additions.
                (Some((entry_id, repo)), None) => {
                    changes.push(UpdatedGitRepository {
                        work_directory_id: entry_id,
                        old_work_directory_abs_path: None,
                        new_work_directory_abs_path: Some(repo.work_directory_abs_path.clone()),
                        dot_git_abs_path: Some(repo.original_dot_git_abs_path.clone()),
                    });
                    new_repos.next();
                }
                // Remaining old entries are all removals.
                (None, Some((entry_id, repo))) => {
                    changes.push(UpdatedGitRepository {
                        work_directory_id: entry_id,
                        old_work_directory_abs_path: Some(repo.work_directory_abs_path.clone()),
                        new_work_directory_abs_path: None,
                        dot_git_abs_path: Some(repo.original_dot_git_abs_path.clone()),
                    });
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Helper to turn the iterators' `(&K, &V)` items into owned pairs.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
1394
1395 pub fn scan_complete(&self) -> impl Future<Output = ()> + use<> {
1396 let mut is_scanning_rx = self.is_scanning.1.clone();
1397 async move {
1398 let mut is_scanning = *is_scanning_rx.borrow();
1399 while is_scanning {
1400 if let Some(value) = is_scanning_rx.recv().await {
1401 is_scanning = value;
1402 } else {
1403 break;
1404 }
1405 }
1406 }
1407 }
1408
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
1412
    /// Returns a clone of this worktree's settings.
    pub fn settings(&self) -> WorktreeSettings {
        self.settings.clone()
    }
1416
    /// Loads a file's raw bytes, refreshing its worktree entry along the way.
    /// Files on excluded paths (no entry) get a synthetic `File` built from
    /// their on-disk metadata instead.
    fn load_binary_file(
        &self,
        path: &Path,
        cx: &Context<Worktree>,
    ) -> Task<Result<LoadedBinaryFile>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let entry = self.refresh_entry(path.clone(), None, cx);
        let is_private = self.is_path_private(path.as_ref());

        let worktree = cx.weak_entity();
        cx.background_spawn(async move {
            let abs_path = abs_path?;
            let content = fs.load_bytes(&abs_path).await?;

            let worktree = worktree
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            let file = match entry.await? {
                Some(entry) => File::for_entry(entry, worktree),
                // `refresh_entry` yields None for excluded paths; fall back
                // to reading the file's metadata directly.
                None => {
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    Arc::new(File {
                        entry_id: None,
                        worktree,
                        path,
                        disk_state: DiskState::Present {
                            mtime: metadata.mtime,
                        },
                        is_local: true,
                        is_private,
                    })
                }
            };

            Ok(LoadedBinaryFile { file, content })
        })
    }
1464
    /// Loads a file's text content, refreshing its worktree entry along the
    /// way. Files on excluded paths (no entry) get a synthetic `File` built
    /// from their on-disk metadata instead. Bails on very large files.
    fn load_file(&self, path: &Path, cx: &Context<Worktree>) -> Task<Result<LoadedFile>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let entry = self.refresh_entry(path.clone(), None, cx);
        let is_private = self.is_path_private(path.as_ref());

        cx.spawn(async move |this, _cx| {
            let abs_path = abs_path?;
            // WARN: Temporary workaround for #27283.
            // We are not efficient with our memory usage per file, and use in excess of 64GB for a 10GB file
            // Therefore, as a temporary workaround to prevent system freezes, we just bail before opening a file
            // if it is too large
            // 5GB seems to be more reasonable, peaking at ~16GB, while 6GB jumps up to >24GB which seems like a
            // reasonable limit
            {
                const FILE_SIZE_MAX: u64 = 6 * 1024 * 1024 * 1024; // 6GB
                if let Ok(Some(metadata)) = fs.metadata(&abs_path).await {
                    if metadata.len >= FILE_SIZE_MAX {
                        anyhow::bail!("File is too large to load");
                    }
                }
            }
            let text = fs.load(&abs_path).await?;

            let worktree = this
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            let file = match entry.await? {
                Some(entry) => File::for_entry(entry, worktree),
                // `refresh_entry` yields None for excluded paths; fall back
                // to reading the file's metadata directly.
                None => {
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    Arc::new(File {
                        entry_id: None,
                        worktree,
                        path,
                        disk_state: DiskState::Present {
                            mtime: metadata.mtime,
                        },
                        is_local: true,
                        is_private,
                    })
                }
            };

            Ok(LoadedFile { file, text })
        })
    }
1521
1522 /// Find the lowest path in the worktree's datastructures that is an ancestor
1523 fn lowest_ancestor(&self, path: &Path) -> PathBuf {
1524 let mut lowest_ancestor = None;
1525 for path in path.ancestors() {
1526 if self.entry_for_path(path).is_some() {
1527 lowest_ancestor = Some(path.to_path_buf());
1528 break;
1529 }
1530 }
1531
1532 lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
1533 }
1534
    /// Creates a file or directory at `path` on disk, then refreshes the new
    /// entry and every newly-created intermediate directory so the snapshot
    /// reflects them. Paths excluded by settings are still written to disk but
    /// reported as `CreatedEntry::Excluded`.
    fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let path = path.into();
        let abs_path = match self.absolutize(&path) {
            Ok(path) => path,
            Err(e) => return Task::ready(Err(e.context(format!("absolutizing path {path:?}")))),
        };
        let path_excluded = self.settings.is_path_excluded(&abs_path);
        let fs = self.fs.clone();
        let task_abs_path = abs_path.clone();
        // Perform the filesystem write off the main thread.
        let write = cx.background_spawn(async move {
            if is_dir {
                fs.create_dir(&task_abs_path)
                    .await
                    .with_context(|| format!("creating directory {task_abs_path:?}"))
            } else {
                fs.save(&task_abs_path, &Rope::default(), LineEnding::default())
                    .await
                    .with_context(|| format!("creating file {task_abs_path:?}"))
            }
        });

        // Deepest already-tracked ancestor; everything below it is new.
        let lowest_ancestor = self.lowest_ancestor(&path);
        cx.spawn(async move |this, cx| {
            write.await?;
            if path_excluded {
                return Ok(CreatedEntry::Excluded { abs_path });
            }

            // Refresh each intermediate directory created between the lowest
            // tracked ancestor and the new entry, plus the entry itself.
            let (result, refreshes) = this.update(cx, |this, cx| {
                let mut refreshes = Vec::new();
                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
                for refresh_path in refresh_paths.ancestors() {
                    if refresh_path == Path::new("") {
                        continue;
                    }
                    let refresh_full_path = lowest_ancestor.join(refresh_path);

                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
                        refresh_full_path.into(),
                        None,
                        cx,
                    ));
                }
                (
                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
                    refreshes,
                )
            })?;
            for refresh in refreshes {
                refresh.await.log_err();
            }

            Ok(result
                .await?
                .map(CreatedEntry::Included)
                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
        })
    }
1598
    /// Saves `text` to `path` on disk with the given line ending, then
    /// refreshes the worktree entry and returns the corresponding `File`.
    /// Excluded paths yield a synthetic `File` built from on-disk metadata.
    fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &Context<Worktree>,
    ) -> Task<Result<Arc<File>>> {
        let path = path.into();
        let fs = self.fs.clone();
        let is_private = self.is_path_private(&path);
        let Ok(abs_path) = self.absolutize(&path) else {
            return Task::ready(Err(anyhow!("invalid path {path:?}")));
        };

        // Perform the filesystem write off the main thread.
        let write = cx.background_spawn({
            let fs = fs.clone();
            let abs_path = abs_path.clone();
            async move { fs.save(&abs_path, &text, line_ending).await }
        });

        cx.spawn(async move |this, cx| {
            write.await?;
            let entry = this
                .update(cx, |this, cx| {
                    this.as_local_mut()
                        .unwrap()
                        .refresh_entry(path.clone(), None, cx)
                })?
                .await?;
            let worktree = this.upgrade().ok_or_else(|| anyhow!("worktree dropped"))?;
            if let Some(entry) = entry {
                Ok(File::for_entry(entry, worktree))
            } else {
                // Excluded path: no entry exists, so read the metadata
                // directly to construct the file.
                let metadata = fs
                    .metadata(&abs_path)
                    .await
                    .with_context(|| {
                        format!("Fetching metadata after saving the excluded buffer {abs_path:?}")
                    })?
                    .with_context(|| {
                        format!("Excluded buffer {path:?} got removed during saving")
                    })?;
                Ok(Arc::new(File {
                    worktree,
                    path,
                    disk_state: DiskState::Present {
                        mtime: metadata.mtime,
                    },
                    entry_id: None,
                    is_local: true,
                    is_private,
                }))
            }
        })
    }
1654
    /// Deletes the entry with the given id from disk (moving it to the trash
    /// when `trash` is set), then rescans its path so the snapshot reflects
    /// the removal. Returns `None` if the id doesn't resolve to an entry.
    fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let fs = self.fs.clone();

        // Perform the filesystem deletion off the main thread; directories
        // are removed recursively.
        let delete = cx.background_spawn(async move {
            if entry.is_file() {
                if trash {
                    fs.trash_file(&abs_path?, Default::default()).await?;
                } else {
                    fs.remove_file(&abs_path?, Default::default()).await?;
                }
            } else if trash {
                fs.trash_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            } else {
                fs.remove_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(entry.path)
        });

        Some(cx.spawn(async move |this, cx| {
            let path = delete.await?;
            // Wait for the scanner to process the removal before resolving.
            this.update(cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entries_for_paths(vec![path])
            })?
            .recv()
            .await;
            Ok(())
        }))
    }
1706
    /// Rename an entry.
    ///
    /// `new_path` is the new relative path to the worktree root.
    /// If the root entry is renamed then `new_path` is the new root name instead.
    ///
    /// Returns `CreatedEntry::Excluded` when the destination is excluded from
    /// the worktree by settings.
    fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Err(anyhow!("no entry to rename for id {entry_id:?}"))),
        };
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);

        // Renaming the root means renaming the worktree directory itself, so
        // the new absolute path is resolved against the root's parent.
        let is_root_entry = self.root_entry().is_some_and(|e| e.id == entry_id);
        let abs_new_path = if is_root_entry {
            let Some(root_parent_path) = self.abs_path().parent() else {
                return Task::ready(Err(anyhow!("no parent for path {:?}", self.abs_path)));
            };
            root_parent_path.join(&new_path)
        } else {
            let Ok(absolutize_path) = self.absolutize(&new_path) else {
                return Task::ready(Err(anyhow!("absolutizing path {new_path:?}")));
            };
            absolutize_path
        };
        let abs_path = abs_new_path.clone();
        let fs = self.fs.clone();
        let case_sensitive = self.fs_case_sensitive;
        let rename = cx.background_spawn(async move {
            let abs_old_path = abs_old_path?;
            let abs_new_path = abs_new_path;

            let abs_old_path_lower = abs_old_path.to_str().map(|p| p.to_lowercase());
            let abs_new_path_lower = abs_new_path.to_str().map(|p| p.to_lowercase());

            // If we're on a case-insensitive FS and we're doing a case-only rename (i.e. `foobar` to `FOOBAR`)
            // we want to overwrite, because otherwise we run into a file-already-exists error.
            let overwrite = !case_sensitive
                && abs_old_path != abs_new_path
                && abs_old_path_lower == abs_new_path_lower;

            fs.rename(
                &abs_old_path,
                &abs_new_path,
                fs::RenameOptions {
                    overwrite,
                    ..Default::default()
                },
            )
            .await
            .with_context(|| format!("Renaming {abs_old_path:?} into {abs_new_path:?}"))
        });

        cx.spawn(async move |this, cx| {
            rename.await?;
            Ok(this
                .update(cx, |this, cx| {
                    let local = this.as_local_mut().unwrap();
                    if is_root_entry {
                        // We eagerly update `abs_path` and refresh this worktree.
                        // Otherwise, the FS watcher would do it on the `RootUpdated` event,
                        // but with a noticeable delay, so we handle it proactively.
                        local.update_abs_path_and_refresh(
                            Some(SanitizedPath::from(abs_path.clone())),
                            cx,
                        );
                        Task::ready(Ok(this.root_entry().cloned()))
                    } else {
                        local.refresh_entry(new_path.clone(), Some(old_path), cx)
                    }
                })?
                .await?
                .map(CreatedEntry::Included)
                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
        })
    }
1787
    /// Recursively copies the entry with the given id to `new_path`, then
    /// refreshes the destination entry. When `relative_worktree_source_path`
    /// is provided it is used as the source instead of the entry's own path.
    /// Returns `Ok(None)` when the id doesn't resolve to an entry or the
    /// destination is excluded.
    fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        relative_worktree_source_path: Option<PathBuf>,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Ok(None)),
        };
        let new_path = new_path.into();
        let abs_old_path =
            if let Some(relative_worktree_source_path) = relative_worktree_source_path {
                Ok(self.abs_path().join(relative_worktree_source_path))
            } else {
                self.absolutize(&old_path)
            };
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        // Perform the recursive copy off the main thread.
        let copy = cx.background_spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path?,
                &abs_new_path?,
                Default::default(),
            )
            .await
        });

        cx.spawn(async move |this, cx| {
            copy.await?;
            this.update(cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })?
            .await
        })
    }
1828
    /// Copies external (outside-the-worktree) paths into `target_directory`,
    /// then rescans the copied paths and returns the ids of the resulting
    /// entries. Copy and refresh failures are logged rather than propagated.
    pub fn copy_external_entries(
        &self,
        target_directory: PathBuf,
        paths: Vec<Arc<Path>>,
        overwrite_existing_files: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<Vec<ProjectEntryId>>> {
        let worktree_path = self.abs_path().clone();
        let fs = self.fs.clone();
        // Pair each source with its destination (target_directory/file_name).
        let paths = paths
            .into_iter()
            .filter_map(|source| {
                let file_name = source.file_name()?;
                let mut target = target_directory.clone();
                target.push(file_name);

                // Do not allow copying the same file to itself.
                if source.as_ref() != target.as_path() {
                    Some((source, target))
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        // Worktree-relative destinations, used for the rescan and for the
        // final entry-id lookup.
        let paths_to_refresh = paths
            .iter()
            .filter_map(|(_, target)| Some(target.strip_prefix(&worktree_path).ok()?.into()))
            .collect::<Vec<_>>();

        cx.spawn(async move |this, cx| {
            cx.background_spawn(async move {
                for (source, target) in paths {
                    copy_recursive(
                        fs.as_ref(),
                        &source,
                        &target,
                        fs::CopyOptions {
                            overwrite: overwrite_existing_files,
                            ..Default::default()
                        },
                    )
                    .await
                    .with_context(|| {
                        anyhow!("Failed to copy file from {source:?} to {target:?}")
                    })?;
                }
                Ok::<(), anyhow::Error>(())
            })
            .await
            .log_err();
            let mut refresh = cx.read_entity(
                &this.upgrade().with_context(|| "Dropped worktree")?,
                |this, _| {
                    Ok::<postage::barrier::Receiver, anyhow::Error>(
                        this.as_local()
                            .with_context(|| "Worktree is not local")?
                            .refresh_entries_for_paths(paths_to_refresh.clone()),
                    )
                },
            )??;

            // Wait for the scanner to process the copied paths.
            cx.background_spawn(async move {
                refresh.next().await;
                Ok::<(), anyhow::Error>(())
            })
            .await
            .log_err();

            let this = this.upgrade().with_context(|| "Dropped worktree")?;
            cx.read_entity(&this, |this, _| {
                paths_to_refresh
                    .iter()
                    .filter_map(|path| Some(this.entry_for_path(path)?.id))
                    .collect()
            })
        })
    }
1907
1908 fn expand_entry(
1909 &self,
1910 entry_id: ProjectEntryId,
1911 cx: &Context<Worktree>,
1912 ) -> Option<Task<Result<()>>> {
1913 let path = self.entry_for_id(entry_id)?.path.clone();
1914 let mut refresh = self.refresh_entries_for_paths(vec![path]);
1915 Some(cx.background_spawn(async move {
1916 refresh.next().await;
1917 Ok(())
1918 }))
1919 }
1920
1921 fn expand_all_for_entry(
1922 &self,
1923 entry_id: ProjectEntryId,
1924 cx: &Context<Worktree>,
1925 ) -> Option<Task<Result<()>>> {
1926 let path = self.entry_for_id(entry_id).unwrap().path.clone();
1927 let mut rx = self.add_path_prefix_to_scan(path.clone());
1928 Some(cx.background_spawn(async move {
1929 rx.next().await;
1930 Ok(())
1931 }))
1932 }
1933
1934 fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
1935 let (tx, rx) = barrier::channel();
1936 self.scan_requests_tx
1937 .try_send(ScanRequest {
1938 relative_paths: paths,
1939 done: smallvec![tx],
1940 })
1941 .ok();
1942 rx
1943 }
1944
    /// Test-only wrapper exposing `refresh_entries_for_paths` publicly.
    #[cfg(feature = "test-support")]
    pub fn manually_refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
        self.refresh_entries_for_paths(paths)
    }
1949
1950 pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) -> barrier::Receiver {
1951 let (tx, rx) = barrier::channel();
1952 self.path_prefixes_to_scan_tx
1953 .try_send(PathPrefixScanRequest {
1954 path: path_prefix,
1955 done: smallvec![tx],
1956 })
1957 .ok();
1958 rx
1959 }
1960
    /// Rescans `path` (and `old_path` too, for renames) and returns the
    /// refreshed entry. Returns `Ok(None)` immediately for excluded paths;
    /// errors if the entry cannot be found after the rescan.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        if self.settings.is_path_excluded(&path) {
            return Task::ready(Ok(None));
        }
        // For a rename, the old location must be rescanned as well so its
        // entry is removed.
        let paths = if let Some(old_path) = old_path.as_ref() {
            vec![old_path.clone(), path.clone()]
        } else {
            vec![path.clone()]
        };
        let t0 = Instant::now();
        let mut refresh = self.refresh_entries_for_paths(paths);
        cx.spawn(async move |this, cx| {
            // Wait for the scanner to finish processing the request.
            refresh.recv().await;
            log::trace!("refreshed entry {path:?} in {:?}", t0.elapsed());
            let new_entry = this.update(cx, |this, _| {
                this.entry_for_path(path)
                    .cloned()
                    .ok_or_else(|| anyhow!("failed to read path after update"))
            })??;
            Ok(Some(new_entry))
        })
    }
1988
    /// Starts streaming snapshot updates for this worktree to `callback`
    /// (e.g. to a collaborator). The first message is a full initial update;
    /// subsequent ones are diffs. If an observer already exists, it is simply
    /// nudged to resume. A `false` return from `callback` pauses the stream
    /// until updates are resumed.
    fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        if let Some(observer) = self.update_observer.as_mut() {
            *observer.resume_updates.borrow_mut() = ();
            return;
        }

        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
        let (snapshots_tx, mut snapshots_rx) =
            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet)>();
        // Seed the stream with the current snapshot so the initial update is
        // sent without waiting for the next scanner pass.
        snapshots_tx
            .unbounded_send((self.snapshot(), Arc::default()))
            .ok();

        let worktree_id = cx.entity_id().as_u64();
        let _maintain_remote_snapshot = cx.background_spawn(async move {
            let mut is_first = true;
            while let Some((snapshot, entry_changes)) = snapshots_rx.next().await {
                let update = if is_first {
                    is_first = false;
                    snapshot.build_initial_update(project_id, worktree_id)
                } else {
                    snapshot.build_update(project_id, worktree_id, entry_changes)
                };

                // Large updates are split into size-limited chunks.
                for update in proto::split_worktree_update(update) {
                    // Clear any stale resume signal before sending.
                    let _ = resume_updates_rx.try_recv();
                    loop {
                        let result = callback(update.clone());
                        if result.await {
                            break;
                        } else {
                            // Callback rejected the update; wait until we're
                            // told to resume, or stop if the sender is gone.
                            log::info!("waiting to resume updates");
                            if resume_updates_rx.next().await.is_none() {
                                return Some(());
                            }
                        }
                    }
                }
            }
            Some(())
        });

        self.update_observer = Some(UpdateObservationState {
            snapshots_tx,
            resume_updates: resume_updates_tx,
            _maintain_remote_snapshot,
        });
    }
2041
    /// Opts this worktree into sharing files that settings mark as private,
    /// restarting the scanners so previously-withheld entries are picked up.
    pub fn share_private_files(&mut self, cx: &Context<Worktree>) {
        self.share_private_files = true;
        self.restart_background_scanners(cx);
    }
2046
    /// Points the worktree at a new root path (when provided) and restarts the
    /// background scanners so the snapshot is rebuilt from the new location.
    fn update_abs_path_and_refresh(
        &mut self,
        new_path: Option<SanitizedPath>,
        cx: &Context<Worktree>,
    ) {
        if let Some(new_path) = new_path {
            // Repository and ignore state is tied to the old root; clear it so
            // the fresh scan rebuilds it.
            self.snapshot.git_repositories = Default::default();
            self.snapshot.ignores_by_parent_abs_path = Default::default();
            let root_name = new_path
                .as_path()
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());
            self.snapshot.update_abs_path(new_path, root_name);
        }
        self.restart_background_scanners(cx);
    }
2063}
2064
2065impl RemoteWorktree {
    /// Returns the id of the remote project this worktree belongs to.
    pub fn project_id(&self) -> u64 {
        self.project_id
    }
2069
    /// Returns a clone of the RPC client used to reach the host.
    pub fn client(&self) -> AnyProtoClient {
        self.client.clone()
    }
2073
    /// Marks this worktree as disconnected: stops accepting remote updates
    /// and drops any pending snapshot waiters (their receivers will error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
2079
2080 pub fn update_from_remote(&self, update: proto::UpdateWorktree) {
2081 if let Some(updates_tx) = &self.updates_tx {
2082 updates_tx
2083 .unbounded_send(update)
2084 .expect("consumer runs to completion");
2085 }
2086 }
2087
    /// Re-broadcasts this remote worktree's updates to `callback`, starting
    /// with a full initial update and then forwarding each update received
    /// from the host. Stops (and clears the observer) when `callback` returns
    /// `false` or the update stream ends.
    fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        let (tx, mut rx) = mpsc::unbounded();
        let initial_update = self
            .snapshot
            .build_initial_update(project_id, self.id().to_proto());
        self.update_observer = Some(tx);
        cx.spawn(async move |this, cx| {
            let mut update = initial_update;
            'outer: loop {
                // SSH projects use a special project ID of 0, and we need to
                // remap it to the correct one here.
                update.project_id = project_id;

                // Large updates are split into size-limited chunks.
                for chunk in split_worktree_update(update) {
                    if !callback(chunk).await {
                        break 'outer;
                    }
                }

                if let Some(next_update) = rx.next().await {
                    update = next_update;
                } else {
                    break;
                }
            }
            this.update(cx, |this, _| {
                let this = this.as_remote_mut().unwrap();
                this.update_observer.take();
            })
        })
        .detach();
    }
2124
    /// Returns whether a snapshot with the given scan id has already been
    /// fully applied to this worktree.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }
2128
    /// Returns a future that resolves once this worktree has applied a
    /// snapshot with at least the given scan id. Resolves immediately if that
    /// has already happened, and errors (via the dropped sender) if the
    /// worktree is disconnected from the host.
    pub fn wait_for_snapshot(
        &mut self,
        scan_id: usize,
    ) -> impl Future<Output = Result<()>> + use<> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` fail below.
            drop(tx);
        } else {
            // Keep subscriptions ordered by scan id so they can be drained
            // in order as snapshots arrive.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
2152
    /// Inserts an entry received from the host into the background snapshot,
    /// after waiting for the snapshot with the given scan id to be applied so
    /// the insertion lands on consistent state.
    fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &Context<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(async move |this, cx| {
            wait_for_snapshot.await?;
            this.update(cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let snapshot = &mut worktree.background_snapshot.lock().0;
                let entry = snapshot.insert_entry(entry, &worktree.file_scan_inclusions);
                // Keep the foreground snapshot in sync with the background one.
                worktree.snapshot = snapshot.clone();
                entry
            })?
        })
    }
2171
    /// Asks the host to delete the given entry (trashing it when `trash` is
    /// set), waits for the corresponding snapshot, then removes the entry
    /// from the local snapshots.
    fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let response = self.client.request(proto::DeleteProjectEntry {
            project_id: self.project_id,
            entry_id: entry_id.to_proto(),
            use_trash: trash,
        });
        Some(cx.spawn(async move |this, cx| {
            let response = response.await?;
            let scan_id = response.worktree_scan_id as usize;

            // Wait until the host's deletion is reflected in our snapshot
            // stream before mutating local state.
            this.update(cx, move |this, _| {
                this.as_remote_mut().unwrap().wait_for_snapshot(scan_id)
            })?
            .await?;

            this.update(cx, |this, _| {
                let this = this.as_remote_mut().unwrap();
                let snapshot = &mut this.background_snapshot.lock().0;
                snapshot.delete_entry(entry_id);
                // Keep the foreground snapshot in sync with the background one.
                this.snapshot = snapshot.clone();
            })
        }))
    }
2200
    /// Asks the remote host to rename the entry to `new_path`.
    ///
    /// If the host responds with an entry, it is inserted locally once the
    /// host's scan has been observed. If no entry comes back, the new path
    /// is excluded from the worktree and only its absolute path is returned.
    fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let new_path: Arc<Path> = new_path.into();
        let response = self.client.request(proto::RenameProjectEntry {
            project_id: self.project_id,
            entry_id: entry_id.to_proto(),
            new_path: new_path.as_ref().to_proto(),
        });
        cx.spawn(async move |this, cx| {
            let response = response.await?;
            match response.entry {
                Some(entry) => this
                    .update(cx, |this, cx| {
                        this.as_remote_mut().unwrap().insert_entry(
                            entry,
                            response.worktree_scan_id as usize,
                            cx,
                        )
                    })?
                    .await
                    .map(CreatedEntry::Included),
                None => {
                    let abs_path = this.update(cx, |worktree, _| {
                        worktree
                            .absolutize(&new_path)
                            .with_context(|| format!("absolutizing {new_path:?}"))
                    })??;
                    Ok(CreatedEntry::Excluded { abs_path })
                }
            }
        })
    }
2237}
2238
2239impl Snapshot {
    /// Creates an empty snapshot for the worktree rooted at `abs_path`.
    ///
    /// `scan_id` starts at 1 with `completed_scan_id` 0, so the snapshot is
    /// initially "not yet scanned" (see `observed_snapshot`-style checks).
    pub fn new(id: u64, root_name: String, abs_path: Arc<Path>) -> Self {
        Snapshot {
            id: WorktreeId::from_usize(id as usize),
            abs_path: abs_path.into(),
            // Lowercased character bag of the root name, used for fuzzy matching.
            root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
            root_name,
            always_included_entries: Default::default(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            scan_id: 1,
            completed_scan_id: 0,
        }
    }
2253
    /// The id of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }
2257
    // TODO:
    // Consider the following:
    //
    // ```rust
    // let abs_path: Arc<Path> = snapshot.abs_path(); // e.g. "C:\Users\user\Desktop\project"
    // let some_non_trimmed_path = Path::new("\\\\?\\C:\\Users\\user\\Desktop\\project\\main.rs");
    // // The caller perform some actions here:
    // some_non_trimmed_path.strip_prefix(abs_path); // This fails
    // some_non_trimmed_path.starts_with(abs_path); // This fails too
    // ```
    //
    // This is definitely a bug, but it's not clear if we should handle it here or not.
    /// The absolute path of this worktree's root (sanitized; see TODO above
    /// regarding Windows verbatim-prefix mismatches).
    pub fn abs_path(&self) -> &Arc<Path> {
        self.abs_path.as_path()
    }
2273
    /// Builds the first `UpdateWorktree` message sent to a remote peer,
    /// containing every entry in this snapshot sorted by entry id.
    ///
    /// Repository updates are intentionally omitted; they are sent in
    /// separate messages.
    fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
        let mut updated_entries = self
            .entries_by_path
            .iter()
            .map(proto::Entry::from)
            .collect::<Vec<_>>();
        updated_entries.sort_unstable_by_key(|e| e.id);

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_proto(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries: Vec::new(),
            scan_id: self.scan_id as u64,
            // Only the last update of a completed scan is final.
            is_last_update: self.completed_scan_id == self.scan_id,
            // Sent in separate messages.
            updated_repositories: Vec::new(),
            removed_repositories: Vec::new(),
        }
    }
2296
    /// Returns the absolute path of a repository's working directory,
    /// resolving in-project directories relative to this worktree's root.
    pub fn work_directory_abs_path(&self, work_directory: &WorkDirectory) -> Result<PathBuf> {
        match work_directory {
            WorkDirectory::InProject { relative_path } => self.absolutize(relative_path),
            WorkDirectory::AboveProject { absolute_path, .. } => {
                Ok(absolute_path.as_ref().to_owned())
            }
        }
    }
2305
    /// Converts a worktree-relative path into an absolute path.
    ///
    /// Rejects any path containing non-normal components (`..`, root, or
    /// prefix components), preventing escapes above the worktree root. An
    /// empty path resolves to the worktree root itself.
    pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
        if path
            .components()
            .any(|component| !matches!(component, std::path::Component::Normal(_)))
        {
            return Err(anyhow!("invalid path"));
        }
        if path.file_name().is_some() {
            Ok(self.abs_path.as_path().join(path))
        } else {
            // No file name means the empty (root) path.
            Ok(self.abs_path.as_path().to_path_buf())
        }
    }
2319
    /// Returns true if an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
2323
    /// Inserts (or replaces) a proto-encoded entry, keeping `entries_by_id`
    /// and `entries_by_path` consistent when the entry's path has changed.
    fn insert_entry(
        &mut self,
        entry: proto::Entry,
        always_included_paths: &PathMatcher,
    ) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, always_included_paths, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id previously mapped to a different path, drop the stale
        // path-keyed record before inserting the new one.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
2345
    /// Removes the entry with the given id along with all of its descendant
    /// entries, returning the removed entry's path if it existed.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>(&());
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and its descendants, removing their
            // id-keyed records as we go...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the rest.
            new_entries_by_path.append(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }
2366
    /// Updates the root absolute path, recomputing the fuzzy-match char bag
    /// only when the root name actually changed.
    fn update_abs_path(&mut self, abs_path: SanitizedPath, root_name: String) {
        self.abs_path = abs_path;
        if root_name != self.root_name {
            self.root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
            self.root_name = root_name;
        }
    }
2374
    /// Applies an `UpdateWorktree` message received from the host, removing
    /// and upserting entries in both trees via batched edits.
    fn apply_remote_update(
        &mut self,
        update: proto::UpdateWorktree,
        always_included_paths: &PathMatcher,
    ) -> Result<()> {
        log::debug!(
            "applying remote worktree update. {} entries updated, {} removed",
            update.updated_entries.len(),
            update.removed_entries.len()
        );
        self.update_abs_path(
            SanitizedPath::from(PathBuf::from_proto(update.abs_path)),
            update.root_name,
        );

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();

        for entry_id in update.removed_entries {
            let entry_id = ProjectEntryId::from_proto(entry_id);
            entries_by_id_edits.push(Edit::Remove(entry_id));
            if let Some(entry) = self.entry_for_id(entry_id) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, always_included_paths, entry))?;
            // An updated entry may have moved: drop its old path-keyed record.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            // A different entry may previously have occupied this path: drop
            // its id-keyed record.
            if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
                if old_entry.id != entry.id {
                    entries_by_id_edits.push(Edit::Remove(old_entry.id));
                }
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }
2430
    /// Total number of entries in this snapshot (files and directories).
    pub fn entry_count(&self) -> usize {
        self.entries_by_path.summary().count
    }
2434
    /// Number of non-ignored entries in this snapshot.
    pub fn visible_entry_count(&self) -> usize {
        self.entries_by_path.summary().non_ignored_count
    }
2438
    /// Number of directory entries (total entries minus files).
    pub fn dir_count(&self) -> usize {
        let summary = self.entries_by_path.summary();
        summary.count - summary.file_count
    }
2443
    /// Number of non-ignored directory entries.
    pub fn visible_dir_count(&self) -> usize {
        let summary = self.entries_by_path.summary();
        summary.non_ignored_count - summary.non_ignored_file_count
    }
2448
    /// Number of file entries in this snapshot.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }
2452
    /// Number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().non_ignored_file_count
    }
2456
    /// Creates a traversal positioned at the entry with the given offset,
    /// where the offset counts only entries matching the include flags.
    fn traverse_from_offset(
        &self,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor(&());
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_files,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            snapshot: self,
            cursor,
            include_files,
            include_dirs,
            include_ignored,
        }
    }
2483
    /// Creates a traversal positioned at the given worktree-relative path.
    pub fn traverse_from_path(
        &self,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        Traversal::new(self, include_files, include_dirs, include_ignored, path)
    }
2493
    /// Traversal over file entries only, starting at `start` (file offset).
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(true, false, include_ignored, start)
    }
2497
    /// Traversal over directory entries only, starting at `start` (dir offset).
    pub fn directories(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, true, include_ignored, start)
    }
2501
    /// Traversal over all entries (files and directories), starting at `start`.
    pub fn entries(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(true, true, include_ignored, start)
    }
2505
    /// Iterator over the paths of all entries, excluding the root (empty) path.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>(&())
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }
2513
    /// Iterator over the direct children of `parent_path`, including files,
    /// directories, and ignored entries.
    pub fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let options = ChildEntriesOptions {
            include_files: true,
            include_dirs: true,
            include_ignored: true,
        };
        self.child_entries_with_options(parent_path, options)
    }
2522
    /// Iterator over the direct children of `parent_path`, filtered by the
    /// given include options.
    pub fn child_entries_with_options<'a>(
        &'a self,
        parent_path: &'a Path,
        options: ChildEntriesOptions,
    ) -> ChildEntriesIter<'a> {
        // Seek just past the parent itself so iteration starts at its first child.
        let mut cursor = self.entries_by_path.cursor(&());
        cursor.seek(&TraversalTarget::path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            snapshot: self,
            cursor,
            include_files: options.include_files,
            include_dirs: options.include_dirs,
            include_ignored: options.include_ignored,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }
2542
    /// The entry for the worktree root (the empty relative path), if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }
2546
    /// TODO: what's the difference between `root_dir` and `abs_path`?
    /// is there any? if so, document it.
    ///
    /// Returns the worktree's absolute path, but only when the root entry is
    /// a directory (i.e. `None` for single-file worktrees).
    pub fn root_dir(&self) -> Option<Arc<Path>> {
        self.root_entry()
            .filter(|entry| entry.is_dir())
            .map(|_| self.abs_path().clone())
    }
2554
    /// The name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
2558
    /// The id of the most recent (possibly still in-progress) scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }
2562
2563 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
2564 let path = path.as_ref();
2565 debug_assert!(path.is_relative());
2566 self.traverse_from_path(true, true, true, path)
2567 .entry()
2568 .and_then(|entry| {
2569 if entry.path.as_ref() == path {
2570 Some(entry)
2571 } else {
2572 None
2573 }
2574 })
2575 }
2576
    /// Looks up an entry by id, going through the id → path mapping and then
    /// resolving the path in `entries_by_path`.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }
2581
    /// Returns the inode of the entry at the given path, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
2585}
2586
2587impl LocalSnapshot {
2588 fn local_repo_for_work_directory_path(&self, path: &Path) -> Option<&LocalRepositoryEntry> {
2589 self.git_repositories
2590 .iter()
2591 .map(|(_, entry)| entry)
2592 .find(|entry| entry.work_directory.path_key() == PathKey(path.into()))
2593 }
2594
    /// Builds an incremental `UpdateWorktree` message from a set of entry
    /// changes, splitting them into sorted `updated_entries` and
    /// `removed_entries` lists.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);

        // An id can appear as both removed and updated (e.g. removed then
        // re-created within one batch); the update wins.
        // TODO - optimize, knowing that removed_entries are sorted.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_proto(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            // Sent in separate messages.
            updated_repositories: Vec::new(),
            removed_repositories: Vec::new(),
        }
    }
2632
    /// Inserts (or replaces) an entry in the local snapshot.
    ///
    /// Side effects: a `.gitignore` file is parsed (blocking on this thread)
    /// and cached by its parent directory's absolute path; a pending
    /// directory keeps the kind of any existing entry at the same path.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.as_path().join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    // `true` marks the ignore as needing an update pass.
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // A different entry previously at this path leaves a stale id record.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
2678
2679 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2680 let mut inodes = TreeSet::default();
2681 for ancestor in path.ancestors().skip(1) {
2682 if let Some(entry) = self.entry_for_path(ancestor) {
2683 inodes.insert(entry.inode);
2684 }
2685 }
2686 inodes
2687 }
2688
    /// Builds the stack of gitignore rules that apply to `abs_path`, walking
    /// up its ancestors and stopping at the first directory containing a
    /// `.git` (the repository root — ignores above it don't apply).
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for (index, ancestor) in abs_path.ancestors().enumerate() {
            // Index 0 is `abs_path` itself; only ignore files in ancestor
            // directories apply to it.
            if index > 0 {
                if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                    new_ignores.push((ancestor, Some(ignore.clone())));
                } else {
                    new_ignores.push((ancestor, None));
                }
            }
            // NOTE: synchronous filesystem check.
            if ancestor.join(*DOT_GIT).exists() {
                break;
            }
        }

        // Apply outermost-first; once any ancestor directory is itself
        // ignored, everything below it is ignored.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
2720
    /// Test helper: iterates over directory entries that are external or
    /// ignored (i.e. directories that were loaded despite being excluded).
    #[cfg(test)]
    fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
        self.entries_by_path
            .cursor::<()>(&())
            .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
    }
2727
    /// Test helper: asserts the snapshot's internal invariants.
    ///
    /// Checks that `entries_by_path` and `entries_by_id` agree, that the
    /// file-count summaries match actual file entries, that breadth-first
    /// traversal via `child_entries` yields the same order as the path tree,
    /// and (when `git_state` is set) that every cached gitignore corresponds
    /// to existing entries.
    #[cfg(test)]
    pub fn check_invariants(&self, git_state: bool) {
        use pretty_assertions::assert_eq;

        assert_eq!(
            self.entries_by_path
                .cursor::<()>(&())
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>(&())
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        // Every file entry must appear in `files(true, ..)`, and every
        // visible file in `files(false, ..)`, in the same order.
        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>(&()) {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if (!entry.is_ignored && !entry.is_external) || entry.is_always_included {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // Reconstruct the entry order by expanding children depth-first via
        // `child_entries`, and compare with the tree's native order.
        let mut bfs_paths = Vec::new();
        let mut stack = self
            .root_entry()
            .map(|e| e.path.as_ref())
            .into_iter()
            .collect::<Vec<_>>();
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>(&())
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        let dfs_paths_via_traversal = self
            .entries(true, 0)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        if git_state {
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path = ignore_parent_abs_path
                    .strip_prefix(self.abs_path.as_path())
                    .unwrap();
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(
                    self.entry_for_path(ignore_parent_path.join(*GITIGNORE))
                        .is_some()
                );
            }
        }
    }
2800
    /// Test helper: returns `(path, inode, is_ignored)` for each entry,
    /// sorted by path, optionally filtering out ignored entries.
    #[cfg(test)]
    pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
        let mut paths = Vec::new();
        for entry in self.entries_by_path.cursor::<()>(&()) {
            if include_ignored || !entry.is_ignored {
                paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
            }
        }
        paths.sort_by(|a, b| a.0.cmp(b.0));
        paths
    }
2812}
2813
2814impl BackgroundScannerState {
    /// Decides whether the background scanner should descend into a directory.
    ///
    /// Scans non-external, non-ignored (or always-included) directories, plus
    /// `.git` and local-settings directories, anything previously scanned,
    /// and anything covered by an explicitly requested path or path prefix.
    fn should_scan_directory(&self, entry: &Entry) -> bool {
        (!entry.is_external && (!entry.is_ignored || entry.is_always_included))
            || entry.path.file_name() == Some(*DOT_GIT)
            || entry.path.file_name() == Some(local_settings_folder_relative_path().as_os_str())
            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
            || self
                .paths_to_scan
                .iter()
                .any(|p| p.starts_with(&entry.path))
            || self
                .path_prefixes_to_scan
                .iter()
                .any(|p| entry.path.starts_with(p))
    }
2829
    /// Queues a directory for scanning, unless one of its ancestors has the
    /// same inode (which would indicate a filesystem cycle, e.g. a symlink
    /// loop).
    fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
        let path = entry.path.clone();
        let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
        let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);

        if !ancestor_inodes.contains(&entry.inode) {
            ancestor_inodes.insert(entry.inode);
            scan_job_tx
                .try_send(ScanJob {
                    abs_path,
                    path,
                    ignore_stack,
                    scan_queue: scan_job_tx.clone(),
                    ancestor_inodes,
                    is_external: entry.is_external,
                })
                .unwrap();
        }
    }
2849
    /// Tries to carry over a previously-assigned entry id for `entry`, so
    /// renames and updates don't look like remove-then-create.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(mtime) = entry.mtime {
            // If an entry with the same inode was removed from the worktree during this scan,
            // then it *might* represent the same file or directory. But the OS might also have
            // re-used the inode for a completely different file or directory.
            //
            // Conditionally reuse the old entry's id:
            // * if the mtime is the same, the file was probably been renamed.
            // * if the path is the same, the file may just have been updated
            if let Some(removed_entry) = self.removed_entries.remove(&entry.inode) {
                if removed_entry.mtime == Some(mtime) || removed_entry.path == entry.path {
                    entry.id = removed_entry.id;
                }
            } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
                entry.id = existing_entry.id;
            }
        }
    }
2868
    /// Inserts an entry into the scanner's snapshot, reusing an existing id
    /// when possible, and registering a git repository when the entry is a
    /// `.git` directory.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs, watcher: &dyn Watcher) -> Entry {
        self.reuse_entry_id(&mut entry);
        let entry = self.snapshot.insert_entry(entry, fs);
        if entry.path.file_name() == Some(&DOT_GIT) {
            self.insert_git_repository(entry.path.clone(), fs, watcher);
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);

        entry
    }
2881
    /// Records the result of scanning a directory: marks the parent as a
    /// loaded `Dir`, caches its gitignore (if any), inserts the scanned
    /// children, and remembers the parent as changed.
    fn populate_dir(
        &mut self,
        parent_path: &Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = if let Some(parent_entry) = self
            .snapshot
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            // The parent was removed while its scan was in flight.
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
            EntryKind::Dir => {}
            // The path no longer refers to a directory; nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.snapshot.abs_path.as_path().join(parent_path).into();
            self.snapshot
                .ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        let parent_entry_id = parent_entry.id;
        self.scanned_dirs.insert(parent_entry_id);
        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for entry in entries {
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.snapshot.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // Keep `changed_paths` sorted and deduplicated.
        if let Err(ix) = self.changed_paths.binary_search(parent_path) {
            self.changed_paths.insert(ix, parent_path.clone());
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }
2942
    /// Removes `path` and all of its descendants from the scanner's snapshot.
    ///
    /// Removed entries are stashed by inode in `removed_entries` so their ids
    /// can be reused if the same file reappears (see `reuse_entry_id`).
    /// Removing a `.gitignore` marks its directory's cached ignore as
    /// needing an update, and any git repositories rooted at removed entries
    /// are dropped.
    fn remove_path(&mut self, path: &Path) {
        log::trace!("background scanner removing path {path:?}");
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self
                .snapshot
                .entries_by_path
                .cursor::<TraversalProgress>(&());
            // Everything before `path` + everything after `path`'s subtree.
            new_entries = cursor.slice(&TraversalTarget::path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::successor(path), Bias::Left, &());
            new_entries.append(cursor.suffix(&()), &());
        }
        self.snapshot.entries_by_path = new_entries;

        let mut removed_ids = Vec::with_capacity(removed_entries.summary().count);
        for entry in removed_entries.cursor::<()>(&()) {
            match self.removed_entries.entry(entry.inode) {
                hash_map::Entry::Occupied(mut e) => {
                    // Keep the most recently created entry per inode.
                    let prev_removed_entry = e.get_mut();
                    if entry.id > prev_removed_entry.id {
                        *prev_removed_entry = entry.clone();
                    }
                }
                hash_map::Entry::Vacant(e) => {
                    e.insert(entry.clone());
                }
            }

            if entry.path.file_name() == Some(&GITIGNORE) {
                let abs_parent_path = self
                    .snapshot
                    .abs_path
                    .as_path()
                    .join(entry.path.parent().unwrap());
                if let Some((_, needs_update)) = self
                    .snapshot
                    .ignores_by_parent_abs_path
                    .get_mut(abs_parent_path.as_path())
                {
                    *needs_update = true;
                }
            }

            // Keep `removed_ids` sorted and deduplicated.
            if let Err(ix) = removed_ids.binary_search(&entry.id) {
                removed_ids.insert(ix, entry.id);
            }
        }

        self.snapshot.entries_by_id.edit(
            removed_ids.iter().map(|&id| Edit::Remove(id)).collect(),
            &(),
        );
        self.snapshot
            .git_repositories
            .retain(|id, _| removed_ids.binary_search(id).is_err());

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }
3003
    /// Registers a git repository for the `.git` directory found at
    /// `dot_git_path` (a worktree-relative path), using its parent directory
    /// as the repository's working directory.
    ///
    /// No-op for nested `.git` directories (repository metadata) and for a
    /// worktree whose root is the `.git` directory itself.
    fn insert_git_repository(
        &mut self,
        dot_git_path: Arc<Path>,
        fs: &dyn Fs,
        watcher: &dyn Watcher,
    ) {
        let work_dir_path: Arc<Path> = match dot_git_path.parent() {
            Some(parent_dir) => {
                // Guard against repositories inside the repository metadata
                if parent_dir.iter().any(|component| component == *DOT_GIT) {
                    log::debug!(
                        "not building git repository for nested `.git` directory, `.git` path in the worktree: {dot_git_path:?}"
                    );
                    return;
                };
                log::debug!(
                    "building git repository, `.git` path in the worktree: {dot_git_path:?}"
                );

                parent_dir.into()
            }
            None => {
                // `dot_git_path.parent().is_none()` means `.git` directory is the opened worktree itself,
                // no files inside that directory are tracked by git, so no need to build the repo around it
                log::debug!(
                    "not building git repository for the worktree itself, `.git` path in the worktree: {dot_git_path:?}"
                );
                return;
            }
        };

        self.insert_git_repository_for_path(
            WorkDirectory::InProject {
                relative_path: work_dir_path,
            },
            dot_git_path,
            fs,
            watcher,
        );
    }
3044
    /// Opens the repository at `dot_git_path`, sets up filesystem watches on
    /// its git directory (and commondir, for linked git worktrees), and
    /// records it in the snapshot keyed by the work directory's entry id.
    ///
    /// Returns `None` if the work directory has no entry, a repository is
    /// already registered for it, or the repo can't be opened/watched.
    fn insert_git_repository_for_path(
        &mut self,
        work_directory: WorkDirectory,
        dot_git_path: Arc<Path>,
        fs: &dyn Fs,
        watcher: &dyn Watcher,
    ) -> Option<LocalRepositoryEntry> {
        log::trace!("insert git repository for {dot_git_path:?}");
        let work_dir_entry = self.snapshot.entry_for_path(work_directory.path_key().0)?;
        let work_directory_abs_path = self
            .snapshot
            .work_directory_abs_path(&work_directory)
            .log_err()?;

        if self
            .snapshot
            .git_repositories
            .get(&work_dir_entry.id)
            .is_some()
        {
            log::trace!("existing git repository for {work_directory:?}");
            return None;
        }

        let dot_git_abs_path = self.snapshot.abs_path.as_path().join(&dot_git_path);

        // TODO add these watchers without building a whole repository by parsing .git-with-indirection
        let t0 = Instant::now();
        let repository = fs.open_repo(&dot_git_abs_path)?;
        log::trace!("opened git repo for {dot_git_abs_path:?}");

        let repository_path = repository.path();
        watcher.add(&repository_path).log_err()?;

        let actual_dot_git_dir_abs_path = repository.main_repository_path();
        let dot_git_worktree_abs_path = if actual_dot_git_dir_abs_path == dot_git_abs_path {
            None
        } else {
            // The two paths could be different because we opened a git worktree.
            // When that happens:
            //
            // * `dot_git_abs_path` is a file that points to the worktree-subdirectory in the actual
            // .git directory.
            //
            // * `repository_path` is the worktree-subdirectory.
            //
            // * `actual_dot_git_dir_abs_path` is the path to the actual .git directory. In git
            // documentation this is called the "commondir".
            watcher.add(&dot_git_abs_path).log_err()?;
            Some(Arc::from(dot_git_abs_path.as_path()))
        };

        log::trace!("constructed libgit2 repo in {:?}", t0.elapsed());

        let work_directory_id = work_dir_entry.id;

        let local_repository = LocalRepositoryEntry {
            work_directory_id,
            work_directory,
            git_dir_scan_id: 0,
            original_dot_git_abs_path: dot_git_abs_path.as_path().into(),
            dot_git_dir_abs_path: actual_dot_git_dir_abs_path.into(),
            work_directory_abs_path: work_directory_abs_path.as_path().into(),
            dot_git_worktree_abs_path,
        };

        self.snapshot
            .git_repositories
            .insert(work_directory_id, local_repository.clone());

        log::trace!("inserting new local git repository");
        Some(local_repository)
    }
3118}
3119
3120async fn is_git_dir(path: &Path, fs: &dyn Fs) -> bool {
3121 if path.file_name() == Some(&*DOT_GIT) {
3122 return true;
3123 }
3124
3125 // If we're in a bare repository, we are not inside a `.git` folder. In a
3126 // bare repository, the root folder contains what would normally be in the
3127 // `.git` folder.
3128 let head_metadata = fs.metadata(&path.join("HEAD")).await;
3129 if !matches!(head_metadata, Ok(Some(_))) {
3130 return false;
3131 }
3132 let config_metadata = fs.metadata(&path.join("config")).await;
3133 matches!(config_metadata, Ok(Some(_)))
3134}
3135
3136async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
3137 let contents = fs.load(abs_path).await?;
3138 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
3139 let mut builder = GitignoreBuilder::new(parent);
3140 for line in contents.lines() {
3141 builder.add_line(Some(abs_path.into()), line)?;
3142 }
3143 Ok(builder.build()?)
3144}
3145
/// Both worktree variants expose their snapshot via `Deref`, so `Snapshot`
/// methods can be called directly on a `Worktree`.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
3156
/// A local worktree dereferences to its `LocalSnapshot`.
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
3164
/// A remote worktree dereferences to its (plain) `Snapshot`.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
3172
/// Debug output for a local worktree delegates to its snapshot.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
3178
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Newtype wrappers so the two sum-trees render compactly (a map of
        // path → id and a plain list) instead of dumping full entry structs.
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl fmt::Debug for EntriesByPath<'_> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl fmt::Debug for EntriesById<'_> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}
3206
/// A handle to a file within a worktree, local or remote.
#[derive(Clone, PartialEq)]
pub struct File {
    // The worktree that contains (or contained) this file.
    pub worktree: Entity<Worktree>,
    // Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    // Whether the file currently exists on disk (and its mtime when it does).
    pub disk_state: DiskState,
    // The worktree entry backing this file, if any.
    pub entry_id: Option<ProjectEntryId>,
    // Whether the file belongs to a local worktree.
    pub is_local: bool,
    // Whether the file is marked private.
    pub is_private: bool,
}
3216
impl language::File for File {
    // Only files in local worktrees can act as `LocalFile`s.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local { Some(self) } else { None }
    }

    fn disk_state(&self) -> DiskState {
        self.disk_state
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Returns a path for display: prefixed by the worktree's root name for
    /// visible worktrees, or by the worktree's absolute path (with the home
    /// directory abbreviated to `~`) for invisible ones.
    fn full_path(&self, cx: &App) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(home_dir().as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(home_dir().as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path denotes the worktree root itself.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a App) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn worktree_id(&self, cx: &App) -> WorktreeId {
        self.worktree.read(cx).id()
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this handle for transmission to remote collaborators.
    fn to_proto(&self, cx: &App) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.read(cx).id().to_proto(),
            entry_id: self.entry_id.map(|id| id.to_proto()),
            path: self.path.as_ref().to_proto(),
            mtime: self.disk_state.mtime().map(|time| time.into()),
            is_deleted: self.disk_state == DiskState::Deleted,
        }
    }

    fn is_private(&self) -> bool {
        self.is_private
    }
}
3284
3285impl language::LocalFile for File {
3286 fn abs_path(&self, cx: &App) -> PathBuf {
3287 let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
3288 if self.path.as_ref() == Path::new("") {
3289 worktree_path.as_path().to_path_buf()
3290 } else {
3291 worktree_path.as_path().join(&self.path)
3292 }
3293 }
3294
3295 fn load(&self, cx: &App) -> Task<Result<String>> {
3296 let worktree = self.worktree.read(cx).as_local().unwrap();
3297 let abs_path = worktree.absolutize(&self.path);
3298 let fs = worktree.fs.clone();
3299 cx.background_spawn(async move { fs.load(&abs_path?).await })
3300 }
3301
3302 fn load_bytes(&self, cx: &App) -> Task<Result<Vec<u8>>> {
3303 let worktree = self.worktree.read(cx).as_local().unwrap();
3304 let abs_path = worktree.absolutize(&self.path);
3305 let fs = worktree.fs.clone();
3306 cx.background_spawn(async move { fs.load_bytes(&abs_path?).await })
3307 }
3308}
3309
3310impl File {
3311 pub fn for_entry(entry: Entry, worktree: Entity<Worktree>) -> Arc<Self> {
3312 Arc::new(Self {
3313 worktree,
3314 path: entry.path.clone(),
3315 disk_state: if let Some(mtime) = entry.mtime {
3316 DiskState::Present { mtime }
3317 } else {
3318 DiskState::New
3319 },
3320 entry_id: Some(entry.id),
3321 is_local: true,
3322 is_private: entry.is_private,
3323 })
3324 }
3325
3326 pub fn from_proto(
3327 proto: rpc::proto::File,
3328 worktree: Entity<Worktree>,
3329 cx: &App,
3330 ) -> Result<Self> {
3331 let worktree_id = worktree
3332 .read(cx)
3333 .as_remote()
3334 .ok_or_else(|| anyhow!("not remote"))?
3335 .id();
3336
3337 if worktree_id.to_proto() != proto.worktree_id {
3338 return Err(anyhow!("worktree id does not match file"));
3339 }
3340
3341 let disk_state = if proto.is_deleted {
3342 DiskState::Deleted
3343 } else {
3344 if let Some(mtime) = proto.mtime.map(&Into::into) {
3345 DiskState::Present { mtime }
3346 } else {
3347 DiskState::New
3348 }
3349 };
3350
3351 Ok(Self {
3352 worktree,
3353 path: Arc::<Path>::from_proto(proto.path),
3354 disk_state,
3355 entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
3356 is_local: false,
3357 is_private: false,
3358 })
3359 }
3360
3361 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
3362 file.and_then(|f| f.as_any().downcast_ref())
3363 }
3364
3365 pub fn worktree_id(&self, cx: &App) -> WorktreeId {
3366 self.worktree.read(cx).id()
3367 }
3368
3369 pub fn project_entry_id(&self, _: &App) -> Option<ProjectEntryId> {
3370 match self.disk_state {
3371 DiskState::Deleted => None,
3372 _ => self.entry_id,
3373 }
3374 }
3375}
3376
/// A single file or directory tracked by a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable identifier for this entry within the project.
    pub id: ProjectEntryId,
    /// Whether this entry is a file or a (possibly unloaded) directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    /// Modification time from the filesystem; `None` if the entry has not
    /// been created on disk yet (see `is_created`).
    pub mtime: Option<MTime>,

    /// The canonicalized target path, set when the entry is a symlink
    /// (see `BackgroundScanner::scan_dir`).
    pub canonical_path: Option<Arc<Path>>,
    /// Whether this entry is ignored by Git.
    ///
    /// We only scan ignored entries once the directory is expanded and
    /// exclude them from searches.
    pub is_ignored: bool,

    /// Whether this entry is always included in searches.
    ///
    /// This is used for entries that are always included in searches, even
    /// if they are ignored by git. Overridden by file_scan_exclusions.
    pub is_always_included: bool,

    /// Whether this entry's canonical path is outside of the worktree.
    /// This means the entry is only accessible from the worktree root via a
    /// symlink.
    ///
    /// We only scan entries outside of the worktree once the symlinked
    /// directory is expanded. External entries are treated like gitignored
    /// entries in that they are not included in searches.
    pub is_external: bool,

    /// Whether this entry is considered to be a `.env` file.
    pub is_private: bool,
    /// The entry's size on disk, in bytes.
    pub size: u64,
    /// Bag of characters from the entry's path, used for fuzzy matching.
    pub char_bag: CharBag,
    pub is_fifo: bool,
}
3414
/// The kind of a worktree [`Entry`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose contents have not been scanned yet.
    UnloadedDir,
    /// A directory whose contents are in the process of being scanned.
    PendingDir,
    /// A fully loaded directory.
    Dir,
    /// A regular file.
    File,
}
3422
/// The kind of change observed for a path in a worktree snapshot update.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
3438
/// Describes a change to a git repository that a worktree tracks.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct UpdatedGitRepository {
    /// ID of the repository's working directory.
    ///
    /// For a repo that's above the worktree root, this is the ID of the worktree root, and hence not unique.
    /// It's included here to aid the GitStore in detecting when a repository's working directory is renamed.
    pub work_directory_id: ProjectEntryId,
    /// The working directory's absolute path before this update, if it existed.
    pub old_work_directory_abs_path: Option<Arc<Path>>,
    /// The working directory's absolute path after this update, if it exists.
    pub new_work_directory_abs_path: Option<Arc<Path>>,
    /// For a normal git repository checkout, the absolute path to the .git directory.
    /// For a worktree, the absolute path to the worktree's subdirectory inside the .git directory.
    pub dot_git_abs_path: Option<Arc<Path>>,
}
3452
/// A batch of entry changes: `(path, entry id, kind of change)` triples.
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// A batch of git repository changes.
pub type UpdatedGitRepositoriesSet = Arc<[UpdatedGitRepository]>;
3455
/// Traversal dimension tracking the greatest path seen so far.
#[derive(Clone, Debug)]
pub struct PathProgress<'a> {
    pub max_path: &'a Path,
}
3460
/// A sum-tree summary pairing an item summary `S` with the greatest path
/// covered by the summarized items.
#[derive(Clone, Debug)]
pub struct PathSummary<S> {
    /// The greatest path in the summarized subtree.
    pub max_path: Arc<Path>,
    /// The accumulated item summary for the subtree.
    pub item_summary: S,
}
3466
impl<S: Summary> Summary for PathSummary<S> {
    type Context = S::Context;

    fn zero(cx: &Self::Context) -> Self {
        Self {
            max_path: Path::new("").into(),
            item_summary: S::zero(cx),
        }
    }

    fn add_summary(&mut self, rhs: &Self, cx: &Self::Context) {
        // `rhs` summarizes items to the right of `self`, so its max path
        // supersedes ours.
        self.max_path = rhs.max_path.clone();
        self.item_summary.add_summary(&rhs.item_summary, cx);
    }
}
3482
/// Lets [`PathProgress`] be derived from `PathSummary`-keyed trees.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for PathProgress<'a> {
    fn zero(_: &<PathSummary<S> as Summary>::Context) -> Self {
        Self {
            max_path: Path::new(""),
        }
    }

    fn add_summary(
        &mut self,
        summary: &'a PathSummary<S>,
        _: &<PathSummary<S> as Summary>::Context,
    ) {
        self.max_path = summary.max_path.as_ref()
    }
}
3498
/// Accumulates the git status totals stored in `PathSummary<GitSummary>`.
impl<'a> sum_tree::Dimension<'a, PathSummary<GitSummary>> for GitSummary {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<GitSummary>, _: &()) {
        *self += summary.item_summary
    }
}
3508
/// Allows seeking by path while also accumulating a [`GitSummary`].
impl<'a> sum_tree::SeekTarget<'a, PathSummary<GitSummary>, (TraversalProgress<'a>, GitSummary)>
    for PathTarget<'_>
{
    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitSummary), _: &()) -> Ordering {
        // Only the path component participates in the comparison.
        self.cmp_path(&cursor_location.0.max_path)
    }
}
3516
/// Lets [`PathKey`] be derived from `PathSummary`-keyed trees.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for PathKey {
    fn zero(_: &S::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<S>, _: &S::Context) {
        self.0 = summary.max_path.clone();
    }
}
3526
/// Lets [`TraversalProgress`] be derived from `PathSummary`-keyed trees.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for TraversalProgress<'a> {
    fn zero(_cx: &S::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<S>, _: &S::Context) {
        self.max_path = summary.max_path.as_ref();
    }
}
3536
3537impl Entry {
3538 fn new(
3539 path: Arc<Path>,
3540 metadata: &fs::Metadata,
3541 next_entry_id: &AtomicUsize,
3542 root_char_bag: CharBag,
3543 canonical_path: Option<Arc<Path>>,
3544 ) -> Self {
3545 let char_bag = char_bag_for_path(root_char_bag, &path);
3546 Self {
3547 id: ProjectEntryId::new(next_entry_id),
3548 kind: if metadata.is_dir {
3549 EntryKind::PendingDir
3550 } else {
3551 EntryKind::File
3552 },
3553 path,
3554 inode: metadata.inode,
3555 mtime: Some(metadata.mtime),
3556 size: metadata.len,
3557 canonical_path,
3558 is_ignored: false,
3559 is_always_included: false,
3560 is_external: false,
3561 is_private: false,
3562 char_bag,
3563 is_fifo: metadata.is_fifo,
3564 }
3565 }
3566
3567 pub fn is_created(&self) -> bool {
3568 self.mtime.is_some()
3569 }
3570
3571 pub fn is_dir(&self) -> bool {
3572 self.kind.is_dir()
3573 }
3574
3575 pub fn is_file(&self) -> bool {
3576 self.kind.is_file()
3577 }
3578}
3579
3580impl EntryKind {
3581 pub fn is_dir(&self) -> bool {
3582 matches!(
3583 self,
3584 EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
3585 )
3586 }
3587
3588 pub fn is_unloaded(&self) -> bool {
3589 matches!(self, EntryKind::UnloadedDir)
3590 }
3591
3592 pub fn is_file(&self) -> bool {
3593 matches!(self, EntryKind::File)
3594 }
3595}
3596
3597impl sum_tree::Item for Entry {
3598 type Summary = EntrySummary;
3599
3600 fn summary(&self, _cx: &()) -> Self::Summary {
3601 let non_ignored_count = if (self.is_ignored || self.is_external) && !self.is_always_included
3602 {
3603 0
3604 } else {
3605 1
3606 };
3607 let file_count;
3608 let non_ignored_file_count;
3609 if self.is_file() {
3610 file_count = 1;
3611 non_ignored_file_count = non_ignored_count;
3612 } else {
3613 file_count = 0;
3614 non_ignored_file_count = 0;
3615 }
3616
3617 EntrySummary {
3618 max_path: self.path.clone(),
3619 count: 1,
3620 non_ignored_count,
3621 file_count,
3622 non_ignored_file_count,
3623 }
3624 }
3625}
3626
/// Entries are keyed by their worktree-relative path.
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
3634
/// Aggregated counts for a subtree of [`Entry`] items
/// (see the [`sum_tree::Item`] impl for `Entry`).
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest path in the subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries that are neither ignored nor external, or are always included.
    non_ignored_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// File entries counted in `non_ignored_count`.
    non_ignored_file_count: usize,
}
3643
3644impl Default for EntrySummary {
3645 fn default() -> Self {
3646 Self {
3647 max_path: Arc::from(Path::new("")),
3648 count: 0,
3649 non_ignored_count: 0,
3650 file_count: 0,
3651 non_ignored_file_count: 0,
3652 }
3653 }
3654}
3655
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // `rhs` is to the right, so its max path supersedes ours; counts add.
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.non_ignored_count += rhs.non_ignored_count;
        self.file_count += rhs.file_count;
        self.non_ignored_file_count += rhs.non_ignored_file_count;
    }
}
3671
/// An item of the id-keyed tree (`entries_by_id`), mapping a
/// [`ProjectEntryId`] back to its entry's path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last updated.
    scan_id: usize,
}
3679
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    fn summary(&self, _cx: &()) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
3687
/// `PathEntry` items are keyed by their project entry id.
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    fn key(&self) -> Self::Key {
        self.id
    }
}
3695
/// Summary for [`PathEntry`] subtrees: the greatest entry id they contain.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
3700
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn zero(_cx: &Self::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // `summary` is to the right, so its max id supersedes ours.
        self.max_id = summary.max_id;
    }
}
3712
/// Lets [`ProjectEntryId`] be used to seek within `entries_by_id`.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
3722
/// Key ordering worktree entries by their worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct PathKey(pub Arc<Path>);
3725
impl Default for PathKey {
    /// The empty path, which sorts before every other path.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
3731
/// Lets [`PathKey`] be used to seek within `entries_by_path`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
3741
/// Scans a local worktree in the background, keeping its snapshot up to date
/// in response to filesystem events and explicit scan requests.
struct BackgroundScanner {
    /// Shared mutable scanner state, including the in-progress snapshot.
    state: Mutex<BackgroundScannerState>,
    fs: Arc<dyn Fs>,
    fs_case_sensitive: bool,
    /// Channel over which snapshot updates are published (see `send_status_update`).
    status_updates_tx: UnboundedSender<ScanState>,
    executor: BackgroundExecutor,
    /// Requests to rescan specific paths (see `process_scan_request`).
    scan_requests_rx: channel::Receiver<ScanRequest>,
    /// Requests to load all paths under a given prefix (handled in `run`).
    path_prefixes_to_scan_rx: channel::Receiver<PathPrefixScanRequest>,
    /// Source of ids for newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    /// Which stage of its lifecycle the scanner is currently in.
    phase: BackgroundScannerPhase,
    watcher: Arc<dyn Watcher>,
    settings: WorktreeSettings,
    share_private_files: bool,
}
3756
/// The stage of the [`BackgroundScanner`]'s lifecycle (see `run`).
#[derive(Copy, Clone, PartialEq)]
enum BackgroundScannerPhase {
    /// Performing the initial recursive scan of the worktree.
    InitialScan,
    /// Processing FS events that arrived while the initial scan was running.
    EventsReceivedDuringInitialScan,
    /// Steady state: processing FS events as they arrive.
    Events,
}
3763
3764impl BackgroundScanner {
    /// Main loop of the background scanner: discovers any containing git
    /// repository and ancestor gitignores, performs the initial scan of the
    /// worktree, then processes scan requests and FS events until the
    /// worktree is dropped.
    async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>) {
        // If the worktree root does not contain a git repository, then find
        // the git repository in an ancestor directory. Find any gitignore files
        // in ancestor directories.
        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
        let mut containing_git_repository = None;
        for (index, ancestor) in root_abs_path.as_path().ancestors().enumerate() {
            if index != 0 {
                if Some(ancestor) == self.fs.home_dir().as_deref() {
                    // Unless $HOME is itself the worktree root, don't consider it as a
                    // containing git repository---expensive and likely unwanted.
                    break;
                } else if let Ok(ignore) =
                    build_gitignore(&ancestor.join(*GITIGNORE), self.fs.as_ref()).await
                {
                    self.state
                        .lock()
                        .snapshot
                        .ignores_by_parent_abs_path
                        .insert(ancestor.into(), (ignore.into(), false));
                }
            }

            let ancestor_dot_git = ancestor.join(*DOT_GIT);
            log::trace!("considering ancestor: {ancestor_dot_git:?}");
            // Check whether the directory or file called `.git` exists (in the
            // case of worktrees it's a file.)
            if self
                .fs
                .metadata(&ancestor_dot_git)
                .await
                .is_ok_and(|metadata| metadata.is_some())
            {
                if index != 0 {
                    // We canonicalize, since the FS events use the canonicalized path.
                    if let Some(ancestor_dot_git) =
                        self.fs.canonicalize(&ancestor_dot_git).await.log_err()
                    {
                        let location_in_repo = root_abs_path
                            .as_path()
                            .strip_prefix(ancestor)
                            .unwrap()
                            .into();
                        log::info!(
                            "inserting parent git repo for this worktree: {location_in_repo:?}"
                        );
                        // We associate the external git repo with our root folder and
                        // also mark where in the git repo the root folder is located.
                        let local_repository = self.state.lock().insert_git_repository_for_path(
                            WorkDirectory::AboveProject {
                                absolute_path: ancestor.into(),
                                location_in_repo,
                            },
                            ancestor_dot_git.clone().into(),
                            self.fs.as_ref(),
                            self.watcher.as_ref(),
                        );

                        if local_repository.is_some() {
                            containing_git_repository = Some(ancestor_dot_git)
                        }
                    };
                }

                // Reached root of git repository.
                break;
            }
        }

        log::info!("containing git repository: {containing_git_repository:?}");

        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        {
            let mut state = self.state.lock();
            state.snapshot.scan_id += 1;
            if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
                let ignore_stack = state
                    .snapshot
                    .ignore_stack_for_abs_path(root_abs_path.as_path(), true);
                if ignore_stack.is_abs_path_ignored(root_abs_path.as_path(), true) {
                    root_entry.is_ignored = true;
                    state.insert_entry(root_entry.clone(), self.fs.as_ref(), self.watcher.as_ref());
                }
                state.enqueue_scan_dir(root_abs_path.into(), &root_entry, &scan_job_tx);
            }
        };

        // Perform an initial scan of the directory.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut state = self.state.lock();
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
        }

        self.send_status_update(false, SmallVec::new());

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
        if let Poll::Ready(Some(mut paths)) = futures::poll!(fs_events_rx.next()) {
            while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
                paths.extend(more_paths);
            }
            self.process_events(paths.into_iter().map(Into::into).collect())
                .await;
        }
        if let Some(abs_path) = containing_git_repository {
            self.process_events(vec![abs_path]).await;
        }

        // Continue processing events until the worktree is dropped.
        self.phase = BackgroundScannerPhase::Events;

        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.next_scan_request().fuse() => {
                    let Ok(request) = request else { break };
                    if !self.process_scan_request(request, false).await {
                        return;
                    }
                }

                path_prefix_request = self.path_prefixes_to_scan_rx.recv().fuse() => {
                    let Ok(request) = path_prefix_request else { break };
                    log::trace!("adding path prefix {:?}", request.path);

                    let did_scan = self.forcibly_load_paths(&[request.path.clone()]).await;
                    if did_scan {
                        let abs_path =
                        {
                            let mut state = self.state.lock();
                            state.path_prefixes_to_scan.insert(request.path.clone());
                            state.snapshot.abs_path.as_path().join(&request.path)
                        };

                        if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
                            self.process_events(vec![abs_path]).await;
                        }
                    }
                    self.send_status_update(false, request.done);
                }

                paths = fs_events_rx.next().fuse() => {
                    let Some(mut paths) = paths else { break };
                    // Batch up any further pending events before processing.
                    while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
                        paths.extend(more_paths);
                    }
                    self.process_events(paths.into_iter().map(Into::into).collect()).await;
                }
            }
        }
    }
3921
    /// Reloads the entries for the requested paths, returning whether the
    /// resulting status update was delivered successfully.
    async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
        log::debug!("rescanning paths {:?}", request.relative_paths);

        request.relative_paths.sort_unstable();
        self.forcibly_load_paths(&request.relative_paths).await;

        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(root_path.as_path()).await {
            Ok(path) => SanitizedPath::from(path),
            Err(err) => {
                log::error!("failed to canonicalize root path: {}", err);
                return true;
            }
        };
        // Map each relative path to its canonical absolute path; a path with
        // no file name (the worktree root itself) maps to the root.
        let abs_paths = request
            .relative_paths
            .iter()
            .map(|path| {
                if path.file_name().is_some() {
                    root_canonical_path.as_path().join(path).to_path_buf()
                } else {
                    root_canonical_path.as_path().to_path_buf()
                }
            })
            .collect::<Vec<_>>();

        {
            // Bump the scan id; if no scan was in progress, mark this scan as
            // already complete so consumers don't see a perpetually-pending scan.
            let mut state = self.state.lock();
            let is_idle = state.snapshot.completed_scan_id == state.snapshot.scan_id;
            state.snapshot.scan_id += 1;
            if is_idle {
                state.snapshot.completed_scan_id = state.snapshot.scan_id;
            }
        }

        self.reload_entries_for_paths(
            root_path,
            root_canonical_path,
            &request.relative_paths,
            abs_paths,
            None,
        )
        .await;

        self.send_status_update(scanning, request.done)
    }
3968
    /// Processes a batch of raw FS events: filters and relativizes the paths,
    /// reloads the affected entries, refreshes ignore statuses, and rescans
    /// any affected git repositories.
    async fn process_events(&self, mut abs_paths: Vec<PathBuf>) {
        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(root_path.as_path()).await {
            Ok(path) => SanitizedPath::from(path),
            Err(err) => {
                // The root could not be canonicalized, which typically means it
                // was deleted or renamed. Try to discover its new path via the
                // root file handle and notify the worktree.
                let new_path = self
                    .state
                    .lock()
                    .snapshot
                    .root_file_handle
                    .clone()
                    .and_then(|handle| handle.current_path(&self.fs).log_err())
                    .map(SanitizedPath::from)
                    .filter(|new_path| *new_path != root_path);

                if let Some(new_path) = new_path.as_ref() {
                    log::info!(
                        "root renamed from {} to {}",
                        root_path.as_path().display(),
                        new_path.as_path().display()
                    )
                } else {
                    log::warn!("root path could not be canonicalized: {}", err);
                }
                self.status_updates_tx
                    .unbounded_send(ScanState::RootUpdated { new_path })
                    .ok();
                return;
            }
        };

        // Certain directories may have FS changes, but do not lead to git data changes that Zed cares about.
        // Ignore these, to avoid Zed unnecessarily rescanning git metadata.
        let skipped_files_in_dot_git = HashSet::from_iter([*COMMIT_MESSAGE, *INDEX_LOCK]);
        let skipped_dirs_in_dot_git = [*FSMONITOR_DAEMON, *LFS_DIR];

        let mut relative_paths = Vec::with_capacity(abs_paths.len());
        let mut dot_git_abs_paths = Vec::new();
        // Sorting puts ancestors before descendants, so the dedup below drops
        // events for paths whose ancestor is also in the batch.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(b));
        abs_paths.retain(|abs_path| {
            let abs_path = SanitizedPath::from(abs_path);

            let snapshot = &self.state.lock().snapshot;
            {
                let mut is_git_related = false;

                // Determine whether this path lies inside a `.git` directory,
                // and if so, where that directory is and the path within it.
                let dot_git_paths = abs_path.as_path().ancestors().find_map(|ancestor| {
                    if smol::block_on(is_git_dir(ancestor, self.fs.as_ref())) {
                        let path_in_git_dir = abs_path.as_path().strip_prefix(ancestor).expect("stripping off the ancestor");
                        Some((ancestor.to_owned(), path_in_git_dir.to_owned()))
                    } else {
                        None
                    }
                });

                if let Some((dot_git_abs_path, path_in_git_dir)) = dot_git_paths {
                    if skipped_files_in_dot_git.contains(path_in_git_dir.as_os_str()) || skipped_dirs_in_dot_git.iter().any(|skipped_git_subdir| path_in_git_dir.starts_with(skipped_git_subdir)) {
                        log::debug!("ignoring event {abs_path:?} as it's in the .git directory among skipped files or directories");
                        return false;
                    }

                    is_git_related = true;
                    if !dot_git_abs_paths.contains(&dot_git_abs_path) {
                        dot_git_abs_paths.push(dot_git_abs_path);
                    }
                }

                let relative_path: Arc<Path> =
                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                        path.into()
                    } else {
                        if is_git_related {
                            log::debug!(
                                "ignoring event {abs_path:?}, since it's in git dir outside of root path {root_canonical_path:?}",
                            );
                        } else {
                            log::error!(
                                "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
                            );
                        }
                        return false;
                    };

                // A changed .gitignore can affect the status of any repository
                // whose working directory contains it, so queue those
                // repositories for rescanning too.
                if abs_path.0.file_name() == Some(*GITIGNORE) {
                    for (_, repo) in snapshot.git_repositories.iter().filter(|(_, repo)| repo.directory_contains(&relative_path)) {
                        if !dot_git_abs_paths.iter().any(|dot_git_abs_path| dot_git_abs_path == repo.dot_git_dir_abs_path.as_ref()) {
                            dot_git_abs_paths.push(repo.dot_git_dir_abs_path.to_path_buf());
                        }
                    }
                }

                // Events within directories that were never loaded can be
                // ignored; their contents will be scanned when expanded.
                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
                    snapshot
                        .entry_for_path(parent)
                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
                });
                if !parent_dir_is_loaded {
                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
                    return false;
                }

                if self.settings.is_path_excluded(&relative_path) {
                    if !is_git_related {
                        log::debug!("ignoring FS event for excluded path {relative_path:?}");
                    }
                    return false;
                }

                relative_paths.push(relative_path);
                true
            }
        });

        if relative_paths.is_empty() && dot_git_abs_paths.is_empty() {
            return;
        }

        self.state.lock().snapshot.scan_id += 1;

        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        log::debug!("received fs events {:?}", relative_paths);
        self.reload_entries_for_paths(
            root_path,
            root_canonical_path,
            &relative_paths,
            abs_paths,
            Some(scan_job_tx.clone()),
        )
        .await;

        self.update_ignore_statuses(scan_job_tx).await;
        self.scan_dirs(false, scan_job_rx).await;

        if !dot_git_abs_paths.is_empty() {
            self.update_git_repositories(dot_git_abs_paths);
        }

        {
            let mut state = self.state.lock();
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
            for (_, entry) in mem::take(&mut state.removed_entries) {
                state.scanned_dirs.remove(&entry.id);
            }
        }
        self.send_status_update(false, SmallVec::new());
    }
4117
    /// Synchronously scans any unloaded directories containing the given
    /// paths, returning whether any directory actually needed loading.
    async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        {
            let mut state = self.state.lock();
            let root_path = state.snapshot.abs_path.clone();
            for path in paths {
                // Walk upward from each path and enqueue the first unloaded
                // ancestor directory for scanning.
                for ancestor in path.ancestors() {
                    if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
                        if entry.kind == EntryKind::UnloadedDir {
                            let abs_path = root_path.as_path().join(ancestor);
                            state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
                            state.paths_to_scan.insert(path.clone());
                            break;
                        }
                    }
                }
            }
            // Close the channel so the drain loop below terminates.
            drop(scan_job_tx);
        }
        while let Ok(job) = scan_job_rx.recv().await {
            self.scan_dir(&job).await.log_err();
        }

        !mem::take(&mut self.state.lock().paths_to_scan).is_empty()
    }
4143
    /// Drains `scan_jobs_rx` using one worker per CPU, while still servicing
    /// incoming scan requests and emitting periodic progress updates.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        // If the receiving side is gone, the worktree was dropped; bail out.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.next_scan_request().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, SmallVec::new());
                                        }
                                        Err(count) => {
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
4214
    /// Publishes the current snapshot, along with a diff against the previously
    /// published one, over `status_updates_tx`. Returns whether the send
    /// succeeded (it fails once the worktree has been dropped).
    fn send_status_update(&self, scanning: bool, barrier: SmallVec<[barrier::Sender; 1]>) -> bool {
        let mut state = self.state.lock();
        // While a scan is in progress, skip updates that carry no changes.
        if state.changed_paths.is_empty() && scanning {
            return true;
        }

        let new_snapshot = state.snapshot.clone();
        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
        let changes = build_diff(
            self.phase,
            &old_snapshot,
            &new_snapshot,
            &state.changed_paths,
        );
        state.changed_paths.clear();

        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot: new_snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
4240
4241 async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
4242 let root_abs_path;
4243 let root_char_bag;
4244 {
4245 let snapshot = &self.state.lock().snapshot;
4246 if self.settings.is_path_excluded(&job.path) {
4247 log::error!("skipping excluded directory {:?}", job.path);
4248 return Ok(());
4249 }
4250 log::trace!("scanning directory {:?}", job.path);
4251 root_abs_path = snapshot.abs_path().clone();
4252 root_char_bag = snapshot.root_char_bag;
4253 }
4254
4255 let next_entry_id = self.next_entry_id.clone();
4256 let mut ignore_stack = job.ignore_stack.clone();
4257 let mut new_ignore = None;
4258 let mut root_canonical_path = None;
4259 let mut new_entries: Vec<Entry> = Vec::new();
4260 let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
4261 let mut child_paths = self
4262 .fs
4263 .read_dir(&job.abs_path)
4264 .await?
4265 .filter_map(|entry| async {
4266 match entry {
4267 Ok(entry) => Some(entry),
4268 Err(error) => {
4269 log::error!("error processing entry {:?}", error);
4270 None
4271 }
4272 }
4273 })
4274 .collect::<Vec<_>>()
4275 .await;
4276
4277 // Ensure that .git and .gitignore are processed first.
4278 swap_to_front(&mut child_paths, *GITIGNORE);
4279 swap_to_front(&mut child_paths, *DOT_GIT);
4280
4281 for child_abs_path in child_paths {
4282 let child_abs_path: Arc<Path> = child_abs_path.into();
4283 let child_name = child_abs_path.file_name().unwrap();
4284 let child_path: Arc<Path> = job.path.join(child_name).into();
4285
4286 if child_name == *DOT_GIT {
4287 let mut state = self.state.lock();
4288 state.insert_git_repository(
4289 child_path.clone(),
4290 self.fs.as_ref(),
4291 self.watcher.as_ref(),
4292 );
4293 } else if child_name == *GITIGNORE {
4294 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
4295 Ok(ignore) => {
4296 let ignore = Arc::new(ignore);
4297 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
4298 new_ignore = Some(ignore);
4299 }
4300 Err(error) => {
4301 log::error!(
4302 "error loading .gitignore file {:?} - {:?}",
4303 child_name,
4304 error
4305 );
4306 }
4307 }
4308 }
4309
4310 if self.settings.is_path_excluded(&child_path) {
4311 log::debug!("skipping excluded child entry {child_path:?}");
4312 self.state.lock().remove_path(&child_path);
4313 continue;
4314 }
4315
4316 let child_metadata = match self.fs.metadata(&child_abs_path).await {
4317 Ok(Some(metadata)) => metadata,
4318 Ok(None) => continue,
4319 Err(err) => {
4320 log::error!("error processing {child_abs_path:?}: {err:?}");
4321 continue;
4322 }
4323 };
4324
4325 let mut child_entry = Entry::new(
4326 child_path.clone(),
4327 &child_metadata,
4328 &next_entry_id,
4329 root_char_bag,
4330 None,
4331 );
4332
4333 if job.is_external {
4334 child_entry.is_external = true;
4335 } else if child_metadata.is_symlink {
4336 let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
4337 Ok(path) => path,
4338 Err(err) => {
4339 log::error!(
4340 "error reading target of symlink {:?}: {:?}",
4341 child_abs_path,
4342 err
4343 );
4344 continue;
4345 }
4346 };
4347
4348 // lazily canonicalize the root path in order to determine if
4349 // symlinks point outside of the worktree.
4350 let root_canonical_path = match &root_canonical_path {
4351 Some(path) => path,
4352 None => match self.fs.canonicalize(&root_abs_path).await {
4353 Ok(path) => root_canonical_path.insert(path),
4354 Err(err) => {
4355 log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
4356 continue;
4357 }
4358 },
4359 };
4360
4361 if !canonical_path.starts_with(root_canonical_path) {
4362 child_entry.is_external = true;
4363 }
4364
4365 child_entry.canonical_path = Some(canonical_path.into());
4366 }
4367
4368 if child_entry.is_dir() {
4369 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
4370 child_entry.is_always_included = self.settings.is_path_always_included(&child_path);
4371
4372 // Avoid recursing until crash in the case of a recursive symlink
4373 if job.ancestor_inodes.contains(&child_entry.inode) {
4374 new_jobs.push(None);
4375 } else {
4376 let mut ancestor_inodes = job.ancestor_inodes.clone();
4377 ancestor_inodes.insert(child_entry.inode);
4378
4379 new_jobs.push(Some(ScanJob {
4380 abs_path: child_abs_path.clone(),
4381 path: child_path,
4382 is_external: child_entry.is_external,
4383 ignore_stack: if child_entry.is_ignored {
4384 IgnoreStack::all()
4385 } else {
4386 ignore_stack.clone()
4387 },
4388 ancestor_inodes,
4389 scan_queue: job.scan_queue.clone(),
4390 }));
4391 }
4392 } else {
4393 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
4394 child_entry.is_always_included = self.settings.is_path_always_included(&child_path);
4395 }
4396
4397 {
4398 let relative_path = job.path.join(child_name);
4399 if self.is_path_private(&relative_path) {
4400 log::debug!("detected private file: {relative_path:?}");
4401 child_entry.is_private = true;
4402 }
4403 }
4404
4405 new_entries.push(child_entry);
4406 }
4407
4408 let mut state = self.state.lock();
4409
4410 // Identify any subdirectories that should not be scanned.
4411 let mut job_ix = 0;
4412 for entry in &mut new_entries {
4413 state.reuse_entry_id(entry);
4414 if entry.is_dir() {
4415 if state.should_scan_directory(entry) {
4416 job_ix += 1;
4417 } else {
4418 log::debug!("defer scanning directory {:?}", entry.path);
4419 entry.kind = EntryKind::UnloadedDir;
4420 new_jobs.remove(job_ix);
4421 }
4422 }
4423 if entry.is_always_included {
4424 state
4425 .snapshot
4426 .always_included_entries
4427 .push(entry.path.clone());
4428 }
4429 }
4430
4431 state.populate_dir(&job.path, new_entries, new_ignore);
4432 self.watcher.add(job.abs_path.as_ref()).log_err();
4433
4434 for new_job in new_jobs.into_iter().flatten() {
4435 job.scan_queue
4436 .try_send(new_job)
4437 .expect("channel is unbounded");
4438 }
4439
4440 Ok(())
4441 }
4442
    /// Re-reads filesystem metadata for the given paths and updates the
    /// snapshot's entries to match.
    ///
    /// `relative_paths` and `abs_paths` are index-aligned: the metadata
    /// fetched for `abs_paths[i]` is applied to `relative_paths[i]`.
    /// When `scan_queue_tx` is provided, this is a recursive refresh:
    /// existing entries for the paths are removed up front and
    /// directories are re-enqueued for scanning.
    ///
    /// All list arguments should be sorted before calling this function
    async fn reload_entries_for_paths(
        &self,
        root_abs_path: SanitizedPath,
        root_canonical_path: SanitizedPath,
        relative_paths: &[Arc<Path>],
        abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) {
        // grab metadata for all requested paths, concurrently
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| async move {
                    let metadata = self.fs.metadata(abs_path).await?;
                    if let Some(metadata) = metadata {
                        let canonical_path = self.fs.canonicalize(abs_path).await?;

                        // If we're on a case-insensitive filesystem (default on macOS), we want
                        // to only ignore metadata for non-symlink files if their absolute-path matches
                        // the canonical-path.
                        // Because if not, this might be a case-only-renaming (`mv test.txt TEST.TXT`)
                        // and we want to ignore the metadata for the old path (`test.txt`) so it's
                        // treated as removed.
                        if !self.fs_case_sensitive && !metadata.is_symlink {
                            let canonical_file_name = canonical_path.file_name();
                            let file_name = abs_path.file_name();
                            if canonical_file_name != file_name {
                                return Ok(None);
                            }
                        }

                        anyhow::Ok(Some((metadata, SanitizedPath::from(canonical_path))))
                    } else {
                        Ok(None)
                    }
                })
                .collect::<Vec<_>>(),
        )
        .await;

        // Apply all results under a single hold of the state lock.
        let mut state = self.state.lock();
        let doing_recursive_update = scan_queue_tx.is_some();

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            if matches!(metadata, Ok(None)) || doing_recursive_update {
                log::trace!("remove path {:?}", path);
                state.remove_path(path);
            }
        }

        for (path, metadata) in relative_paths.iter().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.as_path().join(path).into();
            match metadata {
                Ok(Some((metadata, canonical_path))) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    // An entry whose real (canonical) location lies outside
                    // the canonical worktree root is marked external.
                    let is_external = !canonical_path.starts_with(&root_canonical_path);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                        if metadata.is_symlink {
                            Some(canonical_path.as_path().to_path_buf().into())
                        } else {
                            None
                        },
                    );

                    let is_dir = fs_entry.is_dir();
                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
                    fs_entry.is_external = is_external;
                    fs_entry.is_private = self.is_path_private(path);
                    fs_entry.is_always_included = self.settings.is_path_always_included(path);

                    // On a recursive refresh, re-enqueue directories for
                    // scanning. The second condition also forces a scan when
                    // the entry is the worktree root itself (empty relative
                    // path) and the root is a `.git` directory.
                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, is_dir) {
                        if state.should_scan_directory(&fs_entry)
                            || (fs_entry.path.as_os_str().is_empty()
                                && abs_path.file_name() == Some(*DOT_GIT))
                        {
                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
                        } else {
                            fs_entry.kind = EntryKind::UnloadedDir;
                        }
                    }

                    state.insert_entry(fs_entry.clone(), self.fs.as_ref(), self.watcher.as_ref());
                }
                Ok(None) => {
                    // The path no longer exists: drop any git repository
                    // whose work directory was rooted at it (the entry
                    // itself was already removed above).
                    self.remove_repo_path(path, &mut state.snapshot);
                }
                Err(err) => {
                    log::error!("error reading file {abs_path:?} on event: {err:#}");
                }
            }
        }

        // Remember which paths were refreshed so they're included in the
        // next batch of reported changes.
        util::extend_sorted(
            &mut state.changed_paths,
            relative_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );
    }
4552
4553 fn remove_repo_path(&self, path: &Arc<Path>, snapshot: &mut LocalSnapshot) -> Option<()> {
4554 if !path
4555 .components()
4556 .any(|component| component.as_os_str() == *DOT_GIT)
4557 {
4558 if let Some(local_repo) = snapshot.local_repo_for_work_directory_path(path) {
4559 let id = local_repo.work_directory_id;
4560 log::debug!("remove repo path: {:?}", path);
4561 snapshot.git_repositories.remove(&id);
4562 return Some(());
4563 }
4564 }
4565
4566 Some(())
4567 }
4568
    /// Recomputes ignore statuses after `.gitignore` files changed.
    ///
    /// Under the state lock, this collects the directories whose
    /// gitignore contents were flagged as needing an update (dropping
    /// bookkeeping for `.gitignore` files that no longer exist in the
    /// snapshot), then releases the lock and fans the affected
    /// directories out to a pool of workers. Directories that become
    /// un-ignored are enqueued on `scan_job_tx` for scanning.
    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
        let mut ignores_to_update = Vec::new();
        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        let prev_snapshot;
        {
            let snapshot = &mut self.state.lock().snapshot;
            let abs_path = snapshot.abs_path.clone();
            snapshot
                .ignores_by_parent_abs_path
                .retain(|parent_abs_path, (_, needs_update)| {
                    if let Ok(parent_path) = parent_abs_path.strip_prefix(abs_path.as_path()) {
                        if *needs_update {
                            *needs_update = false;
                            if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                                ignores_to_update.push(parent_abs_path.clone());
                            }
                        }

                        // Drop the cached gitignore if the `.gitignore` file
                        // itself is gone from the snapshot.
                        let ignore_path = parent_path.join(*GITIGNORE);
                        if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                            return false;
                        }
                    }
                    true
                });

            // Enqueue one job per affected directory, skipping directories
            // nested inside one that is already queued — workers recurse
            // into subdirectories themselves.
            ignores_to_update.sort_unstable();
            let mut ignores_to_update = ignores_to_update.into_iter().peekable();
            while let Some(parent_abs_path) = ignores_to_update.next() {
                while ignores_to_update
                    .peek()
                    .map_or(false, |p| p.starts_with(&parent_abs_path))
                {
                    ignores_to_update.next().unwrap();
                }

                let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
                ignore_queue_tx
                    .send_blocking(UpdateIgnoreStatusJob {
                        abs_path: parent_abs_path,
                        ignore_stack,
                        ignore_queue: ignore_queue_tx.clone(),
                        scan_queue: scan_job_tx.clone(),
                    })
                    .unwrap();
            }

            // Capture the pre-update snapshot; workers compare entries
            // against it while rewriting ignore flags.
            prev_snapshot = snapshot.clone();
        }
        // Drop the root sender so the queue closes once all jobs
        // (including ones enqueued recursively by workers) are done.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.next_scan_request().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &prev_snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
4647
    /// Recomputes `is_ignored` for the immediate children of
    /// `job.abs_path` (relative to `snapshot`, the state captured when
    /// the update began), queues a child job for each subdirectory, and
    /// applies edits for entries whose status changed.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        log::trace!("update ignore status {:?}", job.abs_path);

        let mut ignore_stack = job.ignore_stack;
        // Push this directory's own `.gitignore`, if any, on top of the
        // rules inherited from ancestors.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job
            .abs_path
            .strip_prefix(snapshot.abs_path.as_path())
            .unwrap();

        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());

            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };

                // Scan any directories that were previously ignored and weren't previously scanned.
                if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
                    let state = self.state.lock();
                    if state.should_scan_directory(&entry) {
                        state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
                    }
                }

                // Recurse into the subdirectory via the ignore queue.
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                        scan_queue: job.scan_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            if entry.is_ignored != was_ignored {
                // Record matching edits for both trees so they stay in sync.
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let state = &mut self.state.lock();
        // Record the changed paths (kept sorted) so they're picked up by
        // the next round of change reporting.
        for edit in &entries_by_path_edits {
            if let Edit::Insert(entry) = edit {
                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
                    state.changed_paths.insert(ix, entry.path.clone());
                }
            }
        }

        state
            .snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
4718
4719 fn update_git_repositories(&self, dot_git_paths: Vec<PathBuf>) {
4720 log::trace!("reloading repositories: {dot_git_paths:?}");
4721 let mut state = self.state.lock();
4722 let scan_id = state.snapshot.scan_id;
4723 for dot_git_dir in dot_git_paths {
4724 let existing_repository_entry =
4725 state
4726 .snapshot
4727 .git_repositories
4728 .iter()
4729 .find_map(|(_, repo)| {
4730 if repo.dot_git_dir_abs_path.as_ref() == &dot_git_dir
4731 || repo.dot_git_worktree_abs_path.as_deref() == Some(&dot_git_dir)
4732 {
4733 Some(repo.clone())
4734 } else {
4735 None
4736 }
4737 });
4738
4739 match existing_repository_entry {
4740 None => {
4741 let Ok(relative) = dot_git_dir.strip_prefix(state.snapshot.abs_path()) else {
4742 return;
4743 };
4744 state.insert_git_repository(
4745 relative.into(),
4746 self.fs.as_ref(),
4747 self.watcher.as_ref(),
4748 );
4749 }
4750 Some(local_repository) => {
4751 state.snapshot.git_repositories.update(
4752 &local_repository.work_directory_id,
4753 |entry| {
4754 entry.git_dir_scan_id = scan_id;
4755 },
4756 );
4757 }
4758 };
4759 }
4760
4761 // Remove any git repositories whose .git entry no longer exists.
4762 let snapshot = &mut state.snapshot;
4763 let mut ids_to_preserve = HashSet::default();
4764 for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
4765 let exists_in_snapshot = snapshot
4766 .entry_for_id(work_directory_id)
4767 .map_or(false, |entry| {
4768 snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
4769 });
4770
4771 if exists_in_snapshot
4772 || matches!(
4773 smol::block_on(self.fs.metadata(&entry.dot_git_dir_abs_path)),
4774 Ok(Some(_))
4775 )
4776 {
4777 ids_to_preserve.insert(work_directory_id);
4778 }
4779 }
4780
4781 snapshot
4782 .git_repositories
4783 .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
4784 }
4785
4786 async fn progress_timer(&self, running: bool) {
4787 if !running {
4788 return futures::future::pending().await;
4789 }
4790
4791 #[cfg(any(test, feature = "test-support"))]
4792 if self.fs.is_fake() {
4793 return self.executor.simulate_random_delay().await;
4794 }
4795
4796 smol::Timer::after(FS_WATCH_LATENCY).await;
4797 }
4798
4799 fn is_path_private(&self, path: &Path) -> bool {
4800 !self.share_private_files && self.settings.is_path_private(path)
4801 }
4802
4803 async fn next_scan_request(&self) -> Result<ScanRequest> {
4804 let mut request = self.scan_requests_rx.recv().await?;
4805 while let Ok(next_request) = self.scan_requests_rx.try_recv() {
4806 request.relative_paths.extend(next_request.relative_paths);
4807 request.done.extend(next_request.done);
4808 }
4809 Ok(request)
4810 }
4811}
4812
/// Computes the set of entry changes between two snapshots, restricted
/// to the given `event_paths` and their descendants.
///
/// Two cursors walk the old and new entry trees in lockstep; entries
/// present on only one side become `Removed`/`Added`, while entries
/// present on both sides are compared by id and contents. `event_paths`
/// is expected to be sorted, since the cursors only ever seek forward.
fn build_diff(
    phase: BackgroundScannerPhase,
    old_snapshot: &Snapshot,
    new_snapshot: &Snapshot,
    event_paths: &[Arc<Path>],
) -> UpdatedEntriesSet {
    use BackgroundScannerPhase::*;
    use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

    // Identify which paths have changed. Use the known set of changed
    // parent paths to optimize the search.
    let mut changes = Vec::new();
    let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>(&());
    let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>(&());
    // The most recent directory reported as newly loaded; its descendants
    // are reported as `Loaded` rather than `Added`.
    let mut last_newly_loaded_dir_path = None;
    old_paths.next(&());
    new_paths.next(&());
    for path in event_paths {
        let path = PathKey(path.clone());
        // Advance each cursor to the first entry at or after `path`.
        if old_paths.item().map_or(false, |e| e.path < path.0) {
            old_paths.seek_forward(&path, Bias::Left, &());
        }
        if new_paths.item().map_or(false, |e| e.path < path.0) {
            new_paths.seek_forward(&path, Bias::Left, &());
        }
        loop {
            match (old_paths.item(), new_paths.item()) {
                (Some(old_entry), Some(new_entry)) => {
                    // Both cursors have moved beyond `path` and its subtree;
                    // continue with the next event path.
                    if old_entry.path > path.0
                        && new_entry.path > path.0
                        && !old_entry.path.starts_with(&path.0)
                        && !new_entry.path.starts_with(&path.0)
                    {
                        break;
                    }

                    match Ord::cmp(&old_entry.path, &new_entry.path) {
                        Ordering::Less => {
                            // Present only in the old snapshot.
                            changes.push((old_entry.path.clone(), old_entry.id, Removed));
                            old_paths.next(&());
                        }
                        Ordering::Equal => {
                            if phase == EventsReceivedDuringInitialScan {
                                if old_entry.id != new_entry.id {
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                }
                                // If the worktree was not fully initialized when this event was generated,
                                // we can't know whether this entry was added during the scan or whether
                                // it was merely updated.
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    AddedOrUpdated,
                                ));
                            } else if old_entry.id != new_entry.id {
                                // Same path but a different entry id: report
                                // a removal plus an addition.
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                changes.push((new_entry.path.clone(), new_entry.id, Added));
                            } else if old_entry != new_entry {
                                if old_entry.kind.is_unloaded() {
                                    // A previously-unloaded directory now has
                                    // contents: it (and its descendants) are
                                    // reported as loaded.
                                    last_newly_loaded_dir_path = Some(&new_entry.path);
                                    changes.push((new_entry.path.clone(), new_entry.id, Loaded));
                                } else {
                                    changes.push((new_entry.path.clone(), new_entry.id, Updated));
                                }
                            }
                            old_paths.next(&());
                            new_paths.next(&());
                        }
                        Ordering::Greater => {
                            // Present only in the new snapshot.
                            let is_newly_loaded = phase == InitialScan
                                || last_newly_loaded_dir_path
                                    .as_ref()
                                    .map_or(false, |dir| new_entry.path.starts_with(dir));
                            changes.push((
                                new_entry.path.clone(),
                                new_entry.id,
                                if is_newly_loaded { Loaded } else { Added },
                            ));
                            new_paths.next(&());
                        }
                    }
                }
                (Some(old_entry), None) => {
                    // Old snapshot exhausted on this subtree: removals.
                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                    old_paths.next(&());
                }
                (None, Some(new_entry)) => {
                    // New snapshot has entries the old one lacked.
                    let is_newly_loaded = phase == InitialScan
                        || last_newly_loaded_dir_path
                            .as_ref()
                            .map_or(false, |dir| new_entry.path.starts_with(dir));
                    changes.push((
                        new_entry.path.clone(),
                        new_entry.id,
                        if is_newly_loaded { Loaded } else { Added },
                    ));
                    new_paths.next(&());
                }
                (None, None) => break,
            }
        }
    }

    changes.into()
}
4918
/// Moves the first path whose final component equals `file` to the front
/// of `child_paths`, preserving the relative order of all other entries.
///
/// Used to ensure `.git` and `.gitignore` are processed before their
/// sibling entries during a directory scan. Paths with no final
/// component (e.g. `/` or paths ending in `..`) are skipped instead of
/// panicking, and the move is done with a single prefix rotation rather
/// than a remove + insert (two O(n) shifts).
fn swap_to_front(child_paths: &mut [PathBuf], file: &OsStr) {
    let position = child_paths
        .iter()
        .position(|path| path.file_name() == Some(file));
    if let Some(position) = position {
        // Rotate the matched element to index 0, shifting the prefix
        // right by one.
        child_paths[..=position].rotate_right(1);
    }
}
4928
4929fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
4930 let mut result = root_char_bag;
4931 result.extend(
4932 path.to_string_lossy()
4933 .chars()
4934 .map(|c| c.to_ascii_lowercase()),
4935 );
4936 result
4937}
4938
/// A unit of work for the background scanner: read the contents of one
/// directory.
#[derive(Debug)]
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// The same directory, relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories, used to avoid infinite recursion
    /// through recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
    /// Whether this directory lies outside the worktree root (reached
    /// via a symlink); propagated to the entries created for it.
    is_external: bool,
}
4948
/// A unit of work for the ignore-status workers: recompute `is_ignored`
/// for the entries directly inside one directory.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory whose children are updated.
    abs_path: Arc<Path>,
    /// Gitignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue used to recurse into subdirectories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
    /// Queue for scanning directories that become un-ignored.
    scan_queue: Sender<ScanJob>,
}
4955
/// Test-support extension methods on a [`Worktree`] entity handle.
pub trait WorktreeModelHandle {
    /// Mutates the worktree's directory with a sentinel file and waits
    /// for the resulting FS events to be observed, ensuring that any
    /// stale events from before the worktree was constructed have been
    /// processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;

    /// Like [`WorktreeModelHandle::flush_fs_events`], but creates the
    /// sentinel inside the root repository's `.git` folder, which may
    /// live outside the worktree and receive its FS events via a
    /// different path.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events_in_root_git_repository<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
4969
impl WorktreeModelHandle for Entity<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait until the worktree sees it...
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            let mut events = cx.events(&tree);
            while events.next().await.is_some() {
                if tree.update(cx, |tree, _| tree.entry_for_path(file_name).is_some()) {
                    break;
                }
            }

            // ...then remove it and wait for the removal to be seen too.
            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();
            while events.next().await.is_some() {
                if tree.update(cx, |tree, _| tree.entry_for_path(file_name).is_none()) {
                    break;
                }
            }

            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }

    // This function is similar to flush_fs_events, except that it waits for events to be flushed in
    // the .git folder of the root repository.
    // The reason for its existence is that a repository's .git folder might live *outside* of the
    // worktree and thus its FS events might go through a different path.
    // In order to flush those, we need to create artificial events in the .git folder and wait
    // for the repository to be reloaded.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events_in_root_git_repository<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path, mut git_dir_scan_id) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            // The repository whose work directory sorts first is taken as
            // the root repository.
            let local_repo_entry = tree
                .git_repositories
                .values()
                .min_by_key(|local_repo_entry| local_repo_entry.work_directory.clone())
                .unwrap();
            (
                tree.fs.clone(),
                local_repo_entry.dot_git_dir_abs_path.clone(),
                local_repo_entry.git_dir_scan_id,
            )
        });

        // Reports whether the root repository has been rescanned since the
        // recorded scan id, updating the recorded id when it has.
        let scan_id_increased = |tree: &mut Worktree, git_dir_scan_id: &mut usize| {
            let tree = tree.as_local().unwrap();
            let local_repo_entry = tree
                .git_repositories
                .values()
                .min_by_key(|local_repo_entry| local_repo_entry.work_directory.clone())
                .unwrap();

            if local_repo_entry.git_dir_scan_id > *git_dir_scan_id {
                *git_dir_scan_id = local_repo_entry.git_dir_scan_id;
                true
            } else {
                false
            }
        };

        async move {
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            let mut events = cx.events(&tree);
            while events.next().await.is_some() {
                if tree.update(cx, |tree, _| scan_id_increased(tree, &mut git_dir_scan_id)) {
                    break;
                }
            }

            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            while events.next().await.is_some() {
                if tree.update(cx, |tree, _| scan_id_increased(tree, &mut git_dir_scan_id)) {
                    break;
                }
            }

            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
5090
/// Cursor dimension accumulating entry counts up to a position in the
/// worktree's entry tree; used to seek traversals by path or by count.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// The rightmost (maximum) entry path summarized so far.
    max_path: &'a Path,
    /// Total entries seen, including ignored ones.
    count: usize,
    /// Entries that are not ignored.
    non_ignored_count: usize,
    /// File entries, including ignored ones.
    file_count: usize,
    /// File entries that are not ignored.
    non_ignored_file_count: usize,
}
5099
impl TraversalProgress<'_> {
    /// Returns how many summarized entries match the given include
    /// filters, selected from the four precomputed totals. Directory
    /// counts are derived by subtracting file counts; excluding both
    /// files and dirs always yields zero.
    fn count(&self, include_files: bool, include_dirs: bool, include_ignored: bool) -> usize {
        match (include_files, include_dirs, include_ignored) {
            (true, true, true) => self.count,
            (true, true, false) => self.non_ignored_count,
            (true, false, true) => self.file_count,
            (true, false, false) => self.non_ignored_file_count,
            (false, true, true) => self.count - self.file_count,
            (false, true, false) => self.non_ignored_count - self.non_ignored_file_count,
            (false, false, _) => 0,
        }
    }
}
5113
/// Lets `TraversalProgress` serve as a cursor dimension over trees
/// summarized by `EntrySummary`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    /// Folds a subtree summary into the running totals; `max_path`
    /// jumps to the summary's maximum path.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.non_ignored_count += summary.non_ignored_count;
        self.file_count += summary.file_count;
        self.non_ignored_file_count += summary.non_ignored_file_count;
    }
}
5127
5128impl Default for TraversalProgress<'_> {
5129 fn default() -> Self {
5130 Self {
5131 max_path: Path::new(""),
5132 count: 0,
5133 non_ignored_count: 0,
5134 file_count: 0,
5135 non_ignored_file_count: 0,
5136 }
5137 }
5138}
5139
/// An ordered, filtered traversal over the entries of a [`Snapshot`],
/// implemented as a cursor into the snapshot's entry tree.
#[derive(Debug)]
pub struct Traversal<'a> {
    /// The snapshot being traversed.
    snapshot: &'a Snapshot,
    /// Cursor positioned at the traversal's current entry.
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    /// Whether ignored entries are yielded.
    include_ignored: bool,
    /// Whether file entries are yielded.
    include_files: bool,
    /// Whether directory entries are yielded.
    include_dirs: bool,
}
5148
impl<'a> Traversal<'a> {
    /// Creates a traversal over `snapshot`, positioned at the first
    /// entry at or after `start_path` that matches the include filters.
    fn new(
        snapshot: &'a Snapshot,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        start_path: &Path,
    ) -> Self {
        let mut cursor = snapshot.entries_by_path.cursor(&());
        cursor.seek(&TraversalTarget::path(start_path), Bias::Left, &());
        let mut traversal = Self {
            snapshot,
            cursor,
            include_files,
            include_dirs,
            include_ignored,
        };
        // If the entry under the cursor doesn't contribute to the filtered
        // count (it's excluded by the filters, or the cursor is at the
        // end), step to the first matching entry.
        if traversal.end_offset() == traversal.start_offset() {
            traversal.next();
        }
        traversal
    }

    /// Advances past the current entry to the next one matching the
    /// filters, forwarding the underlying cursor's seek result.
    pub fn advance(&mut self) -> bool {
        self.advance_by(1)
    }

    /// Advances by `count` filter-matching entries.
    pub fn advance_by(&mut self, count: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + count,
                include_dirs: self.include_dirs,
                include_files: self.include_files,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, stopping at the
    /// next entry outside of it that matches the filters (always-included
    /// entries pass the ignore filter). Returns false when the end of the
    /// tree is reached first.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor
                .seek_forward(&TraversalTarget::successor(&entry.path), Bias::Left, &());
            if let Some(entry) = self.cursor.item() {
                if (self.include_files || !entry.is_file())
                    && (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored || entry.is_always_included)
                {
                    return true;
                }
            }
        }
        false
    }

    /// Repositions the cursor at the parent of the current entry's path.
    /// Returns false when there is no current entry or it has no parent.
    pub fn back_to_parent(&mut self) -> bool {
        let Some(parent_path) = self.cursor.item().and_then(|entry| entry.path.parent()) else {
            return false;
        };
        self.cursor
            .seek(&TraversalTarget::path(parent_path), Bias::Left, &())
    }

    /// The entry at the cursor's current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The snapshot this traversal iterates over.
    pub fn snapshot(&self) -> &'a Snapshot {
        self.snapshot
    }

    /// Number of filter-matching entries strictly before the current
    /// cursor position.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_files, self.include_dirs, self.include_ignored)
    }

    /// Number of filter-matching entries up to and including the current
    /// cursor position.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_files, self.include_dirs, self.include_ignored)
    }
}
5233
5234impl<'a> Iterator for Traversal<'a> {
5235 type Item = &'a Entry;
5236
5237 fn next(&mut self) -> Option<Self::Item> {
5238 if let Some(item) = self.entry() {
5239 self.advance();
5240 Some(item)
5241 } else {
5242 None
5243 }
5244 }
5245}
5246
/// A path-based seek target within the entries tree.
#[derive(Debug, Clone, Copy)]
pub enum PathTarget<'a> {
    /// Seek to the entry at exactly this path.
    Path(&'a Path),
    /// Seek to the first entry *beyond* the subtree rooted at this path
    /// (the path itself and all of its descendants are skipped).
    Successor(&'a Path),
}
5252
impl PathTarget<'_> {
    /// Compares this target against the path at a cursor position.
    ///
    /// For `Path`, this is a plain lexicographic comparison. For
    /// `Successor`, the target compares as `Greater` while `other` is
    /// still inside the subtree rooted at the target path (so a seek
    /// keeps advancing), and `Equal` at the first position beyond it.
    fn cmp_path(&self, other: &Path) -> Ordering {
        match self {
            PathTarget::Path(path) => path.cmp(&other),
            PathTarget::Successor(path) => {
                if other.starts_with(path) {
                    Ordering::Greater
                } else {
                    Ordering::Equal
                }
            }
        }
    }
}
5267
// Lets `PathTarget` drive seeks over trees measured by `PathProgress`.
impl<'a, S: Summary> SeekTarget<'a, PathSummary<S>, PathProgress<'a>> for PathTarget<'_> {
    fn cmp(&self, cursor_location: &PathProgress<'a>, _: &S::Context) -> Ordering {
        // Compare against the maximum path accumulated so far.
        self.cmp_path(&cursor_location.max_path)
    }
}
5273
// Lets `PathTarget` drive seeks over trees measured by `TraversalProgress`.
impl<'a, S: Summary> SeekTarget<'a, PathSummary<S>, TraversalProgress<'a>> for PathTarget<'_> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &S::Context) -> Ordering {
        // Compare against the maximum path accumulated so far.
        self.cmp_path(&cursor_location.max_path)
    }
}
5279
/// A seek target for `Traversal` cursors: either a path-based target or
/// an absolute count of entries matching a set of include filters.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek by path (exact position, or successor of a subtree).
    Path(PathTarget<'a>),
    /// Seek to the position where the filtered entry count reaches `count`.
    Count {
        count: usize,
        include_files: bool,
        include_ignored: bool,
        include_dirs: bool,
    },
}
5290
impl<'a> TraversalTarget<'a> {
    /// Target for seeking to the entry at `path`.
    fn path(path: &'a Path) -> Self {
        Self::Path(PathTarget::Path(path))
    }

    /// Target for seeking just beyond the subtree rooted at `path`.
    fn successor(path: &'a Path) -> Self {
        Self::Path(PathTarget::Successor(path))
    }

    /// Compares this target with a cursor's accumulated progress: path
    /// targets compare against the maximum path seen, count targets
    /// against the filtered entry count.
    fn cmp_progress(&self, progress: &TraversalProgress) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp_path(&progress.max_path),
            TraversalTarget::Count {
                count,
                include_files,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &progress.count(*include_files, *include_dirs, *include_ignored),
            ),
        }
    }
}
5315
// Lets `TraversalTarget` drive seeks over the entries tree.
impl<'a> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'_> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        self.cmp_progress(cursor_location)
    }
}
5321
// Lets `TraversalTarget` drive seeks over trees summarized by path only.
impl<'a> SeekTarget<'a, PathSummary<Unit>, TraversalProgress<'a>> for TraversalTarget<'_> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        self.cmp_progress(cursor_location)
    }
}
5327
/// Filters controlling which child entries a [`ChildEntriesIter`] yields.
pub struct ChildEntriesOptions {
    /// Whether file entries are yielded.
    pub include_files: bool,
    /// Whether directory entries are yielded.
    pub include_dirs: bool,
    /// Whether ignored entries are yielded.
    pub include_ignored: bool,
}
5333
/// Iterator over the direct children of one directory entry.
pub struct ChildEntriesIter<'a> {
    /// The directory whose children are yielded.
    parent_path: &'a Path,
    /// Underlying traversal, advanced sibling-to-sibling so that each
    /// child's own descendants are skipped.
    traversal: Traversal<'a>,
}
5338
5339impl<'a> Iterator for ChildEntriesIter<'a> {
5340 type Item = &'a Entry;
5341
5342 fn next(&mut self) -> Option<Self::Item> {
5343 if let Some(item) = self.traversal.entry() {
5344 if item.path.starts_with(self.parent_path) {
5345 self.traversal.advance_to_sibling();
5346 return Some(item);
5347 }
5348 }
5349 None
5350 }
5351}
5352
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a worktree entry into its protobuf representation.
    /// Fields with no wire counterpart here (e.g. `is_private` and the
    /// fuzzy-matching char bag) are simply not transmitted.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.as_ref().to_proto(),
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            is_ignored: entry.is_ignored,
            is_external: entry.is_external,
            is_fifo: entry.is_fifo,
            size: Some(entry.size),
            canonical_path: entry
                .canonical_path
                .as_ref()
                .map(|path| path.as_ref().to_proto()),
        }
    }
}
5372
impl<'a> TryFrom<(&'a CharBag, &PathMatcher, proto::Entry)> for Entry {
    type Error = anyhow::Error;

    /// Reconstructs an [`Entry`] from its protobuf representation.
    ///
    /// Fields that are not transmitted get re-derived locally: the
    /// fuzzy-matching char bag from `root_char_bag` plus the path, and
    /// `is_always_included` from the `always_included` path matcher.
    /// Remote entries are never considered private.
    fn try_from(
        (root_char_bag, always_included, entry): (&'a CharBag, &PathMatcher, proto::Entry),
    ) -> Result<Self> {
        // File vs. directory is the only kind information on the wire.
        let kind = if entry.is_dir {
            EntryKind::Dir
        } else {
            EntryKind::File
        };

        let path = Arc::<Path>::from_proto(entry.path);
        let char_bag = char_bag_for_path(*root_char_bag, &path);
        let is_always_included = always_included.is_match(path.as_ref());
        Ok(Entry {
            id: ProjectEntryId::from_proto(entry.id),
            kind,
            path,
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            size: entry.size.unwrap_or(0),
            canonical_path: entry
                .canonical_path
                .map(|path_string| Arc::from(PathBuf::from_proto(path_string))),
            is_ignored: entry.is_ignored,
            is_always_included,
            is_external: entry.is_external,
            is_private: false,
            char_bag,
            is_fifo: entry.is_fifo,
        })
    }
}
5407
/// Identifier for an entry within a project's worktrees, allocated from
/// a shared atomic counter.
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProjectEntryId(usize);

impl ProjectEntryId {
    /// The largest representable id.
    pub const MAX: Self = Self(usize::MAX);
    /// The smallest representable id.
    pub const MIN: Self = Self(usize::MIN);

    /// Allocates the next id by atomically incrementing `counter`.
    pub fn new(counter: &AtomicUsize) -> Self {
        let id = counter.fetch_add(1, SeqCst);
        Self(id)
    }

    /// Reconstructs an id from its wire representation.
    pub fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    /// Converts this id into its wire representation.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// Returns the raw numeric value of this id.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
5431
#[cfg(any(test, feature = "test-support"))]
impl CreatedEntry {
    /// Returns the contained entry when this creation was included in
    /// the worktree, or `None` when the path was excluded.
    pub fn to_included(self) -> Option<Entry> {
        if let CreatedEntry::Included(entry) = self {
            Some(entry)
        } else {
            None
        }
    }
}