1mod ignore;
2mod worktree_settings;
3#[cfg(test)]
4mod worktree_tests;
5
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context as _, Result};
8use clock::ReplicaId;
9use collections::{HashMap, HashSet, VecDeque};
10use fs::{copy_recursive, Fs, MTime, PathEvent, RemoveOptions, Watcher};
11use futures::{
12 channel::{
13 mpsc::{self, UnboundedSender},
14 oneshot,
15 },
16 future::join_all,
17 select_biased,
18 task::Poll,
19 FutureExt as _, Stream, StreamExt,
20};
21use fuzzy::CharBag;
22use git::{
23 repository::{Branch, GitRepository, RepoPath, UpstreamTrackingStatus},
24 status::{
25 FileStatus, GitSummary, StatusCode, TrackedStatus, UnmergedStatus, UnmergedStatusCode,
26 },
27 GitHostingProviderRegistry, COMMIT_MESSAGE, DOT_GIT, FSMONITOR_DAEMON, GITIGNORE, INDEX_LOCK,
28};
29use gpui::{
30 App, AppContext as _, AsyncApp, BackgroundExecutor, Context, Entity, EventEmitter, Task,
31};
32use ignore::IgnoreStack;
33use language::DiskState;
34
35use parking_lot::Mutex;
36use paths::local_settings_folder_relative_path;
37use postage::{
38 barrier,
39 prelude::{Sink as _, Stream as _},
40 watch,
41};
42use rpc::{
43 proto::{self, split_worktree_update, FromProto, ToProto},
44 AnyProtoClient,
45};
46pub use settings::WorktreeId;
47use settings::{Settings, SettingsLocation, SettingsStore};
48use smallvec::{smallvec, SmallVec};
49use smol::channel::{self, Sender};
50use std::{
51 any::Any,
52 cmp::Ordering,
53 collections::hash_map,
54 convert::TryFrom,
55 ffi::OsStr,
56 fmt,
57 future::Future,
58 mem::{self},
59 ops::{Deref, DerefMut},
60 path::{Path, PathBuf},
61 pin::Pin,
62 sync::{
63 atomic::{self, AtomicU32, AtomicUsize, Ordering::SeqCst},
64 Arc,
65 },
66 time::{Duration, Instant},
67};
68use sum_tree::{
69 Bias, Cursor, Edit, KeyedItem, SeekTarget, SumTree, Summary, TreeMap, TreeSet, Unit,
70};
71use text::{LineEnding, Rope};
72use util::{
73 paths::{home_dir, PathMatcher, SanitizedPath},
74 ResultExt,
75};
76pub use worktree_settings::WorktreeSettings;
77
/// Latency used to batch file-system events before the worktree processes them.
///
/// NOTE(review): the previous `#[cfg(feature = "test-support")]` and
/// `#[cfg(not(feature = "test-support"))]` branches both defined the identical
/// 100 ms value, making the conditional dead; collapsed into a single const.
pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
82
/// A set of local or remote files that are being opened as part of a project.
/// Responsible for tracking related FS (for local)/collab (for remote) events and corresponding updates.
/// Stores git repositories data and the diagnostics for the file(s).
///
/// Has an absolute path, and may be set to be visible in Zed UI or not.
/// May correspond to a directory or a single file.
/// Possible examples:
/// * a drag and dropped file — may be added as an invisible, "ephemeral" entry to the current worktree
/// * a directory opened in Zed — may be added as a visible entry to the current worktree
///
/// Uses [`Entry`] to track the state of each file/directory, can look up absolute paths for entries.
pub enum Worktree {
    /// A worktree backed by this machine's filesystem, kept up to date by
    /// background scanner tasks.
    Local(LocalWorktree),
    /// A worktree mirrored from a remote peer; its state is kept up to date by
    /// applying `proto::UpdateWorktree` messages received over RPC.
    Remote(RemoteWorktree),
}
98
/// An entry, created in the worktree.
#[derive(Debug)]
pub enum CreatedEntry {
    /// Got created and indexed by the worktree, receiving a corresponding entry.
    Included(Entry),
    /// Got created, but not indexed due to falling under exclusion filters.
    Excluded {
        // Absolute path of the file that was created on disk but not indexed.
        abs_path: PathBuf,
    },
}
107
/// A text file loaded from a worktree, together with its file metadata handle.
pub struct LoadedFile {
    pub file: Arc<File>,
    pub text: String,
}
112
/// A binary file loaded from a worktree, with its raw byte content.
pub struct LoadedBinaryFile {
    pub file: Arc<File>,
    pub content: Vec<u8>,
}
117
/// A worktree backed by the local filesystem.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Channel used to ask the background scanner to rescan specific paths.
    scan_requests_tx: channel::Sender<ScanRequest>,
    // Channel used to ask the background scanner to scan whole path prefixes.
    path_prefixes_to_scan_tx: channel::Sender<PathPrefixScanRequest>,
    // Watch pair; `true` while a scan is in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    // Keeps the background scanner tasks alive for the worktree's lifetime.
    _background_scanner_tasks: Vec<Task<()>>,
    // Present while an observer (e.g. a collab peer) is receiving updates.
    update_observer: Option<UpdateObservationState>,
    fs: Arc<dyn Fs>,
    // Whether the underlying filesystem is case sensitive (assumed true on
    // detection failure — see `Worktree::local`).
    fs_case_sensitive: bool,
    visible: bool,
    // Shared counter used to allocate ids for newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    settings: WorktreeSettings,
    share_private_files: bool,
}
132
/// Request asking the background scanner to scan everything under a path prefix.
pub struct PathPrefixScanRequest {
    path: Arc<Path>,
    // Barrier senders dropped when the scan completes, unblocking waiters.
    done: SmallVec<[barrier::Sender; 1]>,
}
137
/// Request asking the background scanner to rescan a specific set of relative paths.
struct ScanRequest {
    relative_paths: Vec<Arc<Path>>,
    // Barrier senders dropped when the scan completes, unblocking waiters.
    done: SmallVec<[barrier::Sender; 1]>,
}
142
/// A worktree mirrored from a remote peer over RPC.
pub struct RemoteWorktree {
    // Foreground copy of the snapshot, refreshed from `background_snapshot`.
    snapshot: Snapshot,
    // Snapshot updated on a background task, plus the updates not yet
    // forwarded to any `update_observer`.
    background_snapshot: Arc<Mutex<(Snapshot, Vec<proto::UpdateWorktree>)>>,
    project_id: u64,
    client: AnyProtoClient,
    file_scan_inclusions: PathMatcher,
    // Sender feeding the background task; `None` once disconnected.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    update_observer: Option<mpsc::UnboundedSender<proto::UpdateWorktree>>,
    // Waiters resolved once the snapshot reaches a given scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    visible: bool,
    disconnected: bool,
}
156
/// An immutable-ish view of a worktree's entries and git repositories at a
/// point in time. Cheap to clone; shared between foreground and background.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: SanitizedPath,
    root_name: String,
    // Character bag of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    // Entries ordered by path, and the same entries indexed by id.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    always_included_entries: Vec<Arc<Path>>,
    repositories: SumTree<RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
180
/// Snapshot state for a single git repository discovered in (or above) the worktree.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RepositoryEntry {
    /// The git status entries for this repository.
    /// Note that the paths on this repository are relative to the git work directory.
    /// If the .git folder is external to Zed, these paths will be relative to that folder,
    /// and this data structure might reference files external to this worktree.
    ///
    /// For example:
    ///
    ///     my_root_folder/          <-- repository root
    ///       .git
    ///       my_sub_folder_1/
    ///         project_root/        <-- Project root, Zed opened here
    ///           changed_file_1     <-- File with changes, in worktree
    ///       my_sub_folder_2/
    ///         changed_file_2       <-- File with changes, out of worktree
    ///       ...
    ///
    /// With this setup, this field would contain 2 entries, like so:
    /// - my_sub_folder_1/project_root/changed_file_1
    /// - my_sub_folder_2/changed_file_2
    pub(crate) statuses_by_path: SumTree<StatusEntry>,
    // Project entry id of the repository's working directory.
    work_directory_id: ProjectEntryId,
    pub work_directory: WorkDirectory,
    pub(crate) current_branch: Option<Branch>,
    // Repo-relative paths currently in a merge-conflict state.
    pub current_merge_conflicts: TreeSet<RepoPath>,
}
208
// Allows calling `WorkDirectory` methods (e.g. `relativize`) directly on a
// `RepositoryEntry`.
impl Deref for RepositoryEntry {
    type Target = WorkDirectory;

    fn deref(&self) -> &Self::Target {
        &self.work_directory
    }
}
216
217impl RepositoryEntry {
218 pub fn branch(&self) -> Option<&Branch> {
219 self.current_branch.as_ref()
220 }
221
222 pub fn work_directory_id(&self) -> ProjectEntryId {
223 self.work_directory_id
224 }
225
226 pub fn status(&self) -> impl Iterator<Item = StatusEntry> + '_ {
227 self.statuses_by_path.iter().cloned()
228 }
229
230 pub fn status_len(&self) -> usize {
231 self.statuses_by_path.summary().item_summary.count
232 }
233
234 pub fn status_summary(&self) -> GitSummary {
235 self.statuses_by_path.summary().item_summary
236 }
237
238 pub fn status_for_path(&self, path: &RepoPath) -> Option<StatusEntry> {
239 self.statuses_by_path
240 .get(&PathKey(path.0.clone()), &())
241 .cloned()
242 }
243
244 pub fn initial_update(&self) -> proto::RepositoryEntry {
245 proto::RepositoryEntry {
246 work_directory_id: self.work_directory_id.to_proto(),
247 branch: self
248 .current_branch
249 .as_ref()
250 .map(|branch| branch.name.to_string()),
251 branch_summary: self.current_branch.as_ref().map(branch_to_proto),
252 updated_statuses: self
253 .statuses_by_path
254 .iter()
255 .map(|entry| entry.to_proto())
256 .collect(),
257 removed_statuses: Default::default(),
258 current_merge_conflicts: self
259 .current_merge_conflicts
260 .iter()
261 .map(|repo_path| repo_path.to_proto())
262 .collect(),
263 }
264 }
265
266 pub fn build_update(&self, old: &Self) -> proto::RepositoryEntry {
267 let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
268 let mut removed_statuses: Vec<String> = Vec::new();
269
270 let mut new_statuses = self.statuses_by_path.iter().peekable();
271 let mut old_statuses = old.statuses_by_path.iter().peekable();
272
273 let mut current_new_entry = new_statuses.next();
274 let mut current_old_entry = old_statuses.next();
275 loop {
276 match (current_new_entry, current_old_entry) {
277 (Some(new_entry), Some(old_entry)) => {
278 match new_entry.repo_path.cmp(&old_entry.repo_path) {
279 Ordering::Less => {
280 updated_statuses.push(new_entry.to_proto());
281 current_new_entry = new_statuses.next();
282 }
283 Ordering::Equal => {
284 if new_entry.status != old_entry.status {
285 updated_statuses.push(new_entry.to_proto());
286 }
287 current_old_entry = old_statuses.next();
288 current_new_entry = new_statuses.next();
289 }
290 Ordering::Greater => {
291 removed_statuses.push(old_entry.repo_path.as_ref().to_proto());
292 current_old_entry = old_statuses.next();
293 }
294 }
295 }
296 (None, Some(old_entry)) => {
297 removed_statuses.push(old_entry.repo_path.as_ref().to_proto());
298 current_old_entry = old_statuses.next();
299 }
300 (Some(new_entry), None) => {
301 updated_statuses.push(new_entry.to_proto());
302 current_new_entry = new_statuses.next();
303 }
304 (None, None) => break,
305 }
306 }
307
308 proto::RepositoryEntry {
309 work_directory_id: self.work_directory_id.to_proto(),
310 branch: self
311 .current_branch
312 .as_ref()
313 .map(|branch| branch.name.to_string()),
314 branch_summary: self.current_branch.as_ref().map(branch_to_proto),
315 updated_statuses,
316 removed_statuses,
317 current_merge_conflicts: self
318 .current_merge_conflicts
319 .iter()
320 .map(|path| path.as_ref().to_proto())
321 .collect(),
322 }
323 }
324}
325
326pub fn branch_to_proto(branch: &git::repository::Branch) -> proto::Branch {
327 proto::Branch {
328 is_head: branch.is_head,
329 name: branch.name.to_string(),
330 unix_timestamp: branch
331 .most_recent_commit
332 .as_ref()
333 .map(|commit| commit.commit_timestamp as u64),
334 upstream: branch.upstream.as_ref().map(|upstream| proto::GitUpstream {
335 ref_name: upstream.ref_name.to_string(),
336 tracking: upstream
337 .tracking
338 .status()
339 .map(|upstream| proto::UpstreamTracking {
340 ahead: upstream.ahead as u64,
341 behind: upstream.behind as u64,
342 }),
343 }),
344 most_recent_commit: branch
345 .most_recent_commit
346 .as_ref()
347 .map(|commit| proto::CommitSummary {
348 sha: commit.sha.to_string(),
349 subject: commit.subject.to_string(),
350 commit_timestamp: commit.commit_timestamp,
351 }),
352 }
353}
354
355pub fn proto_to_branch(proto: &proto::Branch) -> git::repository::Branch {
356 git::repository::Branch {
357 is_head: proto.is_head,
358 name: proto.name.clone().into(),
359 upstream: proto
360 .upstream
361 .as_ref()
362 .map(|upstream| git::repository::Upstream {
363 ref_name: upstream.ref_name.to_string().into(),
364 tracking: upstream
365 .tracking
366 .as_ref()
367 .map(|tracking| {
368 git::repository::UpstreamTracking::Tracked(UpstreamTrackingStatus {
369 ahead: tracking.ahead as u32,
370 behind: tracking.behind as u32,
371 })
372 })
373 .unwrap_or(git::repository::UpstreamTracking::Gone),
374 }),
375 most_recent_commit: proto.most_recent_commit.as_ref().map(|commit| {
376 git::repository::CommitSummary {
377 sha: commit.sha.to_string().into(),
378 subject: commit.subject.to_string().into(),
379 commit_timestamp: commit.commit_timestamp,
380 }
381 }),
382 }
383}
384
/// This path corresponds to the 'content path' of a repository in relation
/// to Zed's project root.
/// In the majority of the cases, this is the folder that contains the .git folder.
/// But if a sub-folder of a git repository is opened, this corresponds to the
/// project root and the .git folder is located in a parent directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub enum WorkDirectory {
    /// The repository's work directory is inside the worktree.
    InProject {
        // Path of the work directory, relative to the worktree root.
        relative_path: Arc<Path>,
    },
    /// The repository's work directory is a parent of the worktree root
    /// (the .git folder lives outside the project).
    AboveProject {
        // Absolute path of the work directory on disk.
        absolute_path: Arc<Path>,
        // Path of the worktree root, relative to the repository root.
        location_in_repo: Arc<Path>,
    },
}
400
401impl WorkDirectory {
402 #[cfg(test)]
403 fn in_project(path: &str) -> Self {
404 let path = Path::new(path);
405 Self::InProject {
406 relative_path: path.into(),
407 }
408 }
409
410 #[cfg(test)]
411 fn canonicalize(&self) -> Self {
412 match self {
413 WorkDirectory::InProject { relative_path } => WorkDirectory::InProject {
414 relative_path: relative_path.clone(),
415 },
416 WorkDirectory::AboveProject {
417 absolute_path,
418 location_in_repo,
419 } => WorkDirectory::AboveProject {
420 absolute_path: absolute_path.canonicalize().unwrap().into(),
421 location_in_repo: location_in_repo.clone(),
422 },
423 }
424 }
425
426 pub fn is_above_project(&self) -> bool {
427 match self {
428 WorkDirectory::InProject { .. } => false,
429 WorkDirectory::AboveProject { .. } => true,
430 }
431 }
432
433 fn path_key(&self) -> PathKey {
434 match self {
435 WorkDirectory::InProject { relative_path } => PathKey(relative_path.clone()),
436 WorkDirectory::AboveProject { .. } => PathKey(Path::new("").into()),
437 }
438 }
439
440 /// Returns true if the given path is a child of the work directory.
441 ///
442 /// Note that the path may not be a member of this repository, if there
443 /// is a repository in a directory between these two paths
444 /// external .git folder in a parent folder of the project root.
445 #[track_caller]
446 pub fn directory_contains(&self, path: impl AsRef<Path>) -> bool {
447 let path = path.as_ref();
448 debug_assert!(path.is_relative());
449 match self {
450 WorkDirectory::InProject { relative_path } => path.starts_with(relative_path),
451 WorkDirectory::AboveProject { .. } => true,
452 }
453 }
454
455 /// relativize returns the given project path relative to the root folder of the
456 /// repository.
457 /// If the root of the repository (and its .git folder) are located in a parent folder
458 /// of the project root folder, then the returned RepoPath is relative to the root
459 /// of the repository and not a valid path inside the project.
460 pub fn relativize(&self, path: &Path) -> Result<RepoPath> {
461 // path is assumed to be relative to worktree root.
462 debug_assert!(path.is_relative());
463 match self {
464 WorkDirectory::InProject { relative_path } => Ok(path
465 .strip_prefix(relative_path)
466 .map_err(|_| {
467 anyhow!(
468 "could not relativize {:?} against {:?}",
469 path,
470 relative_path
471 )
472 })?
473 .into()),
474 WorkDirectory::AboveProject {
475 location_in_repo, ..
476 } => {
477 // Avoid joining a `/` to location_in_repo in the case of a single-file worktree.
478 if path == Path::new("") {
479 Ok(RepoPath(location_in_repo.clone()))
480 } else {
481 Ok(location_in_repo.join(path).into())
482 }
483 }
484 }
485 }
486
487 /// This is the opposite operation to `relativize` above
488 pub fn unrelativize(&self, path: &RepoPath) -> Option<Arc<Path>> {
489 match self {
490 WorkDirectory::InProject { relative_path } => Some(relative_path.join(path).into()),
491 WorkDirectory::AboveProject {
492 location_in_repo, ..
493 } => {
494 // If we fail to strip the prefix, that means this status entry is
495 // external to this worktree, and we definitely won't have an entry_id
496 path.strip_prefix(location_in_repo).ok().map(Into::into)
497 }
498 }
499 }
500
501 pub fn display_name(&self) -> String {
502 match self {
503 WorkDirectory::InProject { relative_path } => relative_path.display().to_string(),
504 WorkDirectory::AboveProject {
505 absolute_path,
506 location_in_repo,
507 } => {
508 let num_of_dots = location_in_repo.components().count();
509
510 "../".repeat(num_of_dots)
511 + &absolute_path
512 .file_name()
513 .map(|s| s.to_string_lossy())
514 .unwrap_or_default()
515 + "/"
516 }
517 }
518 }
519}
520
521impl Default for WorkDirectory {
522 fn default() -> Self {
523 Self::InProject {
524 relative_path: Arc::from(Path::new("")),
525 }
526 }
527}
528
/// Newtype over the project entry id of a repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
531
// Lets a `WorkDirectoryEntry` be used wherever a `&ProjectEntryId` is expected.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
539
540impl From<ProjectEntryId> for WorkDirectoryEntry {
541 fn from(value: ProjectEntryId) -> Self {
542 WorkDirectoryEntry(value)
543 }
544}
545
/// A [`Snapshot`] augmented with local-only state: gitignores, local git
/// repositories, and the root directory's file handle.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    /// The file handle of the root dir
    /// (so we can find it after it's been moved)
    root_file_handle: Option<Arc<dyn fs::FileHandle>>,
}
559
/// Mutable state owned by the background scanner while it processes
/// filesystem events and scan requests.
struct BackgroundScannerState {
    snapshot: LocalSnapshot,
    // Directories that have already been fully scanned.
    scanned_dirs: HashSet<ProjectEntryId>,
    // Prefixes and individual paths queued for scanning.
    path_prefixes_to_scan: HashSet<Arc<Path>>,
    paths_to_scan: HashSet<Arc<Path>>,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entries: HashMap<u64, Entry>,
    changed_paths: Vec<Arc<Path>>,
    // Snapshot as of the previous completed update, used for diffing.
    prev_snapshot: Snapshot,
    git_hosting_provider_registry: Option<Arc<GitHostingProviderRegistry>>,
    // In-flight git status scan tasks, keyed by work-directory path.
    repository_scans: HashMap<PathKey, Task<()>>,
}
575
/// Local-only state for a git repository: the live repository handle plus
/// scan bookkeeping and .git locations on disk.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    pub(crate) work_directory_id: ProjectEntryId,
    pub(crate) work_directory: WorkDirectory,
    // Scan ids recording when the .git dir / statuses were last scanned.
    pub(crate) git_dir_scan_id: usize,
    pub(crate) status_scan_id: usize,
    // Live handle to the underlying git repository.
    pub(crate) repo_ptr: Arc<dyn GitRepository>,
    /// Absolute path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) dot_git_dir_abs_path: Arc<Path>,
    /// Absolute path to the .git file, if we're in a git worktree.
    pub(crate) dot_git_worktree_abs_path: Option<Arc<Path>>,
    pub current_merge_head_shas: Vec<String>,
    pub merge_message: Option<String>,
}
591
// Stores local repositories in a sum tree keyed by work-directory path;
// the summary carries no aggregate data (`Unit`).
impl sum_tree::Item for LocalRepositoryEntry {
    type Summary = PathSummary<Unit>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.work_directory.path_key().0,
            item_summary: Unit,
        }
    }
}
602
// Repositories are looked up by their work directory's path key.
impl KeyedItem for LocalRepositoryEntry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        self.work_directory.path_key()
    }
}
610
impl LocalRepositoryEntry {
    /// Live handle to the underlying git repository.
    pub fn repo(&self) -> &Arc<dyn GitRepository> {
        &self.repo_ptr
    }
}
616
// Allows calling `WorkDirectory` methods directly on a `LocalRepositoryEntry`.
impl Deref for LocalRepositoryEntry {
    type Target = WorkDirectory;

    fn deref(&self) -> &Self::Target {
        &self.work_directory
    }
}
624
// A `LocalSnapshot` exposes the full `Snapshot` API via deref.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
632
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
638
/// Messages sent from the background scanner to the foreground worktree.
#[derive(Debug)]
enum ScanState {
    /// A scan pass has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        changes: UpdatedEntriesSet,
        // Dropped to unblock callers waiting on this scan.
        barrier: SmallVec<[barrier::Sender; 1]>,
        // Whether more scanning is still in progress.
        scanning: bool,
    },
    /// The worktree root itself moved; `None` if its new path is unknown.
    RootUpdated {
        new_path: Option<SanitizedPath>,
    },
}
652
/// State held while an observer is streaming worktree updates
/// (e.g. sharing with collaborators).
struct UpdateObservationState {
    // Receives each new snapshot plus the entry/repository diffs.
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    // Signalled to resume sending after the observer catches up.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
659
/// Events emitted by a [`Worktree`] entity.
#[derive(Clone)]
pub enum Event {
    /// Entries were added, removed, or modified.
    UpdatedEntries(UpdatedEntriesSet),
    /// Git repository state changed.
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
    /// A single entry was deleted.
    DeletedEntry(ProjectEntryId),
}
666
// Path used to address the worktree root in settings lookups.
const EMPTY_PATH: &str = "";

impl EventEmitter<Event> for Worktree {}
670
671impl Worktree {
    /// Creates a local worktree rooted at `path`.
    ///
    /// Stats the root path, detects filesystem case sensitivity (falling back
    /// to `true` on error), opens a handle to the root directory, builds the
    /// initial snapshot, subscribes to settings changes (restarting the
    /// background scanners when worktree settings change), and starts the
    /// background scanner.
    pub async fn local(
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncApp,
    ) -> Result<Entity<Self>> {
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });

        // Keep a handle to the root dir so it can be located after a move.
        let root_file_handle = fs.open_handle(&abs_path).await.log_err();

        cx.new(move |cx: &mut Context<Worktree>| {
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot::new(
                    cx.entity_id().as_u64(),
                    abs_path
                        .file_name()
                        .map_or(String::new(), |f| f.to_string_lossy().to_string()),
                    abs_path.clone(),
                ),
                root_file_handle,
            };

            let worktree_id = snapshot.id();
            let settings_location = Some(SettingsLocation {
                worktree_id,
                path: Path::new(EMPTY_PATH),
            });

            let settings = WorktreeSettings::get(settings_location, cx).clone();
            // Restart the background scanners whenever this worktree's
            // settings change.
            cx.observe_global::<SettingsStore>(move |this, cx| {
                if let Self::Local(this) = this {
                    let settings = WorktreeSettings::get(settings_location, cx).clone();
                    if this.settings != settings {
                        this.settings = settings;
                        this.restart_background_scanners(cx);
                    }
                }
            })
            .detach();

            let share_private_files = false;
            // If the root stat succeeded, seed the snapshot with a root entry.
            if let Some(metadata) = metadata {
                let mut entry = Entry::new(
                    Arc::from(Path::new("")),
                    &metadata,
                    &next_entry_id,
                    snapshot.root_char_bag,
                    None,
                );
                // Single-file worktree: mark the file private per settings.
                if !metadata.is_dir {
                    entry.is_private = !share_private_files
                        && settings.is_path_private(abs_path.file_name().unwrap().as_ref());
                }
                snapshot.insert_entry(entry, fs.as_ref());
            }

            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
            let mut worktree = LocalWorktree {
                share_private_files,
                next_entry_id,
                snapshot,
                is_scanning: watch::channel_with(true),
                update_observer: None,
                scan_requests_tx,
                path_prefixes_to_scan_tx,
                _background_scanner_tasks: Vec::new(),
                fs,
                fs_case_sensitive,
                visible,
                settings,
            };
            worktree.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
            Worktree::Local(worktree)
        })
    }
762
    /// Creates a worktree that mirrors a remote peer's worktree.
    ///
    /// Updates arrive as `proto::UpdateWorktree` messages: a background task
    /// applies them to a shared snapshot, and a foreground task then copies
    /// that snapshot into the entity, forwards pending updates to any
    /// observer, and resolves snapshot subscriptions.
    pub fn remote(
        project_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: AnyProtoClient,
        cx: &mut App,
    ) -> Entity<Self> {
        cx.new(|cx: &mut Context<Self>| {
            let snapshot = Snapshot::new(
                worktree.id,
                worktree.root_name,
                Arc::<Path>::from_proto(worktree.abs_path),
            );

            let background_snapshot = Arc::new(Mutex::new((snapshot.clone(), Vec::new())));
            let (background_updates_tx, mut background_updates_rx) = mpsc::unbounded();
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            let worktree_id = snapshot.id();
            let settings_location = Some(SettingsLocation {
                worktree_id,
                path: Path::new(EMPTY_PATH),
            });

            let settings = WorktreeSettings::get(settings_location, cx).clone();
            let worktree = RemoteWorktree {
                client,
                project_id,
                replica_id,
                snapshot,
                file_scan_inclusions: settings.file_scan_inclusions.clone(),
                background_snapshot: background_snapshot.clone(),
                updates_tx: Some(background_updates_tx),
                update_observer: None,
                snapshot_subscriptions: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            };

            // Apply updates to a separate snapshot in a background task, then
            // send them to a foreground task which updates the model.
            cx.background_spawn(async move {
                while let Some(update) = background_updates_rx.next().await {
                    {
                        let mut lock = background_snapshot.lock();
                        if let Err(error) = lock
                            .0
                            .apply_remote_update(update.clone(), &settings.file_scan_inclusions)
                        {
                            log::error!("error applying worktree update: {}", error);
                        }
                        // Queue the raw update for forwarding to observers.
                        lock.1.push(update);
                    }
                    snapshot_updated_tx.send(()).await.ok();
                }
            })
            .detach();

            // On the foreground task, update to the latest snapshot and notify
            // any update observer of all updates that led to that snapshot.
            cx.spawn(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_remote_mut().unwrap();
                        {
                            let mut lock = this.background_snapshot.lock();
                            this.snapshot = lock.0.clone();
                            if let Some(tx) = &this.update_observer {
                                for update in lock.1.drain(..) {
                                    tx.unbounded_send(update).ok();
                                }
                            }
                        };
                        cx.emit(Event::UpdatedEntries(Arc::default()));
                        cx.notify();
                        // Resolve subscriptions whose target scan id has now
                        // been observed; they are ordered, so stop at the
                        // first unmet one.
                        while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                            if this.observed_snapshot(*scan_id) {
                                let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                let _ = tx.send(());
                            } else {
                                break;
                            }
                        }
                    })?;
                }
                anyhow::Ok(())
            })
            .detach();

            Worktree::Remote(worktree)
        })
    }
855
856 pub fn as_local(&self) -> Option<&LocalWorktree> {
857 if let Worktree::Local(worktree) = self {
858 Some(worktree)
859 } else {
860 None
861 }
862 }
863
864 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
865 if let Worktree::Remote(worktree) = self {
866 Some(worktree)
867 } else {
868 None
869 }
870 }
871
872 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
873 if let Worktree::Local(worktree) = self {
874 Some(worktree)
875 } else {
876 None
877 }
878 }
879
880 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
881 if let Worktree::Remote(worktree) = self {
882 Some(worktree)
883 } else {
884 None
885 }
886 }
887
888 pub fn is_local(&self) -> bool {
889 matches!(self, Worktree::Local(_))
890 }
891
892 pub fn is_remote(&self) -> bool {
893 !self.is_local()
894 }
895
    /// Settings location addressing this worktree's root.
    pub fn settings_location(&self, _: &Context<Self>) -> SettingsLocation<'static> {
        SettingsLocation {
            worktree_id: self.id(),
            path: Path::new(EMPTY_PATH),
        }
    }
902
    /// A clone of the current snapshot (the plain `Snapshot`, without
    /// local-only state).
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.snapshot.clone(),
            Worktree::Remote(worktree) => worktree.snapshot.clone(),
        }
    }
909
    /// The current scan id of this worktree's snapshot.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
916
    /// Builds the proto metadata describing this worktree, for sharing with peers.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.is_visible(),
            abs_path: self.abs_path().to_proto(),
        }
    }
925
    /// The most recent scan id that (along with all preceding scans) has completed.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
932
    /// Whether this worktree is shown in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
939
    /// The replica id of this worktree; local worktrees are always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
946
    /// The absolute path of this worktree's root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone().into(),
            Worktree::Remote(worktree) => worktree.abs_path.clone().into(),
        }
    }
953
    /// A `File` handle for the worktree root entry, if the root entry exists.
    pub fn root_file(&self, cx: &Context<Self>) -> Option<Arc<File>> {
        let entry = self.root_entry()?;
        Some(File::for_entry(entry.clone(), cx.entity()))
    }
958
    /// Starts streaming `proto::UpdateWorktree` messages to `callback`,
    /// delegating to the variant-specific implementation.
    pub fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        match self {
            Worktree::Local(this) => this.observe_updates(project_id, cx, callback),
            Worktree::Remote(this) => this.observe_updates(project_id, cx, callback),
        }
    }
969
970 pub fn stop_observing_updates(&mut self) {
971 match self {
972 Worktree::Local(this) => {
973 this.update_observer.take();
974 }
975 Worktree::Remote(this) => {
976 this.update_observer.take();
977 }
978 }
979 }
980
    /// Test helper: whether an update observer is currently attached.
    #[cfg(any(test, feature = "test-support"))]
    pub fn has_update_observer(&self) -> bool {
        match self {
            Worktree::Local(this) => this.update_observer.is_some(),
            Worktree::Remote(this) => this.update_observer.is_some(),
        }
    }
988
    /// Loads a text file from the worktree. Only supported for local
    /// worktrees; remote worktrees return an error.
    pub fn load_file(&self, path: &Path, cx: &Context<Worktree>) -> Task<Result<LoadedFile>> {
        match self {
            Worktree::Local(this) => this.load_file(path, cx),
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktrees can't yet load files")))
            }
        }
    }
997
    /// Loads the git-index (staged) version of the file at `path`, on a
    /// background task. Errors if no repository contains the path, or for
    /// remote worktrees. `Ok(None)` is possible when the index has no text
    /// for the path.
    pub fn load_staged_file(&self, path: &Path, cx: &App) -> Task<Result<Option<String>>> {
        match self {
            Worktree::Local(this) => {
                let path = Arc::from(path);
                let snapshot = this.snapshot();
                cx.background_spawn(async move {
                    if let Some(repo) = snapshot.repository_for_path(&path) {
                        if let Some(repo_path) = repo.relativize(&path).log_err() {
                            if let Some(git_repo) =
                                snapshot.git_repositories.get(&repo.work_directory_id)
                            {
                                return Ok(git_repo.repo_ptr.load_index_text(&repo_path));
                            }
                        }
                    }
                    Err(anyhow!("No repository found for {path:?}"))
                })
            }
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktrees can't yet load staged files")))
            }
        }
    }
1021
    /// Loads the last-committed (HEAD) version of the file at `path`, on a
    /// background task. Mirrors `load_staged_file` but reads committed text.
    pub fn load_committed_file(&self, path: &Path, cx: &App) -> Task<Result<Option<String>>> {
        match self {
            Worktree::Local(this) => {
                let path = Arc::from(path);
                let snapshot = this.snapshot();
                cx.background_spawn(async move {
                    if let Some(repo) = snapshot.repository_for_path(&path) {
                        if let Some(repo_path) = repo.relativize(&path).log_err() {
                            if let Some(git_repo) =
                                snapshot.git_repositories.get(&repo.work_directory_id)
                            {
                                return Ok(git_repo.repo_ptr.load_committed_text(&repo_path));
                            }
                        }
                    }
                    Err(anyhow!("No repository found for {path:?}"))
                })
            }
            Worktree::Remote(_) => Task::ready(Err(anyhow!(
                "remote worktrees can't yet load committed files"
            ))),
        }
    }
1045
    /// Loads a file's raw bytes. Only supported for local worktrees.
    pub fn load_binary_file(
        &self,
        path: &Path,
        cx: &Context<Worktree>,
    ) -> Task<Result<LoadedBinaryFile>> {
        match self {
            Worktree::Local(this) => this.load_binary_file(path, cx),
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktrees can't yet load binary files")))
            }
        }
    }
1058
    /// Writes `text` to `path` with the given line ending. Only supported for
    /// local worktrees.
    pub fn write_file(
        &self,
        path: &Path,
        text: Rope,
        line_ending: LineEnding,
        cx: &Context<Worktree>,
    ) -> Task<Result<Arc<File>>> {
        match self {
            Worktree::Local(this) => this.write_file(path, text, line_ending, cx),
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktree can't yet write files")))
            }
        }
    }
1073
    /// Creates a file or directory at `path` (relative to the worktree root).
    ///
    /// Local worktrees perform the creation directly. Remote worktrees send a
    /// `CreateProjectEntry` request to the host; when the host reports no
    /// entry for the created path, the result is `CreatedEntry::Excluded`
    /// carrying the absolute path instead.
    pub fn create_entry(
        &mut self,
        path: impl Into<Arc<Path>>,
        is_directory: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let path: Arc<Path> = path.into();
        let worktree_id = self.id();
        match self {
            Worktree::Local(this) => this.create_entry(path, is_directory, cx),
            Worktree::Remote(this) => {
                let project_id = this.project_id;
                let request = this.client.request(proto::CreateProjectEntry {
                    worktree_id: worktree_id.to_proto(),
                    project_id,
                    path: path.as_ref().to_proto(),
                    is_directory,
                });
                cx.spawn(move |this, mut cx| async move {
                    let response = request.await?;
                    match response.entry {
                        // The host produced a tracked entry: insert it into
                        // this replica at the scan id the host reported.
                        Some(entry) => this
                            .update(&mut cx, |worktree, cx| {
                                worktree.as_remote_mut().unwrap().insert_entry(
                                    entry,
                                    response.worktree_scan_id as usize,
                                    cx,
                                )
                            })?
                            .await
                            .map(CreatedEntry::Included),
                        // No entry in the response: the created path is not
                        // tracked by the worktree, so report its absolute path.
                        None => {
                            let abs_path = this.update(&mut cx, |worktree, _| {
                                worktree
                                    .absolutize(&path)
                                    .with_context(|| format!("absolutizing {path:?}"))
                            })??;
                            Ok(CreatedEntry::Excluded { abs_path })
                        }
                    }
                })
            }
        }
    }
1118
1119 pub fn delete_entry(
1120 &mut self,
1121 entry_id: ProjectEntryId,
1122 trash: bool,
1123 cx: &mut Context<Worktree>,
1124 ) -> Option<Task<Result<()>>> {
1125 let task = match self {
1126 Worktree::Local(this) => this.delete_entry(entry_id, trash, cx),
1127 Worktree::Remote(this) => this.delete_entry(entry_id, trash, cx),
1128 }?;
1129
1130 let entry = match self {
1131 Worktree::Local(ref this) => this.entry_for_id(entry_id),
1132 Worktree::Remote(ref this) => this.entry_for_id(entry_id),
1133 }?;
1134
1135 let mut ids = vec![entry_id];
1136 let path = &*entry.path;
1137
1138 self.get_children_ids_recursive(path, &mut ids);
1139
1140 for id in ids {
1141 cx.emit(Event::DeletedEntry(id));
1142 }
1143 Some(task)
1144 }
1145
1146 fn get_children_ids_recursive(&self, path: &Path, ids: &mut Vec<ProjectEntryId>) {
1147 let children_iter = self.child_entries(path);
1148 for child in children_iter {
1149 ids.push(child.id);
1150 self.get_children_ids_recursive(&child.path, ids);
1151 }
1152 }
1153
1154 pub fn rename_entry(
1155 &mut self,
1156 entry_id: ProjectEntryId,
1157 new_path: impl Into<Arc<Path>>,
1158 cx: &Context<Self>,
1159 ) -> Task<Result<CreatedEntry>> {
1160 let new_path = new_path.into();
1161 match self {
1162 Worktree::Local(this) => this.rename_entry(entry_id, new_path, cx),
1163 Worktree::Remote(this) => this.rename_entry(entry_id, new_path, cx),
1164 }
1165 }
1166
    /// Copies the entry with `entry_id` to `new_path` (worktree-relative).
    ///
    /// When `relative_worktree_source_path` is provided, the copy source is
    /// that path resolved against the worktree root instead of the entry's
    /// own path. Returns the new entry, or `None` if no tracked entry
    /// resulted from the copy.
    pub fn copy_entry(
        &mut self,
        entry_id: ProjectEntryId,
        relative_worktree_source_path: Option<PathBuf>,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Self>,
    ) -> Task<Result<Option<Entry>>> {
        let new_path: Arc<Path> = new_path.into();
        match self {
            Worktree::Local(this) => {
                this.copy_entry(entry_id, relative_worktree_source_path, new_path, cx)
            }
            Worktree::Remote(this) => {
                let relative_worktree_source_path = relative_worktree_source_path
                    .map(|relative_worktree_source_path| relative_worktree_source_path.to_proto());
                let response = this.client.request(proto::CopyProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                    relative_worktree_source_path,
                    new_path: new_path.to_proto(),
                });
                cx.spawn(move |this, mut cx| async move {
                    let response = response.await?;
                    match response.entry {
                        // Insert the host's entry into this replica at the
                        // scan id the host reported.
                        Some(entry) => this
                            .update(&mut cx, |worktree, cx| {
                                worktree.as_remote_mut().unwrap().insert_entry(
                                    entry,
                                    response.worktree_scan_id as usize,
                                    cx,
                                )
                            })?
                            .await
                            .map(Some),
                        None => Ok(None),
                    }
                })
            }
        }
    }
1207
1208 pub fn copy_external_entries(
1209 &mut self,
1210 target_directory: PathBuf,
1211 paths: Vec<Arc<Path>>,
1212 overwrite_existing_files: bool,
1213 cx: &Context<Worktree>,
1214 ) -> Task<Result<Vec<ProjectEntryId>>> {
1215 match self {
1216 Worktree::Local(this) => {
1217 this.copy_external_entries(target_directory, paths, overwrite_existing_files, cx)
1218 }
1219 _ => Task::ready(Err(anyhow!(
1220 "Copying external entries is not supported for remote worktrees"
1221 ))),
1222 }
1223 }
1224
1225 pub fn expand_entry(
1226 &mut self,
1227 entry_id: ProjectEntryId,
1228 cx: &Context<Worktree>,
1229 ) -> Option<Task<Result<()>>> {
1230 match self {
1231 Worktree::Local(this) => this.expand_entry(entry_id, cx),
1232 Worktree::Remote(this) => {
1233 let response = this.client.request(proto::ExpandProjectEntry {
1234 project_id: this.project_id,
1235 entry_id: entry_id.to_proto(),
1236 });
1237 Some(cx.spawn(move |this, mut cx| async move {
1238 let response = response.await?;
1239 this.update(&mut cx, |this, _| {
1240 this.as_remote_mut()
1241 .unwrap()
1242 .wait_for_snapshot(response.worktree_scan_id as usize)
1243 })?
1244 .await?;
1245 Ok(())
1246 }))
1247 }
1248 }
1249 }
1250
1251 pub fn expand_all_for_entry(
1252 &mut self,
1253 entry_id: ProjectEntryId,
1254 cx: &Context<Worktree>,
1255 ) -> Option<Task<Result<()>>> {
1256 match self {
1257 Worktree::Local(this) => this.expand_all_for_entry(entry_id, cx),
1258 Worktree::Remote(this) => {
1259 let response = this.client.request(proto::ExpandAllForProjectEntry {
1260 project_id: this.project_id,
1261 entry_id: entry_id.to_proto(),
1262 });
1263 Some(cx.spawn(move |this, mut cx| async move {
1264 let response = response.await?;
1265 this.update(&mut cx, |this, _| {
1266 this.as_remote_mut()
1267 .unwrap()
1268 .wait_for_snapshot(response.worktree_scan_id as usize)
1269 })?
1270 .await?;
1271 Ok(())
1272 }))
1273 }
1274 }
1275 }
1276
1277 pub async fn handle_create_entry(
1278 this: Entity<Self>,
1279 request: proto::CreateProjectEntry,
1280 mut cx: AsyncApp,
1281 ) -> Result<proto::ProjectEntryResponse> {
1282 let (scan_id, entry) = this.update(&mut cx, |this, cx| {
1283 (
1284 this.scan_id(),
1285 this.create_entry(
1286 Arc::<Path>::from_proto(request.path),
1287 request.is_directory,
1288 cx,
1289 ),
1290 )
1291 })?;
1292 Ok(proto::ProjectEntryResponse {
1293 entry: match &entry.await? {
1294 CreatedEntry::Included(entry) => Some(entry.into()),
1295 CreatedEntry::Excluded { .. } => None,
1296 },
1297 worktree_scan_id: scan_id as u64,
1298 })
1299 }
1300
1301 pub async fn handle_delete_entry(
1302 this: Entity<Self>,
1303 request: proto::DeleteProjectEntry,
1304 mut cx: AsyncApp,
1305 ) -> Result<proto::ProjectEntryResponse> {
1306 let (scan_id, task) = this.update(&mut cx, |this, cx| {
1307 (
1308 this.scan_id(),
1309 this.delete_entry(
1310 ProjectEntryId::from_proto(request.entry_id),
1311 request.use_trash,
1312 cx,
1313 ),
1314 )
1315 })?;
1316 task.ok_or_else(|| anyhow!("invalid entry"))?.await?;
1317 Ok(proto::ProjectEntryResponse {
1318 entry: None,
1319 worktree_scan_id: scan_id as u64,
1320 })
1321 }
1322
1323 pub async fn handle_expand_entry(
1324 this: Entity<Self>,
1325 request: proto::ExpandProjectEntry,
1326 mut cx: AsyncApp,
1327 ) -> Result<proto::ExpandProjectEntryResponse> {
1328 let task = this.update(&mut cx, |this, cx| {
1329 this.expand_entry(ProjectEntryId::from_proto(request.entry_id), cx)
1330 })?;
1331 task.ok_or_else(|| anyhow!("no such entry"))?.await?;
1332 let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
1333 Ok(proto::ExpandProjectEntryResponse {
1334 worktree_scan_id: scan_id as u64,
1335 })
1336 }
1337
1338 pub async fn handle_expand_all_for_entry(
1339 this: Entity<Self>,
1340 request: proto::ExpandAllForProjectEntry,
1341 mut cx: AsyncApp,
1342 ) -> Result<proto::ExpandAllForProjectEntryResponse> {
1343 let task = this.update(&mut cx, |this, cx| {
1344 this.expand_all_for_entry(ProjectEntryId::from_proto(request.entry_id), cx)
1345 })?;
1346 task.ok_or_else(|| anyhow!("no such entry"))?.await?;
1347 let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
1348 Ok(proto::ExpandAllForProjectEntryResponse {
1349 worktree_scan_id: scan_id as u64,
1350 })
1351 }
1352
1353 pub async fn handle_rename_entry(
1354 this: Entity<Self>,
1355 request: proto::RenameProjectEntry,
1356 mut cx: AsyncApp,
1357 ) -> Result<proto::ProjectEntryResponse> {
1358 let (scan_id, task) = this.update(&mut cx, |this, cx| {
1359 (
1360 this.scan_id(),
1361 this.rename_entry(
1362 ProjectEntryId::from_proto(request.entry_id),
1363 Arc::<Path>::from_proto(request.new_path),
1364 cx,
1365 ),
1366 )
1367 })?;
1368 Ok(proto::ProjectEntryResponse {
1369 entry: match &task.await? {
1370 CreatedEntry::Included(entry) => Some(entry.into()),
1371 CreatedEntry::Excluded { .. } => None,
1372 },
1373 worktree_scan_id: scan_id as u64,
1374 })
1375 }
1376
1377 pub async fn handle_copy_entry(
1378 this: Entity<Self>,
1379 request: proto::CopyProjectEntry,
1380 mut cx: AsyncApp,
1381 ) -> Result<proto::ProjectEntryResponse> {
1382 let (scan_id, task) = this.update(&mut cx, |this, cx| {
1383 let relative_worktree_source_path = request
1384 .relative_worktree_source_path
1385 .map(PathBuf::from_proto);
1386 (
1387 this.scan_id(),
1388 this.copy_entry(
1389 ProjectEntryId::from_proto(request.entry_id),
1390 relative_worktree_source_path,
1391 PathBuf::from_proto(request.new_path),
1392 cx,
1393 ),
1394 )
1395 })?;
1396 Ok(proto::ProjectEntryResponse {
1397 entry: task.await?.as_ref().map(|e| e.into()),
1398 worktree_scan_id: scan_id as u64,
1399 })
1400 }
1401}
1402
1403impl LocalWorktree {
    /// The filesystem implementation backing this worktree.
    pub fn fs(&self) -> &Arc<dyn Fs> {
        &self.fs
    }
1407
    /// Whether `path` is considered private per this worktree's settings.
    /// Always false once the user has opted into sharing private files.
    pub fn is_path_private(&self, path: &Path) -> bool {
        !self.share_private_files && self.settings.is_path_private(path)
    }
1411
1412 fn restart_background_scanners(&mut self, cx: &Context<Worktree>) {
1413 let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
1414 let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
1415 self.scan_requests_tx = scan_requests_tx;
1416 self.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;
1417
1418 self.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
1419 let always_included_entries = mem::take(&mut self.snapshot.always_included_entries);
1420 log::debug!(
1421 "refreshing entries for the following always included paths: {:?}",
1422 always_included_entries
1423 );
1424
1425 // Cleans up old always included entries to ensure they get updated properly. Otherwise,
1426 // nested always included entries may not get updated and will result in out-of-date info.
1427 self.refresh_entries_for_paths(always_included_entries);
1428 }
1429
    /// Spawns the background machinery that keeps this worktree in sync with
    /// the filesystem: a `BackgroundScanner` task driven by an FS watch
    /// stream, plus a foreground task that applies each `ScanState` message
    /// back to this entity. Both task handles are stored so they live as
    /// long as the worktree.
    fn start_background_scanner(
        &mut self,
        scan_requests_rx: channel::Receiver<ScanRequest>,
        path_prefixes_to_scan_rx: channel::Receiver<PathPrefixScanRequest>,
        cx: &Context<Worktree>,
    ) {
        let snapshot = self.snapshot();
        let share_private_files = self.share_private_files;
        let next_entry_id = self.next_entry_id.clone();
        let fs = self.fs.clone();
        let git_hosting_provider_registry = GitHostingProviderRegistry::try_global(cx);
        let settings = self.settings.clone();
        // Scanner -> updater channel carrying ScanState messages.
        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
        let background_scanner = cx.background_spawn({
            let abs_path = snapshot.abs_path.as_path().to_path_buf();
            let background = cx.background_executor().clone();
            async move {
                // Watch the worktree root for filesystem events.
                let (events, watcher) = fs.watch(&abs_path, FS_WATCH_LATENCY).await;
                // Default to case-sensitive when detection fails.
                let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
                    log::error!("Failed to determine whether filesystem is case sensitive: {e:#}");
                    true
                });

                let mut scanner = BackgroundScanner {
                    fs,
                    fs_case_sensitive,
                    status_updates_tx: scan_states_tx,
                    executor: background,
                    scan_requests_rx,
                    path_prefixes_to_scan_rx,
                    next_entry_id,
                    state: Arc::new(Mutex::new(BackgroundScannerState {
                        prev_snapshot: snapshot.snapshot.clone(),
                        snapshot,
                        scanned_dirs: Default::default(),
                        path_prefixes_to_scan: Default::default(),
                        paths_to_scan: Default::default(),
                        removed_entries: Default::default(),
                        changed_paths: Default::default(),
                        repository_scans: HashMap::default(),
                        git_hosting_provider_registry,
                    })),
                    phase: BackgroundScannerPhase::InitialScan,
                    share_private_files,
                    settings,
                    watcher,
                };

                scanner
                    .run(Box::pin(
                        events.map(|events| events.into_iter().map(Into::into).collect()),
                    ))
                    .await;
            }
        });
        // Applies each scanner message to this worktree entity.
        let scan_state_updater = cx.spawn(|this, mut cx| async move {
            while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
                this.update(&mut cx, |this, cx| {
                    let this = this.as_local_mut().unwrap();
                    match state {
                        ScanState::Started => {
                            *this.is_scanning.0.borrow_mut() = true;
                        }
                        ScanState::Updated {
                            snapshot,
                            changes,
                            barrier,
                            scanning,
                        } => {
                            *this.is_scanning.0.borrow_mut() = scanning;
                            this.set_snapshot(snapshot, changes, cx);
                            // Dropping the barrier sender signals any waiter
                            // that this update has been applied.
                            drop(barrier);
                        }
                        ScanState::RootUpdated { new_path } => {
                            this.update_abs_path_and_refresh(new_path, cx);
                        }
                    }
                    cx.notify();
                })
                .ok();
            }
        });
        self._background_scanner_tasks = vec![background_scanner, scan_state_updater];
        // Report "scanning" until the scanner says otherwise.
        self.is_scanning = watch::channel_with(true);
    }
1515
1516 fn set_snapshot(
1517 &mut self,
1518 new_snapshot: LocalSnapshot,
1519 entry_changes: UpdatedEntriesSet,
1520 cx: &mut Context<Worktree>,
1521 ) {
1522 let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);
1523 self.snapshot = new_snapshot;
1524
1525 if let Some(share) = self.update_observer.as_mut() {
1526 share
1527 .snapshots_tx
1528 .unbounded_send((
1529 self.snapshot.clone(),
1530 entry_changes.clone(),
1531 repo_changes.clone(),
1532 ))
1533 .ok();
1534 }
1535
1536 if !entry_changes.is_empty() {
1537 cx.emit(Event::UpdatedEntries(entry_changes));
1538 }
1539 if !repo_changes.is_empty() {
1540 cx.emit(Event::UpdatedGitRepositories(repo_changes));
1541 }
1542 }
1543
    /// Computes which git repositories changed between two snapshots.
    ///
    /// Walks both snapshots' `git_repositories` maps in parallel as a
    /// merge-join keyed on the work-directory entry id (this relies on both
    /// iterators yielding entries in the same key order). Emits one change
    /// per added, removed, or rescanned repository, pairing the work
    /// directory path with the repository's previous state (`None` when it
    /// is newly discovered).
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();

        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        // Present only in the new snapshot: repository added.
                        Ordering::Less => {
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        // Present in both: report it only if either scan id
                        // advanced (the repo's .git dir or status changed).
                        Ordering::Equal => {
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id
                                || new_repo.status_scan_id != old_repo.status_scan_id
                            {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repositories
                                        .get(&PathKey(entry.path.clone()), &())
                                        .cloned();
                                    changes.push((
                                        entry.path.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        // Present only in the old snapshot: repository removed.
                        Ordering::Greater => {
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repositories
                                    .get(&PathKey(entry.path.clone()), &())
                                    .cloned();
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                // Old snapshot exhausted: everything remaining was added.
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                // New snapshot exhausted: everything remaining was removed.
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repositories
                            .get(&PathKey(entry.path.clone()), &())
                            .cloned();
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Helper to turn the peeked `(&K, &V)` into an owned `(K, V)`.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
1641
1642 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1643 let mut is_scanning_rx = self.is_scanning.1.clone();
1644 async move {
1645 let mut is_scanning = *is_scanning_rx.borrow();
1646 while is_scanning {
1647 if let Some(value) = is_scanning_rx.recv().await {
1648 is_scanning = value;
1649 } else {
1650 break;
1651 }
1652 }
1653 }
1654 }
1655
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
1659
    /// Returns a clone of this worktree's settings.
    pub fn settings(&self) -> WorktreeSettings {
        self.settings.clone()
    }
1663
    /// Looks up the local repository state for `repo`, keyed by its work
    /// directory entry id. Returns `None` if the repository is not tracked.
    pub fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory_id)
    }
1667
    /// Loads the raw bytes of the file at `path`, refreshing its worktree
    /// entry in the process. If the path is excluded (no entry is produced),
    /// a detached `File` with no `entry_id` is synthesized from the on-disk
    /// metadata instead.
    fn load_binary_file(
        &self,
        path: &Path,
        cx: &Context<Worktree>,
    ) -> Task<Result<LoadedBinaryFile>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Kick off an entry refresh now; it is awaited after the read below.
        let entry = self.refresh_entry(path.clone(), None, cx);
        let is_private = self.is_path_private(path.as_ref());

        let worktree = cx.weak_entity();
        cx.background_spawn(async move {
            let abs_path = abs_path?;
            let content = fs.load_bytes(&abs_path).await?;

            let worktree = worktree
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            let file = match entry.await? {
                Some(entry) => File::for_entry(entry, worktree),
                // Excluded path: build a standalone File from FS metadata.
                None => {
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    Arc::new(File {
                        entry_id: None,
                        worktree,
                        path,
                        disk_state: DiskState::Present {
                            mtime: metadata.mtime,
                        },
                        is_local: true,
                        is_private,
                    })
                }
            };

            Ok(LoadedBinaryFile { file, content })
        })
    }
1715
    /// Loads the text of the file at `path`, refreshing its worktree entry
    /// in the process. If the path is excluded (no entry is produced), a
    /// detached `File` with no `entry_id` is synthesized from the on-disk
    /// metadata instead.
    fn load_file(&self, path: &Path, cx: &Context<Worktree>) -> Task<Result<LoadedFile>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Kick off an entry refresh now; it is awaited after the read below.
        let entry = self.refresh_entry(path.clone(), None, cx);
        let is_private = self.is_path_private(path.as_ref());

        cx.spawn(|this, _cx| async move {
            let abs_path = abs_path?;
            let text = fs.load(&abs_path).await?;

            let worktree = this
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            let file = match entry.await? {
                Some(entry) => File::for_entry(entry, worktree),
                // Excluded path: build a standalone File from FS metadata.
                None => {
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    Arc::new(File {
                        entry_id: None,
                        worktree,
                        path,
                        disk_state: DiskState::Present {
                            mtime: metadata.mtime,
                        },
                        is_local: true,
                        is_private,
                    })
                }
            };

            Ok(LoadedFile { file, text })
        })
    }
1758
1759 /// Find the lowest path in the worktree's datastructures that is an ancestor
1760 fn lowest_ancestor(&self, path: &Path) -> PathBuf {
1761 let mut lowest_ancestor = None;
1762 for path in path.ancestors() {
1763 if self.entry_for_path(path).is_some() {
1764 lowest_ancestor = Some(path.to_path_buf());
1765 break;
1766 }
1767 }
1768
1769 lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
1770 }
1771
    /// Creates an empty file or a directory at `path`, then refreshes the
    /// new entry and every newly created ancestor directory so the snapshot
    /// reflects them immediately. Excluded paths are created on disk but
    /// reported as `CreatedEntry::Excluded`.
    fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let path = path.into();
        let abs_path = match self.absolutize(&path) {
            Ok(path) => path,
            Err(e) => return Task::ready(Err(e.context(format!("absolutizing path {path:?}")))),
        };
        let path_excluded = self.settings.is_path_excluded(&abs_path);
        let fs = self.fs.clone();
        let task_abs_path = abs_path.clone();
        let write = cx.background_spawn(async move {
            if is_dir {
                fs.create_dir(&task_abs_path)
                    .await
                    .with_context(|| format!("creating directory {task_abs_path:?}"))
            } else {
                fs.save(&task_abs_path, &Rope::default(), LineEnding::default())
                    .await
                    .with_context(|| format!("creating file {task_abs_path:?}"))
            }
        });

        // Deepest prefix that already exists: everything between it and
        // `path` was just created and must be refreshed too.
        let lowest_ancestor = self.lowest_ancestor(&path);
        cx.spawn(|this, mut cx| async move {
            write.await?;
            if path_excluded {
                return Ok(CreatedEntry::Excluded { abs_path });
            }

            let (result, refreshes) = this.update(&mut cx, |this, cx| {
                let mut refreshes = Vec::new();
                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
                for refresh_path in refresh_paths.ancestors() {
                    if refresh_path == Path::new("") {
                        continue;
                    }
                    let refresh_full_path = lowest_ancestor.join(refresh_path);

                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
                        refresh_full_path.into(),
                        None,
                        cx,
                    ));
                }
                (
                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
                    refreshes,
                )
            })?;
            // Ancestor refreshes are best-effort; failures are only logged.
            for refresh in refreshes {
                refresh.await.log_err();
            }

            Ok(result
                .await?
                .map(CreatedEntry::Included)
                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
        })
    }
1835
    /// Saves `text` to `path` with the given line ending, then refreshes the
    /// entry. If the path is excluded (no entry results), a detached `File`
    /// with no `entry_id` is synthesized from the on-disk metadata.
    fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &Context<Worktree>,
    ) -> Task<Result<Arc<File>>> {
        let path = path.into();
        let fs = self.fs.clone();
        let is_private = self.is_path_private(&path);
        let Ok(abs_path) = self.absolutize(&path) else {
            return Task::ready(Err(anyhow!("invalid path {path:?}")));
        };

        let write = cx.background_spawn({
            let fs = fs.clone();
            let abs_path = abs_path.clone();
            async move { fs.save(&abs_path, &text, line_ending).await }
        });

        cx.spawn(move |this, mut cx| async move {
            write.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut()
                        .unwrap()
                        .refresh_entry(path.clone(), None, cx)
                })?
                .await?;
            let worktree = this.upgrade().ok_or_else(|| anyhow!("worktree dropped"))?;
            if let Some(entry) = entry {
                Ok(File::for_entry(entry, worktree))
            } else {
                // Excluded path: build a standalone File from FS metadata.
                let metadata = fs
                    .metadata(&abs_path)
                    .await
                    .with_context(|| {
                        format!("Fetching metadata after saving the excluded buffer {abs_path:?}")
                    })?
                    .with_context(|| {
                        format!("Excluded buffer {path:?} got removed during saving")
                    })?;
                Ok(Arc::new(File {
                    worktree,
                    path,
                    disk_state: DiskState::Present {
                        mtime: metadata.mtime,
                    },
                    entry_id: None,
                    is_local: true,
                    is_private,
                }))
            }
        })
    }
1891
    /// Removes the entry's file or directory from disk — to the trash when
    /// `trash` is true — then waits for the path to be rescanned so the
    /// snapshot reflects the deletion. Returns `None` when the entry id is
    /// unknown.
    fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let fs = self.fs.clone();

        let delete = cx.background_spawn(async move {
            if entry.is_file() {
                if trash {
                    fs.trash_file(&abs_path?, Default::default()).await?;
                } else {
                    fs.remove_file(&abs_path?, Default::default()).await?;
                }
            } else if trash {
                fs.trash_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            } else {
                fs.remove_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(entry.path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let path = delete.await?;
            // Block until the scanner has processed the deleted path.
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entries_for_paths(vec![path])
            })?
            .recv()
            .await;
            Ok(())
        }))
    }
1943
    /// Rename an entry.
    ///
    /// `new_path` is the new relative path to the worktree root.
    /// If the root entry is renamed then `new_path` is the new root name instead.
    ///
    /// Returns the renamed entry, or `CreatedEntry::Excluded` when the
    /// destination is not tracked by the worktree.
    fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Err(anyhow!("no entry to rename for id {entry_id:?}"))),
        };
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);

        // Renaming the root means renaming the worktree directory itself, so
        // the new absolute path hangs off the root's parent directory.
        let is_root_entry = self.root_entry().is_some_and(|e| e.id == entry_id);
        let abs_new_path = if is_root_entry {
            let Some(root_parent_path) = self.abs_path().parent() else {
                return Task::ready(Err(anyhow!("no parent for path {:?}", self.abs_path)));
            };
            root_parent_path.join(&new_path)
        } else {
            let Ok(absolutize_path) = self.absolutize(&new_path) else {
                return Task::ready(Err(anyhow!("absolutizing path {new_path:?}")));
            };
            absolutize_path
        };
        let abs_path = abs_new_path.clone();
        let fs = self.fs.clone();
        let case_sensitive = self.fs_case_sensitive;
        let rename = cx.background_spawn(async move {
            let abs_old_path = abs_old_path?;
            let abs_new_path = abs_new_path;

            let abs_old_path_lower = abs_old_path.to_str().map(|p| p.to_lowercase());
            let abs_new_path_lower = abs_new_path.to_str().map(|p| p.to_lowercase());

            // If we're on a case-insensitive FS and we're doing a case-only rename (i.e. `foobar` to `FOOBAR`)
            // we want to overwrite, because otherwise we run into a file-already-exists error.
            let overwrite = !case_sensitive
                && abs_old_path != abs_new_path
                && abs_old_path_lower == abs_new_path_lower;

            fs.rename(
                &abs_old_path,
                &abs_new_path,
                fs::RenameOptions {
                    overwrite,
                    ..Default::default()
                },
            )
            .await
            .with_context(|| format!("Renaming {abs_old_path:?} into {abs_new_path:?}"))
        });

        cx.spawn(|this, mut cx| async move {
            rename.await?;
            Ok(this
                .update(&mut cx, |this, cx| {
                    let local = this.as_local_mut().unwrap();
                    if is_root_entry {
                        // We eagerly update `abs_path` and refresh this worktree.
                        // Otherwise, the FS watcher would do it on the `RootUpdated` event,
                        // but with a noticeable delay, so we handle it proactively.
                        local.update_abs_path_and_refresh(
                            Some(SanitizedPath::from(abs_path.clone())),
                            cx,
                        );
                        Task::ready(Ok(this.root_entry().cloned()))
                    } else {
                        local.refresh_entry(new_path.clone(), Some(old_path), cx)
                    }
                })?
                .await?
                .map(CreatedEntry::Included)
                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
        })
    }
2024
    /// Recursively copies the entry with `entry_id` to `new_path`
    /// (worktree-relative), then refreshes the destination entry.
    ///
    /// When `relative_worktree_source_path` is provided, the copy source is
    /// that path joined onto the worktree root instead of the entry's own
    /// path. Returns `Ok(None)` when the entry id is unknown or the
    /// destination produces no tracked entry.
    fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        relative_worktree_source_path: Option<PathBuf>,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Ok(None)),
        };
        let new_path = new_path.into();
        let abs_old_path =
            if let Some(relative_worktree_source_path) = relative_worktree_source_path {
                Ok(self.abs_path().join(relative_worktree_source_path))
            } else {
                self.absolutize(&old_path)
            };
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background_spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path?,
                &abs_new_path?,
                Default::default(),
            )
            .await
        });

        cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })?
            .await
        })
    }
2065
    /// Copies external files/directories into `target_directory` inside the
    /// worktree, keeping each source's file name. Sources that would copy
    /// onto themselves are skipped. Copy and refresh failures are logged
    /// rather than propagated; the returned ids cover only the entries that
    /// actually ended up in the worktree.
    pub fn copy_external_entries(
        &self,
        target_directory: PathBuf,
        paths: Vec<Arc<Path>>,
        overwrite_existing_files: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<Vec<ProjectEntryId>>> {
        let worktree_path = self.abs_path().clone();
        let fs = self.fs.clone();
        let paths = paths
            .into_iter()
            .filter_map(|source| {
                let file_name = source.file_name()?;
                let mut target = target_directory.clone();
                target.push(file_name);

                // Do not allow copying the same file to itself.
                if source.as_ref() != target.as_path() {
                    Some((source, target))
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        // Worktree-relative destination paths, used for refresh and lookup.
        let paths_to_refresh = paths
            .iter()
            .filter_map(|(_, target)| Some(target.strip_prefix(&worktree_path).ok()?.into()))
            .collect::<Vec<_>>();

        cx.spawn(|this, cx| async move {
            // Perform the copies off the main thread; failures are logged.
            cx.background_spawn(async move {
                for (source, target) in paths {
                    copy_recursive(
                        fs.as_ref(),
                        &source,
                        &target,
                        fs::CopyOptions {
                            overwrite: overwrite_existing_files,
                            ..Default::default()
                        },
                    )
                    .await
                    .with_context(|| {
                        anyhow!("Failed to copy file from {source:?} to {target:?}")
                    })?;
                }
                Ok::<(), anyhow::Error>(())
            })
            .await
            .log_err();
            let mut refresh = cx.read_entity(
                &this.upgrade().with_context(|| "Dropped worktree")?,
                |this, _| {
                    Ok::<postage::barrier::Receiver, anyhow::Error>(
                        this.as_local()
                            .with_context(|| "Worktree is not local")?
                            .refresh_entries_for_paths(paths_to_refresh.clone()),
                    )
                },
            )??;

            // Wait for the scanner to pick up the copied paths.
            cx.background_spawn(async move {
                refresh.next().await;
                Ok::<(), anyhow::Error>(())
            })
            .await
            .log_err();

            // Collect the ids of whichever destinations now have entries.
            let this = this.upgrade().with_context(|| "Dropped worktree")?;
            cx.read_entity(&this, |this, _| {
                paths_to_refresh
                    .iter()
                    .filter_map(|path| Some(this.entry_for_path(path)?.id))
                    .collect()
            })
        })
    }
2144
2145 fn expand_entry(
2146 &self,
2147 entry_id: ProjectEntryId,
2148 cx: &Context<Worktree>,
2149 ) -> Option<Task<Result<()>>> {
2150 let path = self.entry_for_id(entry_id)?.path.clone();
2151 let mut refresh = self.refresh_entries_for_paths(vec![path]);
2152 Some(cx.background_spawn(async move {
2153 refresh.next().await;
2154 Ok(())
2155 }))
2156 }
2157
2158 fn expand_all_for_entry(
2159 &self,
2160 entry_id: ProjectEntryId,
2161 cx: &Context<Worktree>,
2162 ) -> Option<Task<Result<()>>> {
2163 let path = self.entry_for_id(entry_id).unwrap().path.clone();
2164 let mut rx = self.add_path_prefix_to_scan(path.clone());
2165 Some(cx.background_spawn(async move {
2166 rx.next().await;
2167 Ok(())
2168 }))
2169 }
2170
2171 fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
2172 let (tx, rx) = barrier::channel();
2173 self.scan_requests_tx
2174 .try_send(ScanRequest {
2175 relative_paths: paths,
2176 done: smallvec![tx],
2177 })
2178 .ok();
2179 rx
2180 }
2181
2182 pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) -> barrier::Receiver {
2183 let (tx, rx) = barrier::channel();
2184 self.path_prefixes_to_scan_tx
2185 .try_send(PathPrefixScanRequest {
2186 path: path_prefix,
2187 done: smallvec![tx],
2188 })
2189 .ok();
2190 rx
2191 }
2192
    /// Rescans `path` (and `old_path` too, when the refresh follows a rename)
    /// and returns the freshly-scanned entry. Returns `Ok(None)` without
    /// scanning when the path is excluded by the worktree settings.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        if self.settings.is_path_excluded(&path) {
            return Task::ready(Ok(None));
        }
        // On a rename, also rescan the old location so its entry is removed.
        let paths = if let Some(old_path) = old_path.as_ref() {
            vec![old_path.clone(), path.clone()]
        } else {
            vec![path.clone()]
        };
        let t0 = Instant::now();
        let mut refresh = self.refresh_entries_for_paths(paths);
        cx.spawn(move |this, mut cx| async move {
            // Wait for the scanner to finish before reading the entry, so the
            // snapshot reflects the refreshed state.
            refresh.recv().await;
            log::trace!("refreshed entry {path:?} in {:?}", t0.elapsed());
            let new_entry = this.update(&mut cx, |this, _| {
                this.entry_for_path(path)
                    .cloned()
                    .ok_or_else(|| anyhow!("failed to read path after update"))
            })??;
            Ok(Some(new_entry))
        })
    }
2220
    /// Starts streaming this worktree's snapshots to a peer via `callback`.
    ///
    /// If an observer is already installed, this only signals the existing
    /// background task to resume sending (the callback may previously have
    /// reported failure).
    fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: Send + Future<Output = bool>,
    {
        if let Some(observer) = self.update_observer.as_mut() {
            // Nudge the existing task to retry sending updates.
            *observer.resume_updates.borrow_mut() = ();
            return;
        }

        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
        let (snapshots_tx, mut snapshots_rx) =
            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
        // Seed the stream with the current snapshot so the peer receives a
        // complete initial update before any incremental ones.
        snapshots_tx
            .unbounded_send((self.snapshot(), Arc::default(), Arc::default()))
            .ok();

        let worktree_id = cx.entity_id().as_u64();
        let _maintain_remote_snapshot = cx.background_spawn(async move {
            let mut is_first = true;
            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
                // The first message is a full snapshot; later ones are deltas.
                let update = if is_first {
                    is_first = false;
                    snapshot.build_initial_update(project_id, worktree_id)
                } else {
                    snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes)
                };

                // Large updates are split into bounded-size chunks.
                for update in proto::split_worktree_update(update) {
                    // Discard any stale resume signal before sending.
                    let _ = resume_updates_rx.try_recv();
                    loop {
                        let result = callback(update.clone());
                        if result.await {
                            break;
                        } else {
                            // The callback rejected the chunk; block until
                            // we're told to resume, or give up if the resume
                            // channel has closed.
                            log::info!("waiting to resume updates");
                            if resume_updates_rx.next().await.is_none() {
                                return Some(());
                            }
                        }
                    }
                }
            }
            Some(())
        });

        self.update_observer = Some(UpdateObservationState {
            snapshots_tx,
            resume_updates: resume_updates_tx,
            _maintain_remote_snapshot,
        });
    }
2273
    /// Enables sharing of private files, restarting the background scanners
    /// so those files get picked up.
    pub fn share_private_files(&mut self, cx: &Context<Worktree>) {
        self.share_private_files = true;
        self.restart_background_scanners(cx);
    }
2278
    /// Optionally points the worktree at a new absolute root path, then
    /// restarts the background scanners so all derived state is rebuilt.
    fn update_abs_path_and_refresh(
        &mut self,
        new_path: Option<SanitizedPath>,
        cx: &Context<Worktree>,
    ) {
        if let Some(new_path) = new_path {
            // These caches are keyed by absolute paths, so they must be
            // discarded and rebuilt once the root moves.
            self.snapshot.git_repositories = Default::default();
            self.snapshot.ignores_by_parent_abs_path = Default::default();
            // The root name is derived from the last path component; empty if
            // the new path has no file name.
            let root_name = new_path
                .as_path()
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());
            self.snapshot.update_abs_path(new_path, root_name);
        }
        self.restart_background_scanners(cx);
    }
2295}
2296
2297impl RemoteWorktree {
    /// The id of the remote project this worktree belongs to.
    pub fn project_id(&self) -> u64 {
        self.project_id
    }
2301
    /// A handle to the protocol client used to talk to the host.
    pub fn client(&self) -> AnyProtoClient {
        self.client.clone()
    }
2305
    /// Marks this worktree as disconnected: stops accepting remote updates and
    /// drops all pending snapshot subscriptions (their receivers will error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
2311
    /// Forwards a worktree update received from the host to the background
    /// consumer. Silently drops updates after disconnection (`updates_tx` is
    /// cleared by `disconnected_from_host`).
    pub fn update_from_remote(&self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }
2319
    /// Streams this remote worktree's state to another peer via `callback`:
    /// first a full snapshot, then every subsequent update received from the
    /// host, until the callback reports failure or the channel closes.
    fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        let (tx, mut rx) = mpsc::unbounded();
        let initial_update = self
            .snapshot
            .build_initial_update(project_id, self.id().to_proto());
        self.update_observer = Some(tx);
        cx.spawn(|this, mut cx| async move {
            let mut update = initial_update;
            'outer: loop {
                // SSH projects use a special project ID of 0, and we need to
                // remap it to the correct one here.
                update.project_id = project_id;

                for chunk in split_worktree_update(update) {
                    // A `false` return means the peer is gone; stop streaming.
                    if !callback(chunk).await {
                        break 'outer;
                    }
                }

                if let Some(next_update) = rx.next().await {
                    update = next_update;
                } else {
                    break;
                }
            }
            // Clear the observer so a later call can re-register.
            this.update(&mut cx, |this, _| {
                let this = this.as_remote_mut().unwrap();
                this.update_observer.take();
            })
        })
        .detach();
    }
2356
    /// Whether this worktree has already observed the snapshot with the given
    /// scan id (scan ids increase monotonically).
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }
2360
    /// Returns a future that resolves once this worktree has observed the
    /// given scan id. Resolves immediately if it already has; fails if the
    /// worktree is disconnected from the host.
    pub fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` return an error below.
            drop(tx);
        } else {
            // Keep the subscription list sorted by scan id so it can be
            // drained in order as snapshots arrive. `Ok` and `Err` both give a
            // valid insertion point.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
2381
    /// Inserts an entry received from the host, first waiting until the
    /// snapshot at `scan_id` has been observed so the insertion is applied in
    /// order with streamed updates.
    fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &Context<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                // Mutate the background snapshot, then mirror it into the
                // foreground snapshot so both stay consistent.
                let snapshot = &mut worktree.background_snapshot.lock().0;
                let entry = snapshot.insert_entry(entry, &worktree.file_scan_inclusions);
                worktree.snapshot = snapshot.clone();
                entry
            })?
        })
    }
2400
    /// Asks the host to delete (or trash) the entry, then removes it from the
    /// local snapshots once the host scan confirming the deletion has been
    /// observed.
    fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let response = self.client.request(proto::DeleteProjectEntry {
            project_id: self.project_id,
            entry_id: entry_id.to_proto(),
            use_trash: trash,
        });
        Some(cx.spawn(move |this, mut cx| async move {
            let response = response.await?;
            let scan_id = response.worktree_scan_id as usize;

            // Catch up with the host's scan before mutating the snapshot, so
            // we don't race with incoming streamed updates.
            this.update(&mut cx, move |this, _| {
                this.as_remote_mut().unwrap().wait_for_snapshot(scan_id)
            })?
            .await?;

            this.update(&mut cx, |this, _| {
                let this = this.as_remote_mut().unwrap();
                // Mutate the background snapshot, then mirror it into the
                // foreground snapshot.
                let snapshot = &mut this.background_snapshot.lock().0;
                snapshot.delete_entry(entry_id);
                this.snapshot = snapshot.clone();
            })
        }))
    }
2429
    /// Asks the host to rename the entry to `new_path`. Returns the updated
    /// entry, or `CreatedEntry::Excluded` when the host reports no entry
    /// (the destination is excluded from the worktree).
    fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let new_path: Arc<Path> = new_path.into();
        let response = self.client.request(proto::RenameProjectEntry {
            project_id: self.project_id,
            entry_id: entry_id.to_proto(),
            new_path: new_path.as_ref().to_proto(),
        });
        cx.spawn(move |this, mut cx| async move {
            let response = response.await?;
            match response.entry {
                // The rename produced a visible entry: apply it locally once
                // the corresponding host scan has been observed.
                Some(entry) => this
                    .update(&mut cx, |this, cx| {
                        this.as_remote_mut().unwrap().insert_entry(
                            entry,
                            response.worktree_scan_id as usize,
                            cx,
                        )
                    })?
                    .await
                    .map(CreatedEntry::Included),
                None => {
                    // The destination is excluded; report its absolute path.
                    let abs_path = this.update(&mut cx, |worktree, _| {
                        worktree
                            .absolutize(&new_path)
                            .with_context(|| format!("absolutizing {new_path:?}"))
                    })??;
                    Ok(CreatedEntry::Excluded { abs_path })
                }
            }
        })
    }
2466}
2467
2468impl Snapshot {
2469 pub fn new(id: u64, root_name: String, abs_path: Arc<Path>) -> Self {
2470 Snapshot {
2471 id: WorktreeId::from_usize(id as usize),
2472 abs_path: abs_path.into(),
2473 root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
2474 root_name,
2475 always_included_entries: Default::default(),
2476 entries_by_path: Default::default(),
2477 entries_by_id: Default::default(),
2478 repositories: Default::default(),
2479 scan_id: 1,
2480 completed_scan_id: 0,
2481 }
2482 }
2483
    /// The unique id of this worktree.
    pub fn id(&self) -> WorktreeId {
        self.id
    }
2487
    // TODO:
    // Consider the following:
    //
    // ```rust
    // let abs_path: Arc<Path> = snapshot.abs_path(); // e.g. "C:\Users\user\Desktop\project"
    // let some_non_trimmed_path = Path::new("\\\\?\\C:\\Users\\user\\Desktop\\project\\main.rs");
    // // The caller perform some actions here:
    // some_non_trimmed_path.strip_prefix(abs_path); // This fails
    // some_non_trimmed_path.starts_with(abs_path); // This fails too
    // ```
    //
    // This is definitely a bug, but it's not clear if we should handle it here or not.
    /// The absolute path of the worktree root (sanitized; see the TODO above
    /// regarding Windows extended-length path prefixes).
    pub fn abs_path(&self) -> &Arc<Path> {
        self.abs_path.as_path()
    }
2503
    /// Builds an `UpdateWorktree` message describing the entire snapshot; sent
    /// as the first message to a newly-connected peer.
    fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
        // Entries are sent sorted by id.
        let mut updated_entries = self
            .entries_by_path
            .iter()
            .map(proto::Entry::from)
            .collect::<Vec<_>>();
        updated_entries.sort_unstable_by_key(|e| e.id);

        // Repositories are sent sorted by work-directory id.
        let mut updated_repositories = self
            .repositories
            .iter()
            .map(|repository| repository.initial_update())
            .collect::<Vec<_>>();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_proto(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries: Vec::new(),
            scan_id: self.scan_id as u64,
            // A scan may still be in progress, in which case more updates
            // will follow.
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories: Vec::new(),
        }
    }
2532
2533 pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
2534 if path
2535 .components()
2536 .any(|component| !matches!(component, std::path::Component::Normal(_)))
2537 {
2538 return Err(anyhow!("invalid path"));
2539 }
2540 if path.file_name().is_some() {
2541 Ok(self.abs_path.as_path().join(path))
2542 } else {
2543 Ok(self.abs_path.as_path().to_path_buf())
2544 }
2545 }
2546
    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
2550
    /// Inserts (or replaces) an entry received over the wire, keeping
    /// `entries_by_id` and `entries_by_path` consistent with each other.
    fn insert_entry(
        &mut self,
        entry: proto::Entry,
        always_included_paths: &PathMatcher,
    ) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, always_included_paths, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id previously mapped to a different path (a rename), remove
        // the stale path record.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
2572
    /// Removes the entry with the given id along with all of its descendants.
    /// Returns the removed entry's path, or `None` if the id is unknown.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>(&());
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and all entries beneath it, removing
            // their id records too...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the remainder of the tree.
            new_entries_by_path.append(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }
2593
    /// Returns the git status of the file at the given worktree-relative path,
    /// if it lies inside a known repository and has a recorded status.
    pub fn status_for_file(&self, path: impl AsRef<Path>) -> Option<FileStatus> {
        let path = path.as_ref();
        self.repository_for_path(path).and_then(|repo| {
            // NOTE(review): the unwrap presumes `relativize` cannot fail when
            // the repo's work directory contains `path` — confirm against
            // `relativize`'s implementation.
            let repo_path = repo.relativize(path).unwrap();
            repo.statuses_by_path
                .get(&PathKey(repo_path.0), &())
                .map(|entry| entry.status)
        })
    }
2603
    /// Replaces the snapshot's absolute root path, recomputing the cached
    /// lowercase character bag only when the root name actually changed.
    fn update_abs_path(&mut self, abs_path: SanitizedPath, root_name: String) {
        self.abs_path = abs_path;
        if root_name != self.root_name {
            self.root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
            self.root_name = root_name;
        }
    }
2611
    /// Applies an `UpdateWorktree` message received from a remote peer,
    /// bringing this snapshot's entries and repositories in sync with the
    /// sender's.
    pub(crate) fn apply_remote_update(
        &mut self,
        mut update: proto::UpdateWorktree,
        always_included_paths: &PathMatcher,
    ) -> Result<()> {
        log::debug!(
            "applying remote worktree update. {} entries updated, {} removed",
            update.updated_entries.len(),
            update.removed_entries.len()
        );
        // The root may have been renamed or moved on the host.
        self.update_abs_path(
            SanitizedPath::from(PathBuf::from_proto(update.abs_path)),
            update.root_name,
        );

        // Accumulate edits and apply them to each sum tree in one batch.
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();

        for entry_id in update.removed_entries {
            let entry_id = ProjectEntryId::from_proto(entry_id);
            entries_by_id_edits.push(Edit::Remove(entry_id));
            if let Some(entry) = self.entry_for_id(entry_id) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, always_included_paths, entry))?;
            // If this id already exists under a different path (a rename),
            // remove the old path record.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            // If a different entry occupies this path, drop its id record.
            if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
                if old_entry.id != entry.id {
                    entries_by_id_edits.push(Edit::Remove(old_entry.id));
                }
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Sort so the retain below can binary-search.
        update.removed_repositories.sort_unstable();
        self.repositories.retain(&(), |entry: &RepositoryEntry| {
            update
                .removed_repositories
                .binary_search(&entry.work_directory_id.to_proto())
                .is_err()
        });

        for repository in update.updated_repositories {
            let work_directory_id = ProjectEntryId::from_proto(repository.work_directory_id);
            if let Some(work_dir_entry) = self.entry_for_id(work_directory_id) {
                let conflicted_paths = TreeSet::from_ordered_entries(
                    repository
                        .current_merge_conflicts
                        .into_iter()
                        .map(|path| RepoPath(Path::new(&path).into())),
                );

                if self
                    .repositories
                    .contains(&PathKey(work_dir_entry.path.clone()), &())
                {
                    // Known repository: apply incremental status edits.
                    let edits = repository
                        .removed_statuses
                        .into_iter()
                        .map(|path| Edit::Remove(PathKey(FromProto::from_proto(path))))
                        .chain(repository.updated_statuses.into_iter().filter_map(
                            |updated_status| {
                                // Malformed statuses are logged and skipped.
                                Some(Edit::Insert(updated_status.try_into().log_err()?))
                            },
                        ))
                        .collect::<Vec<_>>();

                    self.repositories
                        .update(&PathKey(work_dir_entry.path.clone()), &(), |repo| {
                            repo.current_branch =
                                repository.branch_summary.as_ref().map(proto_to_branch);
                            repo.statuses_by_path.edit(edits, &());
                            repo.current_merge_conflicts = conflicted_paths
                        });
                } else {
                    // New repository: build its status tree from scratch.
                    let statuses = SumTree::from_iter(
                        repository
                            .updated_statuses
                            .into_iter()
                            .filter_map(|updated_status| updated_status.try_into().log_err()),
                        &(),
                    );

                    self.repositories.insert_or_replace(
                        RepositoryEntry {
                            work_directory_id,
                            // When syncing repository entries from a peer, we don't need
                            // the location_in_repo field, since git operations don't happen locally
                            // anyway.
                            work_directory: WorkDirectory::InProject {
                                relative_path: work_dir_entry.path.clone(),
                            },
                            current_branch: repository.branch_summary.as_ref().map(proto_to_branch),
                            statuses_by_path: statuses,
                            current_merge_conflicts: conflicted_paths,
                        },
                        &(),
                    );
                }
            } else {
                log::error!(
                    "no work directory entry for repository {:?}",
                    repository.work_directory_id
                )
            }
        }

        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }
2740
    /// Total number of entries, including ignored ones.
    pub fn entry_count(&self) -> usize {
        self.entries_by_path.summary().count
    }
2744
    /// Number of non-ignored entries.
    pub fn visible_entry_count(&self) -> usize {
        self.entries_by_path.summary().non_ignored_count
    }
2748
    /// Number of directory entries (total entries minus files).
    pub fn dir_count(&self) -> usize {
        let summary = self.entries_by_path.summary();
        summary.count - summary.file_count
    }
2753
    /// Number of non-ignored directory entries.
    pub fn visible_dir_count(&self) -> usize {
        let summary = self.entries_by_path.summary();
        summary.non_ignored_count - summary.non_ignored_file_count
    }
2758
    /// Number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }
2762
    /// Number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().non_ignored_file_count
    }
2766
    /// Returns a traversal positioned at the `start_offset`-th entry, counting
    /// only entries that match the `include_*` filters.
    fn traverse_from_offset(
        &self,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor(&());
        // Seek by filtered count rather than by path.
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_files,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            snapshot: self,
            cursor,
            include_files,
            include_dirs,
            include_ignored,
        }
    }
2793
    /// Returns a traversal positioned at the given worktree-relative path,
    /// filtered by the `include_*` flags.
    pub fn traverse_from_path(
        &self,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        Traversal::new(self, include_files, include_dirs, include_ignored, path)
    }
2803
    /// Iterates over file entries, starting from the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(true, false, include_ignored, start)
    }
2807
    /// Iterates over directory entries, starting from the `start`-th directory.
    pub fn directories(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, true, include_ignored, start)
    }
2811
    /// Iterates over all entries (files and directories), starting from the
    /// `start`-th entry.
    pub fn entries(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(true, true, include_ignored, start)
    }
2815
    /// Test helper: returns the status entries of the repository whose work
    /// directory is exactly `work_dir`, if any.
    #[cfg(any(feature = "test-support", test))]
    pub fn git_status(&self, work_dir: &Path) -> Option<Vec<StatusEntry>> {
        self.repositories
            .get(&PathKey(work_dir.into()), &())
            .map(|repo| repo.status().collect())
    }
2822
    /// All git repositories known to this snapshot, keyed by work directory path.
    pub fn repositories(&self) -> &SumTree<RepositoryEntry> {
        &self.repositories
    }
2826
    /// Get the repository whose work directory corresponds to the given path.
    pub(crate) fn repository(&self, work_directory: PathKey) -> Option<RepositoryEntry> {
        self.repositories.get(&work_directory, &()).cloned()
    }
2831
    /// Get the repository whose work directory contains the given path.
    ///
    /// When nested repositories both contain the path, the last match wins;
    /// repositories iterate in path-key order, so that is presumably the
    /// innermost one — TODO confirm ordering guarantees.
    #[track_caller]
    pub fn repository_for_path(&self, path: &Path) -> Option<&RepositoryEntry> {
        self.repositories
            .iter()
            .filter(|repo| repo.work_directory.directory_contains(path))
            .last()
    }
2840
    /// Given an ordered iterator of entries, returns an iterator of those entries,
    /// along with their containing git repository.
    #[track_caller]
    pub fn entries_with_repositories<'a>(
        &'a self,
        entries: impl 'a + Iterator<Item = &'a Entry>,
    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
        // A stack of the repositories containing the current entry; relies on
        // both `entries` and `repositories` being path-ordered.
        let mut containing_repos = Vec::<&RepositoryEntry>::new();
        let mut repositories = self.repositories().iter().peekable();
        entries.map(move |entry| {
            // Pop repositories that no longer contain this entry.
            while let Some(repository) = containing_repos.last() {
                if repository.directory_contains(&entry.path) {
                    break;
                } else {
                    containing_repos.pop();
                }
            }
            // Push upcoming repositories that do contain it.
            while let Some(repository) = repositories.peek() {
                if repository.directory_contains(&entry.path) {
                    containing_repos.push(repositories.next().unwrap());
                } else {
                    break;
                }
            }
            // The top of the stack is the innermost containing repository.
            let repo = containing_repos.last().copied();
            (entry, repo)
        })
    }
2869
2870 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
2871 let empty_path = Path::new("");
2872 self.entries_by_path
2873 .cursor::<()>(&())
2874 .filter(move |entry| entry.path.as_ref() != empty_path)
2875 .map(|entry| &entry.path)
2876 }
2877
2878 pub fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
2879 let options = ChildEntriesOptions {
2880 include_files: true,
2881 include_dirs: true,
2882 include_ignored: true,
2883 };
2884 self.child_entries_with_options(parent_path, options)
2885 }
2886
    /// Returns an iterator over the direct children of `parent_path`,
    /// filtered according to `options`.
    pub fn child_entries_with_options<'a>(
        &'a self,
        parent_path: &'a Path,
        options: ChildEntriesOptions,
    ) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor(&());
        // Seek just past the parent itself, to its first descendant.
        cursor.seek(&TraversalTarget::path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            snapshot: self,
            cursor,
            include_files: options.include_files,
            include_dirs: options.include_dirs,
            include_ignored: options.include_ignored,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }
2906
    /// The entry for the worktree root (its relative path is empty), if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }
2910
    /// TODO: what's the difference between `root_dir` and `abs_path`?
    /// is there any? if so, document it.
    ///
    /// Returns the worktree's absolute path only when the root entry is a
    /// directory; `None` for single-file worktrees or when no root exists.
    pub fn root_dir(&self) -> Option<Arc<Path>> {
        self.root_entry()
            .filter(|entry| entry.is_dir())
            .map(|_| self.abs_path().clone())
    }
2918
    /// The name of the worktree root (derived from the last path component).
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
2922
2923 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
2924 self.repositories
2925 .get(&PathKey(Path::new("").into()), &())
2926 .map(|entry| entry.to_owned())
2927 }
2928
2929 pub fn git_entry(&self, work_directory_path: Arc<Path>) -> Option<RepositoryEntry> {
2930 self.repositories
2931 .get(&PathKey(work_directory_path), &())
2932 .map(|entry| entry.to_owned())
2933 }
2934
    /// Iterates over all repositories known to this snapshot.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repositories.iter()
    }
2938
    /// The id of the current (possibly still in-progress) scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }
2942
2943 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
2944 let path = path.as_ref();
2945 debug_assert!(path.is_relative());
2946 self.traverse_from_path(true, true, true, path)
2947 .entry()
2948 .and_then(|entry| {
2949 if entry.path.as_ref() == path {
2950 Some(entry)
2951 } else {
2952 None
2953 }
2954 })
2955 }
2956
    /// Looks up an entry by id, resolving it through the id-to-path index.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }
2961
    /// Returns the inode of the entry at the given path, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
2965}
2966
2967impl LocalSnapshot {
2968 pub fn local_repo_for_path(&self, path: &Path) -> Option<&LocalRepositoryEntry> {
2969 let repository_entry = self.repository_for_path(path)?;
2970 let work_directory_id = repository_entry.work_directory_id();
2971 self.git_repositories.get(&work_directory_id)
2972 }
2973
    /// Builds an incremental `UpdateWorktree` message describing the given
    /// entry and repository changes relative to the previous snapshot.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updated_repositories = Vec::new();
        let mut removed_repositories = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        for (work_dir_path, change) in repo_changes.iter() {
            let new_repo = self.repositories.get(&PathKey(work_dir_path.clone()), &());
            // Presence in the old/new snapshot decides whether this is an
            // update, an addition, or a removal.
            match (&change.old_repository, new_repo) {
                (Some(old_repo), Some(new_repo)) => {
                    updated_repositories.push(new_repo.build_update(old_repo));
                }
                (None, Some(new_repo)) => {
                    updated_repositories.push(new_repo.initial_update());
                }
                (Some(old_repo), None) => {
                    removed_repositories.push(old_repo.work_directory_id.to_proto());
                }
                _ => {}
            }
        }

        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);
        removed_repositories.sort_unstable();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        // TODO - optimize, knowing that removed_entries are sorted.
        // An id that is both removed and updated should only be reported as
        // updated.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_proto(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }
3031
    /// Inserts (or replaces) an entry in the local snapshot, keeping the id-
    /// and path-keyed trees consistent. Also (re)parses the corresponding
    /// gitignore when the entry is a `.gitignore` file.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.as_path().join(&entry.path);
            // Parse the ignore file synchronously and cache it by its parent
            // directory's absolute path.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Preserve the resolved kind of a directory we already know about,
        // rather than downgrading it to `PendingDir`.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If a different entry previously occupied this path, drop its id
        // record so the two trees stay consistent.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
3077
3078 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
3079 let mut inodes = TreeSet::default();
3080 for ancestor in path.ancestors().skip(1) {
3081 if let Some(entry) = self.entry_for_path(ancestor) {
3082 inodes.insert(entry.inode);
3083 }
3084 }
3085 inodes
3086 }
3087
    /// Builds the stack of gitignores that apply to `abs_path`, walking up
    /// from the path until the first ancestor containing a `.git`.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for (index, ancestor) in abs_path.ancestors().enumerate() {
            // Skip `abs_path` itself (index 0): only parent directories can
            // contribute ignore files for it.
            if index > 0 {
                if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                    new_ignores.push((ancestor, Some(ignore.clone())));
                } else {
                    new_ignores.push((ancestor, None));
                }
            }
            // Stop at the repository boundary: ignores above a `.git` don't apply.
            if ancestor.join(*DOT_GIT).exists() {
                break;
            }
        }

        // Apply ignores from the outermost ancestor inward.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                // A parent directory is itself ignored, so everything below is.
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
3119
    /// Test helper: iterates over directories that were scanned despite being
    /// external or ignored (i.e. explicitly expanded).
    #[cfg(test)]
    pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
        self.entries_by_path
            .cursor::<()>(&())
            .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
    }
3126
    /// Test-only consistency check: verifies that the id- and path-keyed entry
    /// trees agree, that file traversals visit exactly the expected entries,
    /// and that traversal orders are consistent. With `git_state`, also checks
    /// that every cached gitignore corresponds to entries in the tree.
    #[cfg(test)]
    pub fn check_invariants(&self, git_state: bool) {
        use pretty_assertions::assert_eq;

        assert_eq!(
            self.entries_by_path
                .cursor::<()>(&())
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>(&())
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        // Every file must appear in the unfiltered traversal, and every
        // visible file in the filtered one, in the same order.
        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>(&()) {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if (!entry.is_ignored && !entry.is_external) || entry.is_always_included {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // Walk the tree via `child_entries`. NOTE: despite the name, inserting
        // each node's children at the pre-push stack length and popping from
        // the end yields a depth-first preorder, which must match the
        // path-sorted cursor order below.
        let mut bfs_paths = Vec::new();
        let mut stack = self
            .root_entry()
            .map(|e| e.path.as_ref())
            .into_iter()
            .collect::<Vec<_>>();
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>(&())
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        let dfs_paths_via_traversal = self
            .entries(true, 0)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        if git_state {
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path = ignore_parent_abs_path
                    .strip_prefix(self.abs_path.as_path())
                    .unwrap();
                // Both the directory and its .gitignore must exist as entries.
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(*GITIGNORE))
                    .is_some());
            }
        }
    }
3198
    /// Test-only consistency check: `repositories` and `git_repositories`
    /// must describe the same set of work directories, one-to-one.
    #[cfg(test)]
    fn check_git_invariants(&self) {
        let dotgit_paths = self
            .git_repositories
            .iter()
            .map(|repo| repo.1.dot_git_dir_abs_path.clone())
            .collect::<HashSet<_>>();
        let work_dir_paths = self
            .repositories
            .iter()
            .map(|repo| repo.work_directory.path_key())
            .collect::<HashSet<_>>();
        // Equal cardinalities plus the per-entry lookup below establish the
        // one-to-one correspondence.
        assert_eq!(dotgit_paths.len(), work_dir_paths.len());
        assert_eq!(self.repositories.iter().count(), work_dir_paths.len());
        assert_eq!(self.git_repositories.iter().count(), work_dir_paths.len());
        for entry in self.repositories.iter() {
            self.git_repositories.get(&entry.work_directory_id).unwrap();
        }
    }
3218
3219 #[cfg(test)]
3220 pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3221 let mut paths = Vec::new();
3222 for entry in self.entries_by_path.cursor::<()>(&()) {
3223 if include_ignored || !entry.is_ignored {
3224 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3225 }
3226 }
3227 paths.sort_by(|a, b| a.0.cmp(b.0));
3228 paths
3229 }
3230}
3231
3232impl BackgroundScannerState {
    /// Decides whether the background scanner should descend into the given
    /// directory entry: visible directories always; `.git` and the local
    /// settings folder always; otherwise only if it was scanned before or an
    /// explicit scan request covers it.
    fn should_scan_directory(&self, entry: &Entry) -> bool {
        (!entry.is_external && (!entry.is_ignored || entry.is_always_included))
            || entry.path.file_name() == Some(*DOT_GIT)
            || entry.path.file_name() == Some(local_settings_folder_relative_path().as_os_str())
            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
            || self
                .paths_to_scan
                .iter()
                .any(|p| p.starts_with(&entry.path))
            || self
                .path_prefixes_to_scan
                .iter()
                .any(|p| entry.path.starts_with(p))
    }
3247
3248 fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
3249 let path = entry.path.clone();
3250 let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
3251 let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);
3252
3253 if !ancestor_inodes.contains(&entry.inode) {
3254 ancestor_inodes.insert(entry.inode);
3255 scan_job_tx
3256 .try_send(ScanJob {
3257 abs_path,
3258 path,
3259 ignore_stack,
3260 scan_queue: scan_job_tx.clone(),
3261 ancestor_inodes,
3262 is_external: entry.is_external,
3263 })
3264 .unwrap();
3265 }
3266 }
3267
3268 fn reuse_entry_id(&mut self, entry: &mut Entry) {
3269 if let Some(mtime) = entry.mtime {
3270 // If an entry with the same inode was removed from the worktree during this scan,
3271 // then it *might* represent the same file or directory. But the OS might also have
3272 // re-used the inode for a completely different file or directory.
3273 //
3274 // Conditionally reuse the old entry's id:
3275 // * if the mtime is the same, the file was probably been renamed.
3276 // * if the path is the same, the file may just have been updated
3277 if let Some(removed_entry) = self.removed_entries.remove(&entry.inode) {
3278 if removed_entry.mtime == Some(mtime) || removed_entry.path == entry.path {
3279 entry.id = removed_entry.id;
3280 }
3281 } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
3282 entry.id = existing_entry.id;
3283 }
3284 }
3285 }
3286
3287 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs, watcher: &dyn Watcher) -> Entry {
3288 self.reuse_entry_id(&mut entry);
3289 let entry = self.snapshot.insert_entry(entry, fs);
3290 if entry.path.file_name() == Some(&DOT_GIT) {
3291 self.insert_git_repository(entry.path.clone(), fs, watcher);
3292 }
3293
3294 #[cfg(test)]
3295 self.snapshot.check_invariants(false);
3296
3297 entry
3298 }
3299
3300 fn populate_dir(
3301 &mut self,
3302 parent_path: &Arc<Path>,
3303 entries: impl IntoIterator<Item = Entry>,
3304 ignore: Option<Arc<Gitignore>>,
3305 ) {
3306 let mut parent_entry = if let Some(parent_entry) = self
3307 .snapshot
3308 .entries_by_path
3309 .get(&PathKey(parent_path.clone()), &())
3310 {
3311 parent_entry.clone()
3312 } else {
3313 log::warn!(
3314 "populating a directory {:?} that has been removed",
3315 parent_path
3316 );
3317 return;
3318 };
3319
3320 match parent_entry.kind {
3321 EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
3322 EntryKind::Dir => {}
3323 _ => return,
3324 }
3325
3326 if let Some(ignore) = ignore {
3327 let abs_parent_path = self.snapshot.abs_path.as_path().join(parent_path).into();
3328 self.snapshot
3329 .ignores_by_parent_abs_path
3330 .insert(abs_parent_path, (ignore, false));
3331 }
3332
3333 let parent_entry_id = parent_entry.id;
3334 self.scanned_dirs.insert(parent_entry_id);
3335 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
3336 let mut entries_by_id_edits = Vec::new();
3337
3338 for entry in entries {
3339 entries_by_id_edits.push(Edit::Insert(PathEntry {
3340 id: entry.id,
3341 path: entry.path.clone(),
3342 is_ignored: entry.is_ignored,
3343 scan_id: self.snapshot.scan_id,
3344 }));
3345 entries_by_path_edits.push(Edit::Insert(entry));
3346 }
3347
3348 self.snapshot
3349 .entries_by_path
3350 .edit(entries_by_path_edits, &());
3351 self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
3352
3353 if let Err(ix) = self.changed_paths.binary_search(parent_path) {
3354 self.changed_paths.insert(ix, parent_path.clone());
3355 }
3356
3357 #[cfg(test)]
3358 self.snapshot.check_invariants(false);
3359 }
3360
3361 fn remove_path(&mut self, path: &Path) {
3362 let mut new_entries;
3363 let removed_entries;
3364 {
3365 let mut cursor = self
3366 .snapshot
3367 .entries_by_path
3368 .cursor::<TraversalProgress>(&());
3369 new_entries = cursor.slice(&TraversalTarget::path(path), Bias::Left, &());
3370 removed_entries = cursor.slice(&TraversalTarget::successor(path), Bias::Left, &());
3371 new_entries.append(cursor.suffix(&()), &());
3372 }
3373 self.snapshot.entries_by_path = new_entries;
3374
3375 let mut removed_ids = Vec::with_capacity(removed_entries.summary().count);
3376 for entry in removed_entries.cursor::<()>(&()) {
3377 match self.removed_entries.entry(entry.inode) {
3378 hash_map::Entry::Occupied(mut e) => {
3379 let prev_removed_entry = e.get_mut();
3380 if entry.id > prev_removed_entry.id {
3381 *prev_removed_entry = entry.clone();
3382 }
3383 }
3384 hash_map::Entry::Vacant(e) => {
3385 e.insert(entry.clone());
3386 }
3387 }
3388
3389 if entry.path.file_name() == Some(&GITIGNORE) {
3390 let abs_parent_path = self
3391 .snapshot
3392 .abs_path
3393 .as_path()
3394 .join(entry.path.parent().unwrap());
3395 if let Some((_, needs_update)) = self
3396 .snapshot
3397 .ignores_by_parent_abs_path
3398 .get_mut(abs_parent_path.as_path())
3399 {
3400 *needs_update = true;
3401 }
3402 }
3403
3404 if let Err(ix) = removed_ids.binary_search(&entry.id) {
3405 removed_ids.insert(ix, entry.id);
3406 }
3407 }
3408
3409 self.snapshot.entries_by_id.edit(
3410 removed_ids.iter().map(|&id| Edit::Remove(id)).collect(),
3411 &(),
3412 );
3413 self.snapshot
3414 .git_repositories
3415 .retain(|id, _| removed_ids.binary_search(id).is_err());
3416 self.snapshot.repositories.retain(&(), |repository| {
3417 !repository.work_directory.path_key().0.starts_with(path)
3418 });
3419
3420 #[cfg(test)]
3421 self.snapshot.check_invariants(false);
3422 }
3423
3424 fn insert_git_repository(
3425 &mut self,
3426 dot_git_path: Arc<Path>,
3427 fs: &dyn Fs,
3428 watcher: &dyn Watcher,
3429 ) -> Option<LocalRepositoryEntry> {
3430 let work_dir_path: Arc<Path> = match dot_git_path.parent() {
3431 Some(parent_dir) => {
3432 // Guard against repositories inside the repository metadata
3433 if parent_dir.iter().any(|component| component == *DOT_GIT) {
3434 log::info!(
3435 "not building git repository for nested `.git` directory, `.git` path in the worktree: {dot_git_path:?}"
3436 );
3437 return None;
3438 };
3439 log::info!(
3440 "building git repository, `.git` path in the worktree: {dot_git_path:?}"
3441 );
3442
3443 parent_dir.into()
3444 }
3445 None => {
3446 // `dot_git_path.parent().is_none()` means `.git` directory is the opened worktree itself,
3447 // no files inside that directory are tracked by git, so no need to build the repo around it
3448 log::info!(
3449 "not building git repository for the worktree itself, `.git` path in the worktree: {dot_git_path:?}"
3450 );
3451 return None;
3452 }
3453 };
3454
3455 self.insert_git_repository_for_path(
3456 WorkDirectory::InProject {
3457 relative_path: work_dir_path,
3458 },
3459 dot_git_path,
3460 fs,
3461 watcher,
3462 )
3463 }
3464
3465 fn insert_git_repository_for_path(
3466 &mut self,
3467 work_directory: WorkDirectory,
3468 dot_git_path: Arc<Path>,
3469 fs: &dyn Fs,
3470 watcher: &dyn Watcher,
3471 ) -> Option<LocalRepositoryEntry> {
3472 let work_dir_id = self
3473 .snapshot
3474 .entry_for_path(work_directory.path_key().0)
3475 .map(|entry| entry.id)?;
3476
3477 if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
3478 return None;
3479 }
3480
3481 let dot_git_abs_path = self.snapshot.abs_path.as_path().join(&dot_git_path);
3482
3483 let t0 = Instant::now();
3484 let repository = fs.open_repo(&dot_git_abs_path)?;
3485
3486 let repository_path = repository.path();
3487 watcher.add(&repository_path).log_err()?;
3488
3489 let actual_dot_git_dir_abs_path = repository.main_repository_path();
3490 let dot_git_worktree_abs_path = if actual_dot_git_dir_abs_path == dot_git_abs_path {
3491 None
3492 } else {
3493 // The two paths could be different because we opened a git worktree.
3494 // When that happens:
3495 //
3496 // * `dot_git_abs_path` is a file that points to the worktree-subdirectory in the actual
3497 // .git directory.
3498 //
3499 // * `repository_path` is the worktree-subdirectory.
3500 //
3501 // * `actual_dot_git_dir_abs_path` is the path to the actual .git directory. In git
3502 // documentation this is called the "commondir".
3503 watcher.add(&dot_git_abs_path).log_err()?;
3504 Some(Arc::from(dot_git_abs_path))
3505 };
3506
3507 log::trace!("constructed libgit2 repo in {:?}", t0.elapsed());
3508
3509 if let Some(git_hosting_provider_registry) = self.git_hosting_provider_registry.clone() {
3510 git_hosting_providers::register_additional_providers(
3511 git_hosting_provider_registry,
3512 repository.clone(),
3513 );
3514 }
3515
3516 self.snapshot.repositories.insert_or_replace(
3517 RepositoryEntry {
3518 work_directory_id: work_dir_id,
3519 work_directory: work_directory.clone(),
3520 current_branch: None,
3521 statuses_by_path: Default::default(),
3522 current_merge_conflicts: Default::default(),
3523 },
3524 &(),
3525 );
3526
3527 let local_repository = LocalRepositoryEntry {
3528 work_directory_id: work_dir_id,
3529 work_directory: work_directory.clone(),
3530 git_dir_scan_id: 0,
3531 status_scan_id: 0,
3532 repo_ptr: repository.clone(),
3533 dot_git_dir_abs_path: actual_dot_git_dir_abs_path.into(),
3534 dot_git_worktree_abs_path,
3535 current_merge_head_shas: Default::default(),
3536 merge_message: None,
3537 };
3538
3539 self.snapshot
3540 .git_repositories
3541 .insert(work_dir_id, local_repository.clone());
3542
3543 Some(local_repository)
3544 }
3545}
3546
3547async fn is_git_dir(path: &Path, fs: &dyn Fs) -> bool {
3548 if path.file_name() == Some(&*DOT_GIT) {
3549 return true;
3550 }
3551
3552 // If we're in a bare repository, we are not inside a `.git` folder. In a
3553 // bare repository, the root folder contains what would normally be in the
3554 // `.git` folder.
3555 let head_metadata = fs.metadata(&path.join("HEAD")).await;
3556 if !matches!(head_metadata, Ok(Some(_))) {
3557 return false;
3558 }
3559 let config_metadata = fs.metadata(&path.join("config")).await;
3560 matches!(config_metadata, Ok(Some(_)))
3561}
3562
3563async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
3564 let contents = fs.load(abs_path).await?;
3565 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
3566 let mut builder = GitignoreBuilder::new(parent);
3567 for line in contents.lines() {
3568 builder.add_line(Some(abs_path.into()), line)?;
3569 }
3570 Ok(builder.build()?)
3571}
3572
3573impl Deref for Worktree {
3574 type Target = Snapshot;
3575
3576 fn deref(&self) -> &Self::Target {
3577 match self {
3578 Worktree::Local(worktree) => &worktree.snapshot,
3579 Worktree::Remote(worktree) => &worktree.snapshot,
3580 }
3581 }
3582}
3583
/// Lets a `LocalWorktree` be used wherever its `LocalSnapshot` is expected.
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
3591
/// Lets a `RemoteWorktree` be used wherever its `Snapshot` is expected.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
3599
/// Debug output for a local worktree is just its snapshot's debug output.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
3605
3606impl fmt::Debug for Snapshot {
3607 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3608 struct EntriesById<'a>(&'a SumTree<PathEntry>);
3609 struct EntriesByPath<'a>(&'a SumTree<Entry>);
3610
3611 impl<'a> fmt::Debug for EntriesByPath<'a> {
3612 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3613 f.debug_map()
3614 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
3615 .finish()
3616 }
3617 }
3618
3619 impl<'a> fmt::Debug for EntriesById<'a> {
3620 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3621 f.debug_list().entries(self.0.iter()).finish()
3622 }
3623 }
3624
3625 f.debug_struct("Snapshot")
3626 .field("id", &self.id)
3627 .field("root_name", &self.root_name)
3628 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
3629 .field("entries_by_id", &EntriesById(&self.entries_by_id))
3630 .finish()
3631 }
3632}
3633
/// A handle to a file within a worktree, used by the `language` crate to tie
/// buffers back to their on-disk (or remote) location.
#[derive(Clone, PartialEq)]
pub struct File {
    // The worktree that contains this file.
    pub worktree: Entity<Worktree>,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    // Whether the file is new, present (with an mtime), or deleted.
    pub disk_state: DiskState,
    // The worktree entry backing this file, if one exists.
    pub entry_id: Option<ProjectEntryId>,
    // Whether the file lives in a local (as opposed to remote) worktree.
    pub is_local: bool,
    // Whether the file is considered private (e.g. a `.env` file).
    pub is_private: bool,
}
3643
3644impl language::File for File {
3645 fn as_local(&self) -> Option<&dyn language::LocalFile> {
3646 if self.is_local {
3647 Some(self)
3648 } else {
3649 None
3650 }
3651 }
3652
3653 fn disk_state(&self) -> DiskState {
3654 self.disk_state
3655 }
3656
3657 fn path(&self) -> &Arc<Path> {
3658 &self.path
3659 }
3660
3661 fn full_path(&self, cx: &App) -> PathBuf {
3662 let mut full_path = PathBuf::new();
3663 let worktree = self.worktree.read(cx);
3664
3665 if worktree.is_visible() {
3666 full_path.push(worktree.root_name());
3667 } else {
3668 let path = worktree.abs_path();
3669
3670 if worktree.is_local() && path.starts_with(home_dir().as_path()) {
3671 full_path.push("~");
3672 full_path.push(path.strip_prefix(home_dir().as_path()).unwrap());
3673 } else {
3674 full_path.push(path)
3675 }
3676 }
3677
3678 if self.path.components().next().is_some() {
3679 full_path.push(&self.path);
3680 }
3681
3682 full_path
3683 }
3684
3685 /// Returns the last component of this handle's absolute path. If this handle refers to the root
3686 /// of its worktree, then this method will return the name of the worktree itself.
3687 fn file_name<'a>(&'a self, cx: &'a App) -> &'a OsStr {
3688 self.path
3689 .file_name()
3690 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
3691 }
3692
3693 fn worktree_id(&self, cx: &App) -> WorktreeId {
3694 self.worktree.read(cx).id()
3695 }
3696
3697 fn as_any(&self) -> &dyn Any {
3698 self
3699 }
3700
3701 fn to_proto(&self, cx: &App) -> rpc::proto::File {
3702 rpc::proto::File {
3703 worktree_id: self.worktree.read(cx).id().to_proto(),
3704 entry_id: self.entry_id.map(|id| id.to_proto()),
3705 path: self.path.as_ref().to_proto(),
3706 mtime: self.disk_state.mtime().map(|time| time.into()),
3707 is_deleted: self.disk_state == DiskState::Deleted,
3708 }
3709 }
3710
3711 fn is_private(&self) -> bool {
3712 self.is_private
3713 }
3714}
3715
3716impl language::LocalFile for File {
3717 fn abs_path(&self, cx: &App) -> PathBuf {
3718 let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
3719 if self.path.as_ref() == Path::new("") {
3720 worktree_path.as_path().to_path_buf()
3721 } else {
3722 worktree_path.as_path().join(&self.path)
3723 }
3724 }
3725
3726 fn load(&self, cx: &App) -> Task<Result<String>> {
3727 let worktree = self.worktree.read(cx).as_local().unwrap();
3728 let abs_path = worktree.absolutize(&self.path);
3729 let fs = worktree.fs.clone();
3730 cx.background_spawn(async move { fs.load(&abs_path?).await })
3731 }
3732
3733 fn load_bytes(&self, cx: &App) -> Task<Result<Vec<u8>>> {
3734 let worktree = self.worktree.read(cx).as_local().unwrap();
3735 let abs_path = worktree.absolutize(&self.path);
3736 let fs = worktree.fs.clone();
3737 cx.background_spawn(async move { fs.load_bytes(&abs_path?).await })
3738 }
3739}
3740
3741impl File {
3742 pub fn for_entry(entry: Entry, worktree: Entity<Worktree>) -> Arc<Self> {
3743 Arc::new(Self {
3744 worktree,
3745 path: entry.path.clone(),
3746 disk_state: if let Some(mtime) = entry.mtime {
3747 DiskState::Present { mtime }
3748 } else {
3749 DiskState::New
3750 },
3751 entry_id: Some(entry.id),
3752 is_local: true,
3753 is_private: entry.is_private,
3754 })
3755 }
3756
3757 pub fn from_proto(
3758 proto: rpc::proto::File,
3759 worktree: Entity<Worktree>,
3760 cx: &App,
3761 ) -> Result<Self> {
3762 let worktree_id = worktree
3763 .read(cx)
3764 .as_remote()
3765 .ok_or_else(|| anyhow!("not remote"))?
3766 .id();
3767
3768 if worktree_id.to_proto() != proto.worktree_id {
3769 return Err(anyhow!("worktree id does not match file"));
3770 }
3771
3772 let disk_state = if proto.is_deleted {
3773 DiskState::Deleted
3774 } else {
3775 if let Some(mtime) = proto.mtime.map(&Into::into) {
3776 DiskState::Present { mtime }
3777 } else {
3778 DiskState::New
3779 }
3780 };
3781
3782 Ok(Self {
3783 worktree,
3784 path: Arc::<Path>::from_proto(proto.path),
3785 disk_state,
3786 entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
3787 is_local: false,
3788 is_private: false,
3789 })
3790 }
3791
3792 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
3793 file.and_then(|f| f.as_any().downcast_ref())
3794 }
3795
3796 pub fn worktree_id(&self, cx: &App) -> WorktreeId {
3797 self.worktree.read(cx).id()
3798 }
3799
3800 pub fn project_entry_id(&self, _: &App) -> Option<ProjectEntryId> {
3801 match self.disk_state {
3802 DiskState::Deleted => None,
3803 _ => self.entry_id,
3804 }
3805 }
3806}
3807
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    // Stable identifier for this entry within the project.
    pub id: ProjectEntryId,
    // Whether this is a file or a (possibly not-yet-loaded) directory.
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    // Filesystem inode, used to reuse entry ids across renames during a scan.
    pub inode: u64,
    // Modification time; `None` means the entry has not been observed on disk.
    pub mtime: Option<MTime>,

    // The entry's canonicalized path when it differs from `path` — NOTE(review):
    // presumably set for symlinked entries (see `is_external`); confirm at call sites.
    pub canonical_path: Option<Box<Path>>,
    /// Whether this entry is ignored by Git.
    ///
    /// We only scan ignored entries once the directory is expanded and
    /// exclude them from searches.
    pub is_ignored: bool,

    /// Whether this entry is always included in searches.
    ///
    /// This is used for entries that are always included in searches, even
    /// if they are ignored by git. Overridden by file_scan_exclusions.
    pub is_always_included: bool,

    /// Whether this entry's canonical path is outside of the worktree.
    /// This means the entry is only accessible from the worktree root via a
    /// symlink.
    ///
    /// We only scan entries outside of the worktree once the symlinked
    /// directory is expanded. External entries are treated like gitignored
    /// entries in that they are not included in searches.
    pub is_external: bool,

    /// Whether this entry is considered to be a `.env` file.
    pub is_private: bool,
    /// The entry's size on disk, in bytes.
    pub size: u64,
    // Bag of characters in the path, used for fuzzy matching.
    pub char_bag: CharBag,
    // Whether the file is a FIFO (named pipe).
    pub is_fifo: bool,
}
3845
/// The kind of a worktree entry, including a directory's load state.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    // A directory whose contents have not been loaded.
    UnloadedDir,
    // A directory whose scan has been scheduled but not yet completed.
    PendingDir,
    // A fully loaded directory.
    Dir,
    // A regular file.
    File,
}
3853
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
3869
/// Describes a change to a git repository observed by the worktree.
#[derive(Debug)]
pub struct GitRepositoryChange {
    /// The previous state of the repository, if it already existed.
    pub old_repository: Option<RepositoryEntry>,
}
3875
/// A batch of entry changes: (path, entry id, kind of change).
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// A batch of repository changes: (work-directory path, change).
pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
3878
/// The git status of a single path within a repository.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct StatusEntry {
    // Path relative to the repository's work directory.
    pub repo_path: RepoPath,
    pub status: FileStatus,
}
3884
3885impl StatusEntry {
3886 pub fn is_staged(&self) -> Option<bool> {
3887 self.status.is_staged()
3888 }
3889
3890 fn to_proto(&self) -> proto::StatusEntry {
3891 let simple_status = match self.status {
3892 FileStatus::Ignored | FileStatus::Untracked => proto::GitStatus::Added as i32,
3893 FileStatus::Unmerged { .. } => proto::GitStatus::Conflict as i32,
3894 FileStatus::Tracked(TrackedStatus {
3895 index_status,
3896 worktree_status,
3897 }) => tracked_status_to_proto(if worktree_status != StatusCode::Unmodified {
3898 worktree_status
3899 } else {
3900 index_status
3901 }),
3902 };
3903
3904 proto::StatusEntry {
3905 repo_path: self.repo_path.as_ref().to_proto(),
3906 simple_status,
3907 status: Some(status_to_proto(self.status)),
3908 }
3909 }
3910}
3911
3912impl TryFrom<proto::StatusEntry> for StatusEntry {
3913 type Error = anyhow::Error;
3914
3915 fn try_from(value: proto::StatusEntry) -> Result<Self, Self::Error> {
3916 let repo_path = RepoPath(Arc::<Path>::from_proto(value.repo_path));
3917 let status = status_from_proto(value.simple_status, value.status)?;
3918 Ok(Self { repo_path, status })
3919 }
3920}
3921
/// A borrowed seek dimension tracking the maximum path seen so far.
#[derive(Clone, Debug)]
struct PathProgress<'a> {
    max_path: &'a Path,
}
3926
/// A sum-tree summary that pairs the maximum path with an item summary `S`.
#[derive(Clone, Debug)]
pub struct PathSummary<S> {
    // The greatest path among the summarized items.
    max_path: Arc<Path>,
    // The accumulated item-specific summary.
    item_summary: S,
}
3932
impl<S: Summary> Summary for PathSummary<S> {
    type Context = S::Context;

    fn zero(cx: &Self::Context) -> Self {
        Self {
            max_path: Path::new("").into(),
            item_summary: S::zero(cx),
        }
    }

    fn add_summary(&mut self, rhs: &Self, cx: &Self::Context) {
        // Items are ordered by path, so the right-hand max path wins.
        self.max_path = rhs.max_path.clone();
        self.item_summary.add_summary(&rhs.item_summary, cx);
    }
}
3948
/// Allows seeking through a `PathSummary` tree by borrowed max path.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for PathProgress<'a> {
    fn zero(_: &<PathSummary<S> as Summary>::Context) -> Self {
        Self {
            max_path: Path::new(""),
        }
    }

    fn add_summary(
        &mut self,
        summary: &'a PathSummary<S>,
        _: &<PathSummary<S> as Summary>::Context,
    ) {
        self.max_path = summary.max_path.as_ref()
    }
}
3964
/// Repository entries are summarized by their work-directory path alone.
impl sum_tree::Item for RepositoryEntry {
    type Summary = PathSummary<Unit>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.work_directory.path_key().0,
            item_summary: Unit,
        }
    }
}
3975
/// Repository entries are keyed by work-directory path.
impl sum_tree::KeyedItem for RepositoryEntry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        self.work_directory.path_key()
    }
}
3983
/// Status entries aggregate their git status into a `GitSummary`, keyed by
/// repo-relative path.
impl sum_tree::Item for StatusEntry {
    type Summary = PathSummary<GitSummary>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.repo_path.0.clone(),
            item_summary: self.status.summary(),
        }
    }
}
3994
/// Status entries are keyed by their repo-relative path.
impl sum_tree::KeyedItem for StatusEntry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        PathKey(self.repo_path.0.clone())
    }
}
4002
/// Accumulates git status counts while traversing a status tree.
impl<'a> sum_tree::Dimension<'a, PathSummary<GitSummary>> for GitSummary {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<GitSummary>, _: &()) {
        *self += summary.item_summary
    }
}
4012
/// Allows seeking through a `PathSummary` tree by owned path key.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for PathKey {
    fn zero(_: &S::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<S>, _: &S::Context) {
        self.0 = summary.max_path.clone();
    }
}
4022
/// Lets path-ordered traversal cursors operate over `PathSummary` trees.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for TraversalProgress<'a> {
    fn zero(_cx: &S::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<S>, _: &S::Context) {
        self.max_path = summary.max_path.as_ref();
    }
}
4032
4033impl Entry {
4034 fn new(
4035 path: Arc<Path>,
4036 metadata: &fs::Metadata,
4037 next_entry_id: &AtomicUsize,
4038 root_char_bag: CharBag,
4039 canonical_path: Option<Box<Path>>,
4040 ) -> Self {
4041 let char_bag = char_bag_for_path(root_char_bag, &path);
4042 Self {
4043 id: ProjectEntryId::new(next_entry_id),
4044 kind: if metadata.is_dir {
4045 EntryKind::PendingDir
4046 } else {
4047 EntryKind::File
4048 },
4049 path,
4050 inode: metadata.inode,
4051 mtime: Some(metadata.mtime),
4052 size: metadata.len,
4053 canonical_path,
4054 is_ignored: false,
4055 is_always_included: false,
4056 is_external: false,
4057 is_private: false,
4058 char_bag,
4059 is_fifo: metadata.is_fifo,
4060 }
4061 }
4062
4063 pub fn is_created(&self) -> bool {
4064 self.mtime.is_some()
4065 }
4066
4067 pub fn is_dir(&self) -> bool {
4068 self.kind.is_dir()
4069 }
4070
4071 pub fn is_file(&self) -> bool {
4072 self.kind.is_file()
4073 }
4074}
4075
impl EntryKind {
    /// True for any directory variant, regardless of load state.
    pub fn is_dir(&self) -> bool {
        matches!(
            self,
            EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
        )
    }

    /// True only for directories whose contents have not been loaded.
    pub fn is_unloaded(&self) -> bool {
        matches!(self, EntryKind::UnloadedDir)
    }

    pub fn is_file(&self) -> bool {
        matches!(self, EntryKind::File)
    }
}
4092
4093impl sum_tree::Item for Entry {
4094 type Summary = EntrySummary;
4095
4096 fn summary(&self, _cx: &()) -> Self::Summary {
4097 let non_ignored_count = if (self.is_ignored || self.is_external) && !self.is_always_included
4098 {
4099 0
4100 } else {
4101 1
4102 };
4103 let file_count;
4104 let non_ignored_file_count;
4105 if self.is_file() {
4106 file_count = 1;
4107 non_ignored_file_count = non_ignored_count;
4108 } else {
4109 file_count = 0;
4110 non_ignored_file_count = 0;
4111 }
4112
4113 EntrySummary {
4114 max_path: self.path.clone(),
4115 count: 1,
4116 non_ignored_count,
4117 file_count,
4118 non_ignored_file_count,
4119 }
4120 }
4121}
4122
/// Entries are keyed by their worktree-relative path.
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
4130
/// Aggregated statistics for a subtree of entries.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest path in the subtree.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries visible to searches (not ignored/external, or always-included).
    non_ignored_count: usize,
    // Total number of file entries.
    file_count: usize,
    // File entries visible to searches.
    non_ignored_file_count: usize,
}
4139
impl Default for EntrySummary {
    /// The empty summary: no entries, empty max path.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}
4151
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // Paths are ordered, so the right-hand max path wins; counts add up.
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.non_ignored_count += rhs.non_ignored_count;
        self.file_count += rhs.file_count;
        self.non_ignored_file_count += rhs.non_ignored_file_count;
    }
}
4167
/// An id-keyed mirror of an entry, used to look up paths by entry id.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this entry was last updated.
    scan_id: usize,
}
4175
/// Result of a filesystem scan pass.
#[derive(Debug, Default)]
struct FsScanned {
    // Number of git status scans still in flight; the worktree reports
    // "scanning" while this is non-zero.
    status_scans: Arc<AtomicU32>,
}
4180
/// Path entries are summarized by their maximum entry id.
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    fn summary(&self, _cx: &()) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
4188
/// Path entries are keyed by entry id.
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    fn key(&self) -> Self::Key {
        self.id
    }
}
4196
/// Summary for the id-keyed entry tree: the greatest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
4201
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn zero(_cx: &Self::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // Ids are ordered, so the right-hand max id wins.
        self.max_id = summary.max_id;
    }
}
4213
/// Allows seeking through the id-keyed entry tree by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
4223
/// Newtype path key used to order and look up items in path-keyed trees.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct PathKey(Arc<Path>);
4226
impl Default for PathKey {
    /// The empty path, which sorts before every other path.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
4232
/// Allows seeking through the path-keyed entry tree by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
4242
/// Long-running task that scans the worktree's filesystem and keeps the
/// shared snapshot up to date in response to FS events and scan requests.
struct BackgroundScanner {
    // Shared mutable scanner state, including the snapshot being built.
    state: Arc<Mutex<BackgroundScannerState>>,
    fs: Arc<dyn Fs>,
    // Whether the underlying filesystem is case-sensitive.
    fs_case_sensitive: bool,
    // Channel used to report scan progress to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: BackgroundExecutor,
    // Incoming requests to (re)scan specific paths.
    scan_requests_rx: channel::Receiver<ScanRequest>,
    // Incoming requests to eagerly load everything under a path prefix.
    path_prefixes_to_scan_rx: channel::Receiver<PathPrefixScanRequest>,
    // Source of ids for newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    // Which stage of scanning we are in; affects event-handling precision.
    phase: BackgroundScannerPhase,
    // Watcher used to register paths for filesystem notifications.
    watcher: Arc<dyn Watcher>,
    settings: WorktreeSettings,
    share_private_files: bool,
}
4257
/// The scanner's lifecycle stage.
#[derive(Copy, Clone, PartialEq)]
enum BackgroundScannerPhase {
    // Performing the initial full scan of the worktree.
    InitialScan,
    // Handling FS events that arrived while the initial scan was running;
    // change kinds are less precise because prior state wasn't fully loaded.
    EventsReceivedDuringInitialScan,
    // Steady state: processing FS events as they arrive.
    Events,
}
4264
4265impl BackgroundScanner {
4266 async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>) {
4267 // If the worktree root does not contain a git repository, then find
4268 // the git repository in an ancestor directory. Find any gitignore files
4269 // in ancestor directories.
4270 let root_abs_path = self.state.lock().snapshot.abs_path.clone();
4271 let mut containing_git_repository = None;
4272 for (index, ancestor) in root_abs_path.as_path().ancestors().enumerate() {
4273 if index != 0 {
4274 if let Ok(ignore) =
4275 build_gitignore(&ancestor.join(*GITIGNORE), self.fs.as_ref()).await
4276 {
4277 self.state
4278 .lock()
4279 .snapshot
4280 .ignores_by_parent_abs_path
4281 .insert(ancestor.into(), (ignore.into(), false));
4282 }
4283 }
4284
4285 let ancestor_dot_git = ancestor.join(*DOT_GIT);
4286 // Check whether the directory or file called `.git` exists (in the
4287 // case of worktrees it's a file.)
4288 if self
4289 .fs
4290 .metadata(&ancestor_dot_git)
4291 .await
4292 .is_ok_and(|metadata| metadata.is_some())
4293 {
4294 if index != 0 {
4295 // We canonicalize, since the FS events use the canonicalized path.
4296 if let Some(ancestor_dot_git) =
4297 self.fs.canonicalize(&ancestor_dot_git).await.log_err()
4298 {
4299 // We associate the external git repo with our root folder and
4300 // also mark where in the git repo the root folder is located.
4301 let local_repository = self.state.lock().insert_git_repository_for_path(
4302 WorkDirectory::AboveProject {
4303 absolute_path: ancestor.into(),
4304 location_in_repo: root_abs_path
4305 .as_path()
4306 .strip_prefix(ancestor)
4307 .unwrap()
4308 .into(),
4309 },
4310 ancestor_dot_git.clone().into(),
4311 self.fs.as_ref(),
4312 self.watcher.as_ref(),
4313 );
4314
4315 if local_repository.is_some() {
4316 containing_git_repository = Some(ancestor_dot_git)
4317 }
4318 };
4319 }
4320
4321 // Reached root of git repository.
4322 break;
4323 }
4324 }
4325
4326 let (scan_job_tx, scan_job_rx) = channel::unbounded();
4327 {
4328 let mut state = self.state.lock();
4329 state.snapshot.scan_id += 1;
4330 if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
4331 let ignore_stack = state
4332 .snapshot
4333 .ignore_stack_for_abs_path(root_abs_path.as_path(), true);
4334 if ignore_stack.is_abs_path_ignored(root_abs_path.as_path(), true) {
4335 root_entry.is_ignored = true;
4336 state.insert_entry(root_entry.clone(), self.fs.as_ref(), self.watcher.as_ref());
4337 }
4338 state.enqueue_scan_dir(root_abs_path.into(), &root_entry, &scan_job_tx);
4339 }
4340 };
4341
4342 // Perform an initial scan of the directory.
4343 drop(scan_job_tx);
4344 let scans_running = self.scan_dirs(true, scan_job_rx).await;
4345 {
4346 let mut state = self.state.lock();
4347 state.snapshot.completed_scan_id = state.snapshot.scan_id;
4348 }
4349
4350 let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
4351 self.send_status_update(scanning, SmallVec::new());
4352
4353 // Process any any FS events that occurred while performing the initial scan.
4354 // For these events, update events cannot be as precise, because we didn't
4355 // have the previous state loaded yet.
4356 self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
4357 if let Poll::Ready(Some(mut paths)) = futures::poll!(fs_events_rx.next()) {
4358 while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
4359 paths.extend(more_paths);
4360 }
4361 self.process_events(paths.into_iter().map(Into::into).collect())
4362 .await;
4363 }
4364 if let Some(abs_path) = containing_git_repository {
4365 self.process_events(vec![abs_path]).await;
4366 }
4367
4368 // Continue processing events until the worktree is dropped.
4369 self.phase = BackgroundScannerPhase::Events;
4370
4371 loop {
4372 select_biased! {
4373 // Process any path refresh requests from the worktree. Prioritize
4374 // these before handling changes reported by the filesystem.
4375 request = self.next_scan_request().fuse() => {
4376 let Ok(request) = request else { break };
4377 let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
4378 if !self.process_scan_request(request, scanning).await {
4379 return;
4380 }
4381 }
4382
4383 path_prefix_request = self.path_prefixes_to_scan_rx.recv().fuse() => {
4384 let Ok(request) = path_prefix_request else { break };
4385 log::trace!("adding path prefix {:?}", request.path);
4386
4387 let did_scan = self.forcibly_load_paths(&[request.path.clone()]).await;
4388 if did_scan {
4389 let abs_path =
4390 {
4391 let mut state = self.state.lock();
4392 state.path_prefixes_to_scan.insert(request.path.clone());
4393 state.snapshot.abs_path.as_path().join(&request.path)
4394 };
4395
4396 if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
4397 self.process_events(vec![abs_path]).await;
4398 }
4399 }
4400 let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
4401 self.send_status_update(scanning, request.done);
4402 }
4403
4404 paths = fs_events_rx.next().fuse() => {
4405 let Some(mut paths) = paths else { break };
4406 while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
4407 paths.extend(more_paths);
4408 }
4409 self.process_events(paths.into_iter().map(Into::into).collect()).await;
4410 }
4411 }
4412 }
4413 }
4414
4415 async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
4416 log::debug!("rescanning paths {:?}", request.relative_paths);
4417
4418 request.relative_paths.sort_unstable();
4419 self.forcibly_load_paths(&request.relative_paths).await;
4420
4421 let root_path = self.state.lock().snapshot.abs_path.clone();
4422 let root_canonical_path = match self.fs.canonicalize(root_path.as_path()).await {
4423 Ok(path) => SanitizedPath::from(path),
4424 Err(err) => {
4425 log::error!("failed to canonicalize root path: {}", err);
4426 return true;
4427 }
4428 };
4429 let abs_paths = request
4430 .relative_paths
4431 .iter()
4432 .map(|path| {
4433 if path.file_name().is_some() {
4434 root_canonical_path.as_path().join(path).to_path_buf()
4435 } else {
4436 root_canonical_path.as_path().to_path_buf()
4437 }
4438 })
4439 .collect::<Vec<_>>();
4440
4441 {
4442 let mut state = self.state.lock();
4443 let is_idle = state.snapshot.completed_scan_id == state.snapshot.scan_id;
4444 state.snapshot.scan_id += 1;
4445 if is_idle {
4446 state.snapshot.completed_scan_id = state.snapshot.scan_id;
4447 }
4448 }
4449
4450 self.reload_entries_for_paths(
4451 root_path,
4452 root_canonical_path,
4453 &request.relative_paths,
4454 abs_paths,
4455 None,
4456 )
4457 .await;
4458
4459 self.send_status_update(scanning, request.done)
4460 }
4461
    /// Handles a batch of raw filesystem events: translates absolute paths to
    /// worktree-relative paths, filters out irrelevant events, reloads the
    /// affected entries, and schedules ignore-status and git-repository
    /// updates as needed.
    async fn process_events(&self, mut abs_paths: Vec<PathBuf>) {
        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(root_path.as_path()).await {
            Ok(path) => SanitizedPath::from(path),
            Err(err) => {
                // The root could not be canonicalized; it may have been
                // renamed or deleted. Ask the root file handle (if present)
                // for its current path to distinguish a rename from a
                // disappearance.
                let new_path = self
                    .state
                    .lock()
                    .snapshot
                    .root_file_handle
                    .clone()
                    .and_then(|handle| handle.current_path(&self.fs).log_err())
                    .map(SanitizedPath::from)
                    .filter(|new_path| *new_path != root_path);

                if let Some(new_path) = new_path.as_ref() {
                    log::info!(
                        "root renamed from {} to {}",
                        root_path.as_path().display(),
                        new_path.as_path().display()
                    )
                } else {
                    log::warn!("root path could not be canonicalized: {}", err);
                }
                // Notify the worktree of the root change and abandon this
                // batch of events.
                self.status_updates_tx
                    .unbounded_send(ScanState::RootUpdated { new_path })
                    .ok();
                return;
            }
        };

        // Certain directories may have FS changes, but do not lead to git data changes that Zed cares about.
        // Ignore these, to avoid Zed unnecessarily rescanning git metadata.
        let skipped_files_in_dot_git = HashSet::from_iter([*COMMIT_MESSAGE, *INDEX_LOCK]);
        let skipped_dirs_in_dot_git = [*FSMONITOR_DAEMON];

        let mut relative_paths = Vec::with_capacity(abs_paths.len());
        let mut dot_git_abs_paths = Vec::new();
        // Sort so that parents precede their children, then drop any path
        // whose ancestor is also in the batch — rescanning the ancestor
        // covers the child. (`dedup_by` removes the later element when the
        // closure returns true.)
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(b));
        abs_paths.retain(|abs_path| {
            let abs_path = SanitizedPath::from(abs_path);

            // NOTE: this guard borrows the locked state, so the state lock is
            // held for the remainder of this closure body.
            let snapshot = &self.state.lock().snapshot;
            {
                let mut is_git_related = false;

                // Determine whether this path lies inside a git directory,
                // and if so, where inside it.
                let dot_git_paths = abs_path.as_path().ancestors().find_map(|ancestor| {
                    if smol::block_on(is_git_dir(ancestor, self.fs.as_ref())) {
                        let path_in_git_dir = abs_path.as_path().strip_prefix(ancestor).expect("stripping off the ancestor");
                        Some((ancestor.to_owned(), path_in_git_dir.to_owned()))
                    } else {
                        None
                    }
                });

                if let Some((dot_git_abs_path, path_in_git_dir)) = dot_git_paths {
                    if skipped_files_in_dot_git.contains(path_in_git_dir.as_os_str()) || skipped_dirs_in_dot_git.iter().any(|skipped_git_subdir| path_in_git_dir.starts_with(skipped_git_subdir)) {
                        log::debug!("ignoring event {abs_path:?} as it's in the .git directory among skipped files or directories");
                        return false;
                    }

                    is_git_related = true;
                    if !dot_git_abs_paths.contains(&dot_git_abs_path) {
                        dot_git_abs_paths.push(dot_git_abs_path);
                    }
                }

                let relative_path: Arc<Path> =
                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                        path.into()
                    } else {
                        if is_git_related {
                            log::debug!(
                                "ignoring event {abs_path:?}, since it's in git dir outside of root path {root_canonical_path:?}",
                            );
                        } else {
                            log::error!(
                                "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
                            );
                        }
                        return false;
                    };

                // A changed .gitignore can affect the status of files in any
                // repository whose working directory contains it, so schedule
                // those repositories for a git rescan too.
                if abs_path.0.file_name() == Some(*GITIGNORE) {
                    for (_, repo) in snapshot.git_repositories.iter().filter(|(_, repo)| repo.directory_contains(&relative_path)) {
                        if !dot_git_abs_paths.iter().any(|dot_git_abs_path| dot_git_abs_path == repo.dot_git_dir_abs_path.as_ref()) {
                            dot_git_abs_paths.push(repo.dot_git_dir_abs_path.to_path_buf());
                        }
                    }
                }

                // Events inside directories we never loaded are irrelevant;
                // those entries will be discovered when the directory is
                // actually scanned.
                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
                    snapshot
                        .entry_for_path(parent)
                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
                });
                if !parent_dir_is_loaded {
                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
                    return false;
                }

                if self.settings.is_path_excluded(&relative_path) {
                    if !is_git_related {
                        log::debug!("ignoring FS event for excluded path {relative_path:?}");
                    }
                    return false;
                }

                relative_paths.push(relative_path);
                true
            }
        });

        if relative_paths.is_empty() && dot_git_abs_paths.is_empty() {
            return;
        }

        self.state.lock().snapshot.scan_id += 1;

        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        log::debug!("received fs events {:?}", relative_paths);
        self.reload_entries_for_paths(
            root_path,
            root_canonical_path,
            &relative_paths,
            abs_paths,
            Some(scan_job_tx.clone()),
        )
        .await;

        self.update_ignore_statuses(scan_job_tx).await;
        let scans_running = self.scan_dirs(false, scan_job_rx).await;

        let status_update = if !dot_git_abs_paths.is_empty() {
            Some(self.update_git_repositories(dot_git_abs_paths))
        } else {
            None
        };

        // Finalize asynchronously: wait for any git repository update, mark
        // the scan complete, prune bookkeeping for removed entries, and then
        // notify the worktree.
        let phase = self.phase;
        let status_update_tx = self.status_updates_tx.clone();
        let state = self.state.clone();
        self.executor
            .spawn(async move {
                if let Some(status_update) = status_update {
                    status_update.await;
                }

                {
                    let mut state = state.lock();
                    state.snapshot.completed_scan_id = state.snapshot.scan_id;
                    for (_, entry) in mem::take(&mut state.removed_entries) {
                        state.scanned_dirs.remove(&entry.id);
                    }
                    #[cfg(test)]
                    state.snapshot.check_git_invariants();
                }
                let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
                send_status_update_inner(phase, state, status_update_tx, scanning, SmallVec::new());
            })
            .detach();
    }
4625
4626 async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
4627 let (scan_job_tx, scan_job_rx) = channel::unbounded();
4628 {
4629 let mut state = self.state.lock();
4630 let root_path = state.snapshot.abs_path.clone();
4631 for path in paths {
4632 for ancestor in path.ancestors() {
4633 if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
4634 if entry.kind == EntryKind::UnloadedDir {
4635 let abs_path = root_path.as_path().join(ancestor);
4636 state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
4637 state.paths_to_scan.insert(path.clone());
4638 break;
4639 }
4640 }
4641 }
4642 }
4643 drop(scan_job_tx);
4644 }
4645 let scans_running = Arc::new(AtomicU32::new(0));
4646 while let Ok(job) = scan_job_rx.recv().await {
4647 self.scan_dir(&scans_running, &job).await.log_err();
4648 }
4649
4650 !mem::take(&mut self.state.lock().paths_to_scan).is_empty()
4651 }
4652
    /// Drains `scan_jobs_rx` using one worker task per CPU, optionally
    /// sending periodic progress updates to the worktree. Returns a handle
    /// that tracks how many git status scans are still running.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) -> FsScanned {
        // If the worktree is no longer listening for status updates, there is
        // no point in doing any scanning.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return FsScanned::default();
        }

        // Initialized to 1 to represent this directory scan itself; the count
        // is decremented after all workers finish, below. `scan_dir` bumps it
        // for each git status update it schedules.
        let scans_running = Arc::new(AtomicU32::new(1));
        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.next_scan_request().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race: send the update.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, SmallVec::new());
                                        }
                                        Err(count) => {
                                            // Another worker already sent it; catch up to the
                                            // shared counter so we can compete next interval.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&scans_running, &job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    });
                }
            })
            .await;

        // The directory scan proper is done; only git status scans (if any)
        // may still be in flight.
        scans_running.fetch_sub(1, atomic::Ordering::Release);
        FsScanned {
            status_scans: scans_running,
        }
    }
4729
4730 fn send_status_update(&self, scanning: bool, barrier: SmallVec<[barrier::Sender; 1]>) -> bool {
4731 send_status_update_inner(
4732 self.phase,
4733 self.state.clone(),
4734 self.status_updates_tx.clone(),
4735 scanning,
4736 barrier,
4737 )
4738 }
4739
    /// Reads the contents of a single directory from the filesystem,
    /// inserting the discovered entries into the snapshot and enqueueing scan
    /// jobs for subdirectories. `.git` and `.gitignore` children are handled
    /// first so that repository and ignore state are up to date before the
    /// sibling entries are processed.
    async fn scan_dir(&self, scans_running: &Arc<AtomicU32>, job: &ScanJob) -> Result<()> {
        let root_abs_path;
        let root_char_bag;
        {
            let snapshot = &self.state.lock().snapshot;
            if self.settings.is_path_excluded(&job.path) {
                log::error!("skipping excluded directory {:?}", job.path);
                return Ok(());
            }
            log::debug!("scanning directory {:?}", job.path);
            root_abs_path = snapshot.abs_path().clone();
            root_char_bag = snapshot.root_char_bag;
        }

        let next_entry_id = self.next_entry_id.clone();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let mut root_canonical_path = None;
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per discovered subdirectory, in discovery order; `None`
        // marks a directory that must not be recursed into.
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut child_paths = self
            .fs
            .read_dir(&job.abs_path)
            .await?
            .filter_map(|entry| async {
                match entry {
                    Ok(entry) => Some(entry),
                    Err(error) => {
                        log::error!("error processing entry {:?}", error);
                        None
                    }
                }
            })
            .collect::<Vec<_>>()
            .await;

        // Ensure that .git and .gitignore are processed first.
        swap_to_front(&mut child_paths, *GITIGNORE);
        swap_to_front(&mut child_paths, *DOT_GIT);

        let mut git_status_update_jobs = Vec::new();
        for child_abs_path in child_paths {
            let child_abs_path: Arc<Path> = child_abs_path.into();
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();

            if child_name == *DOT_GIT {
                {
                    // Register the repository and kick off a git status scan;
                    // `scans_running` is incremented here and decremented in
                    // the spawned task below once the scan completes.
                    let mut state = self.state.lock();
                    let repo = state.insert_git_repository(
                        child_path.clone(),
                        self.fs.as_ref(),
                        self.watcher.as_ref(),
                    );
                    if let Some(local_repo) = repo {
                        scans_running.fetch_add(1, atomic::Ordering::Release);
                        git_status_update_jobs
                            .push(self.schedule_git_statuses_update(&mut state, local_repo));
                    }
                }
            } else if child_name == *GITIGNORE {
                // Extend the ignore stack so the remaining children of this
                // directory are evaluated against the new rules.
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }
            }

            if self.settings.is_path_excluded(&child_path) {
                log::debug!("skipping excluded child entry {child_path:?}");
                self.state.lock().remove_path(&child_path);
                continue;
            }

            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {child_abs_path:?}: {err:?}");
                    continue;
                }
            };

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
                None,
            );

            if job.is_external {
                // Children of an external directory are external as well.
                child_entry.is_external = true;
            } else if child_metadata.is_symlink {
                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
                    Ok(path) => path,
                    Err(err) => {
                        log::error!(
                            "error reading target of symlink {:?}: {:?}",
                            child_abs_path,
                            err
                        );
                        continue;
                    }
                };

                // lazily canonicalize the root path in order to determine if
                // symlinks point outside of the worktree.
                let root_canonical_path = match &root_canonical_path {
                    Some(path) => path,
                    None => match self.fs.canonicalize(&root_abs_path).await {
                        Ok(path) => root_canonical_path.insert(path),
                        Err(err) => {
                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
                            continue;
                        }
                    },
                };

                if !canonical_path.starts_with(root_canonical_path) {
                    child_entry.is_external = true;
                }

                child_entry.canonical_path = Some(canonical_path.into());
            }

            if child_entry.is_dir() {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_always_included = self.settings.is_path_always_included(&child_path);

                // Avoid recursing until crash in the case of a recursive symlink
                if job.ancestor_inodes.contains(&child_entry.inode) {
                    new_jobs.push(None);
                } else {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path.clone(),
                        path: child_path,
                        is_external: child_entry.is_external,
                        ignore_stack: if child_entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
                child_entry.is_always_included = self.settings.is_path_always_included(&child_path);
            }

            {
                let relative_path = job.path.join(child_name);
                if self.is_path_private(&relative_path) {
                    log::debug!("detected private file: {relative_path:?}");
                    child_entry.is_private = true;
                }
            }

            new_entries.push(child_entry);
        }

        // Await the scheduled git status scans off-thread; when they finish,
        // decrement `scans_running` and send a status update if any of them
        // actually produced results.
        let task_state = self.state.clone();
        let phase = self.phase;
        let status_updates_tx = self.status_updates_tx.clone();
        let scans_running = scans_running.clone();
        self.executor
            .spawn(async move {
                if !git_status_update_jobs.is_empty() {
                    let status_updates = join_all(git_status_update_jobs).await;
                    let status_updated = status_updates
                        .iter()
                        .any(|update_result| update_result.is_ok());
                    scans_running.fetch_sub(status_updates.len() as u32, atomic::Ordering::Release);
                    if status_updated {
                        let scanning = scans_running.load(atomic::Ordering::Acquire) > 0;
                        send_status_update_inner(
                            phase,
                            task_state,
                            status_updates_tx,
                            scanning,
                            SmallVec::new(),
                        );
                    }
                }
            })
            .detach();

        let mut state = self.state.lock();

        // Identify any subdirectories that should not be scanned.
        // `job_ix` tracks the position of each directory entry's job within
        // `new_jobs`: it is advanced only when a job is kept, so after a
        // removal it already points at the next directory's job.
        let mut job_ix = 0;
        for entry in &mut new_entries {
            state.reuse_entry_id(entry);
            if entry.is_dir() {
                if state.should_scan_directory(entry) {
                    job_ix += 1;
                } else {
                    log::debug!("defer scanning directory {:?}", entry.path);
                    entry.kind = EntryKind::UnloadedDir;
                    new_jobs.remove(job_ix);
                }
            }
            if entry.is_always_included {
                state
                    .snapshot
                    .always_included_entries
                    .push(entry.path.clone());
            }
        }

        state.populate_dir(&job.path, new_entries, new_ignore);
        self.watcher.add(job.abs_path.as_ref()).log_err();

        // Enqueue the surviving subdirectory jobs (skipping `None` slots,
        // which mark recursive symlinks).
        for new_job in new_jobs.into_iter().flatten() {
            job.scan_queue
                .try_send(new_job)
                .expect("channel is unbounded");
        }

        Ok(())
    }
4975
    /// Reloads snapshot entries for the given paths: refreshes metadata from
    /// the filesystem, updates git statuses for the repositories containing
    /// the paths, and — when `scan_queue_tx` is provided — enqueues
    /// directories for recursive rescanning.
    ///
    /// All list arguments should be sorted before calling this function
    async fn reload_entries_for_paths(
        &self,
        root_abs_path: SanitizedPath,
        root_canonical_path: SanitizedPath,
        relative_paths: &[Arc<Path>],
        abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) {
        // grab metadata for all requested paths
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| async move {
                    let metadata = self.fs.metadata(abs_path).await?;
                    if let Some(metadata) = metadata {
                        let canonical_path = self.fs.canonicalize(abs_path).await?;

                        // If we're on a case-insensitive filesystem (default on macOS), we want
                        // to only ignore metadata for non-symlink files if their absolute-path matches
                        // the canonical-path.
                        // Because if not, this might be a case-only-renaming (`mv test.txt TEST.TXT`)
                        // and we want to ignore the metadata for the old path (`test.txt`) so it's
                        // treated as removed.
                        if !self.fs_case_sensitive && !metadata.is_symlink {
                            let canonical_file_name = canonical_path.file_name();
                            let file_name = abs_path.file_name();
                            if canonical_file_name != file_name {
                                return Ok(None);
                            }
                        }

                        anyhow::Ok(Some((metadata, SanitizedPath::from(canonical_path))))
                    } else {
                        Ok(None)
                    }
                })
                .collect::<Vec<_>>(),
        )
        .await;

        // NOTE: this lock is held for the remainder of the function.
        let mut state = self.state.lock();
        let doing_recursive_update = scan_queue_tx.is_some();

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            if matches!(metadata, Ok(None)) || doing_recursive_update {
                log::trace!("remove path {:?}", path);
                state.remove_path(path);
            }
        }

        // Group all relative paths by their git repository.
        let mut paths_by_git_repo = HashMap::default();
        for relative_path in relative_paths.iter() {
            let repository_data = state
                .snapshot
                .local_repo_for_path(relative_path)
                .zip(state.snapshot.repository_for_path(relative_path));
            if let Some((local_repo, entry)) = repository_data {
                if let Ok(repo_path) = local_repo.relativize(relative_path) {
                    paths_by_git_repo
                        .entry(local_repo.work_directory.clone())
                        .or_insert_with(|| RepoPaths {
                            entry: entry.clone(),
                            repo: local_repo.repo_ptr.clone(),
                            repo_paths: Default::default(),
                        })
                        .add_path(repo_path);
                }
            }
        }

        // For each affected repository, ask git for the status of the touched
        // paths and diff the result against the cached `statuses_by_path`:
        // insert entries whose status changed, and remove entries for paths
        // git no longer reports.
        for (work_directory, mut paths) in paths_by_git_repo {
            if let Ok(status) = paths.repo.status(&paths.repo_paths) {
                let mut changed_path_statuses = Vec::new();
                let statuses = paths.entry.statuses_by_path.clone();
                let mut cursor = statuses.cursor::<PathProgress>(&());

                for (repo_path, status) in &*status.entries {
                    paths.remove_repo_path(repo_path);
                    if cursor.seek_forward(&PathTarget::Path(repo_path), Bias::Left, &()) {
                        if &cursor.item().unwrap().status == status {
                            // Status unchanged; nothing to record.
                            continue;
                        }
                    }

                    changed_path_statuses.push(Edit::Insert(StatusEntry {
                        repo_path: repo_path.clone(),
                        status: *status,
                    }));
                }

                // Any queried path git did not report a status for is now
                // clean; drop its cached status entry if one exists.
                let mut cursor = statuses.cursor::<PathProgress>(&());
                for path in paths.repo_paths {
                    if cursor.seek_forward(&PathTarget::Path(&path), Bias::Left, &()) {
                        changed_path_statuses.push(Edit::Remove(PathKey(path.0)));
                    }
                }

                if !changed_path_statuses.is_empty() {
                    let work_directory_id = state.snapshot.repositories.update(
                        &work_directory.path_key(),
                        &(),
                        move |repository_entry| {
                            repository_entry
                                .statuses_by_path
                                .edit(changed_path_statuses, &());

                            repository_entry.work_directory_id
                        },
                    );

                    if let Some(work_directory_id) = work_directory_id {
                        let scan_id = state.snapshot.scan_id;
                        state.snapshot.git_repositories.update(
                            &work_directory_id,
                            |local_repository_entry| {
                                local_repository_entry.status_scan_id = scan_id;
                            },
                        );
                    }
                }
            }
        }

        // Insert refreshed entries for paths that still exist, and clean up
        // repository state for paths that are gone.
        for (path, metadata) in relative_paths.iter().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.as_path().join(path).into();
            match metadata {
                Ok(Some((metadata, canonical_path))) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let is_external = !canonical_path.starts_with(&root_canonical_path);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                        if metadata.is_symlink {
                            Some(canonical_path.as_path().to_path_buf().into())
                        } else {
                            None
                        },
                    );

                    let is_dir = fs_entry.is_dir();
                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
                    fs_entry.is_external = is_external;
                    fs_entry.is_private = self.is_path_private(path);
                    fs_entry.is_always_included = self.settings.is_path_always_included(path);

                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, is_dir) {
                        if state.should_scan_directory(&fs_entry)
                            || (fs_entry.path.as_os_str().is_empty()
                                && abs_path.file_name() == Some(*DOT_GIT))
                        {
                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
                        } else {
                            fs_entry.kind = EntryKind::UnloadedDir;
                        }
                    }

                    state.insert_entry(fs_entry.clone(), self.fs.as_ref(), self.watcher.as_ref());
                }
                Ok(None) => {
                    self.remove_repo_path(path, &mut state.snapshot);
                }
                Err(err) => {
                    log::error!("error reading file {abs_path:?} on event: {err:#}");
                }
            }
        }

        // Record the refreshed paths so the next status update reports them
        // as changed, keeping the list sorted.
        util::extend_sorted(
            &mut state.changed_paths,
            relative_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );
    }
5159
5160 fn remove_repo_path(&self, path: &Arc<Path>, snapshot: &mut LocalSnapshot) -> Option<()> {
5161 if !path
5162 .components()
5163 .any(|component| component.as_os_str() == *DOT_GIT)
5164 {
5165 if let Some(repository) = snapshot.repository(PathKey(path.clone())) {
5166 snapshot
5167 .git_repositories
5168 .remove(&repository.work_directory_id);
5169 snapshot
5170 .snapshot
5171 .repositories
5172 .remove(&repository.work_directory.path_key(), &());
5173 return Some(());
5174 }
5175 }
5176
5177 Some(())
5178 }
5179
    /// Recomputes ignore statuses after gitignore changes: collects the
    /// parent directories whose ignore rules were flagged as needing an
    /// update, deduplicates nested ones, and processes them in parallel.
    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
        let mut ignores_to_update = Vec::new();
        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        let prev_snapshot;
        {
            // NOTE: the state lock is held for this whole block, including the
            // sends below; the queue is unbounded, so `send_blocking` cannot
            // actually block here.
            let snapshot = &mut self.state.lock().snapshot;
            let abs_path = snapshot.abs_path.clone();
            snapshot
                .ignores_by_parent_abs_path
                .retain(|parent_abs_path, (_, needs_update)| {
                    if let Ok(parent_path) = parent_abs_path.strip_prefix(abs_path.as_path()) {
                        // Collect flagged directories that still exist in the
                        // snapshot, clearing the flag as we go.
                        if *needs_update {
                            *needs_update = false;
                            if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                                ignores_to_update.push(parent_abs_path.clone());
                            }
                        }

                        // Drop cached gitignores whose file no longer exists
                        // in the snapshot.
                        let ignore_path = parent_path.join(*GITIGNORE);
                        if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                            return false;
                        }
                    }
                    true
                });

            // Because the list is sorted, any directory nested inside a
            // previously-enqueued one can be skipped: the ancestor's job will
            // cover it recursively.
            ignores_to_update.sort_unstable();
            let mut ignores_to_update = ignores_to_update.into_iter().peekable();
            while let Some(parent_abs_path) = ignores_to_update.next() {
                while ignores_to_update
                    .peek()
                    .map_or(false, |p| p.starts_with(&parent_abs_path))
                {
                    ignores_to_update.next().unwrap();
                }

                let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
                ignore_queue_tx
                    .send_blocking(UpdateIgnoreStatusJob {
                        abs_path: parent_abs_path,
                        ignore_stack,
                        ignore_queue: ignore_queue_tx.clone(),
                        scan_queue: scan_job_tx.clone(),
                    })
                    .unwrap();
            }

            prev_snapshot = snapshot.clone();
        }
        // Close our copy of the sender so the workers' receive loops end once
        // all jobs (including recursively enqueued ones) are drained.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.next_scan_request().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &prev_snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
5258
    /// Applies an updated ignore stack to the direct children of
    /// `job.abs_path`: recomputes each child's ignored flag, enqueues child
    /// directories for the same treatment, and records entries whose status
    /// changed so the snapshot and changed-paths list can be updated.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        log::trace!("update ignore status {:?}", job.abs_path);

        // Extend the inherited stack with this directory's own .gitignore,
        // if one is cached for it.
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job
            .abs_path
            .strip_prefix(snapshot.abs_path.as_path())
            .unwrap();

        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());

            if entry.is_dir() {
                // An ignored directory short-circuits all descendants to
                // "ignored"; otherwise the current stack is inherited.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };

                // Scan any directories that were previously ignored and weren't previously scanned.
                if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
                    let state = self.state.lock();
                    if state.should_scan_directory(&entry) {
                        state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
                    }
                }

                // Recurse by re-enqueueing the child directory on the shared
                // ignore queue (processed by the worker pool).
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                        scan_queue: job.scan_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            if entry.is_ignored != was_ignored {
                // Record edits for both indices so they stay consistent.
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Track the changed paths (kept sorted) and commit the edits to the
        // live snapshot.
        let state = &mut self.state.lock();
        for edit in &entries_by_path_edits {
            if let Edit::Insert(entry) = edit {
                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
                    state.changed_paths.insert(ix, entry.path.clone());
                }
            }
        }

        state
            .snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
5329
    /// Reloads the git repositories whose `.git` directories were touched,
    /// registering newly-discovered repositories, rescanning existing ones,
    /// and pruning repositories whose `.git` entry no longer exists. Returns
    /// a task that resolves once all scheduled status updates have finished.
    fn update_git_repositories(&self, dot_git_paths: Vec<PathBuf>) -> Task<()> {
        log::debug!("reloading repositories: {dot_git_paths:?}");

        let mut status_updates = Vec::new();
        {
            let mut state = self.state.lock();
            let scan_id = state.snapshot.scan_id;
            for dot_git_dir in dot_git_paths {
                // Match either the repository's own .git dir or a linked
                // worktree's .git path.
                let existing_repository_entry =
                    state
                        .snapshot
                        .git_repositories
                        .iter()
                        .find_map(|(_, repo)| {
                            if repo.dot_git_dir_abs_path.as_ref() == &dot_git_dir
                                || repo.dot_git_worktree_abs_path.as_deref() == Some(&dot_git_dir)
                            {
                                Some(repo.clone())
                            } else {
                                None
                            }
                        });

                let local_repository = match existing_repository_entry {
                    None => {
                        // NOTE(review): a .git path outside the worktree root
                        // returns from the whole function here, skipping any
                        // remaining dot_git_paths and the pruning pass below —
                        // confirm this early-out (rather than `continue`) is
                        // intended.
                        let Ok(relative) = dot_git_dir.strip_prefix(state.snapshot.abs_path())
                        else {
                            return Task::ready(());
                        };
                        match state.insert_git_repository(
                            relative.into(),
                            self.fs.as_ref(),
                            self.watcher.as_ref(),
                        ) {
                            Some(output) => output,
                            None => continue,
                        }
                    }
                    Some(local_repository) => {
                        // Already rescanned during this scan; skip.
                        if local_repository.git_dir_scan_id == scan_id {
                            continue;
                        }
                        local_repository.repo_ptr.reload_index();

                        state.snapshot.git_repositories.update(
                            &local_repository.work_directory_id,
                            |entry| {
                                entry.git_dir_scan_id = scan_id;
                                entry.status_scan_id = scan_id;
                            },
                        );

                        local_repository
                    }
                };

                status_updates
                    .push(self.schedule_git_statuses_update(&mut state, local_repository));
            }

            // Remove any git repositories whose .git entry no longer exists.
            let snapshot = &mut state.snapshot;
            let mut ids_to_preserve = HashSet::default();
            for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
                let exists_in_snapshot = snapshot
                    .entry_for_id(work_directory_id)
                    .map_or(false, |entry| {
                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                    });

                // Also keep repositories whose .git dir still exists on disk
                // (e.g. repositories above the project root that are not in
                // the snapshot).
                if exists_in_snapshot
                    || matches!(
                        smol::block_on(self.fs.metadata(&entry.dot_git_dir_abs_path)),
                        Ok(Some(_))
                    )
                {
                    ids_to_preserve.insert(work_directory_id);
                }
            }

            snapshot
                .git_repositories
                .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
            snapshot.repositories.retain(&(), |entry| {
                ids_to_preserve.contains(&entry.work_directory_id)
            });
        }

        self.executor.spawn(async move {
            let _updates_finished: Vec<Result<(), oneshot::Canceled>> =
                join_all(status_updates).await;
        })
    }
5423
    /// Schedule an asynchronous recomputation of git statuses for one local
    /// repository, replacing any previously scheduled scan for the same
    /// work-directory path. Returns a receiver that resolves when the update
    /// has been applied to the snapshot (or is cancelled if the task is
    /// dropped/replaced).
    fn schedule_git_statuses_update(
        &self,
        state: &mut BackgroundScannerState,
        mut local_repository: LocalRepositoryEntry,
    ) -> oneshot::Receiver<()> {
        let repository_name = local_repository.work_directory.display_name();
        let path_key = local_repository.work_directory.path_key();

        let job_state = self.state.clone();
        let (tx, rx) = oneshot::channel();

        // Inserting under `path_key` drops any in-flight scan task for this
        // repository, so only the newest scheduled scan runs to completion.
        state.repository_scans.insert(
            path_key.clone(),
            self.executor.spawn(async move {
                // Refresh the current branch first; failures are logged and
                // do not prevent the status scan below.
                update_branches(&job_state, &mut local_repository).log_err();
                log::trace!("updating git statuses for repo {repository_name}",);
                let t0 = Instant::now();

                // Query statuses for the entire work directory.
                let Some(statuses) = local_repository
                    .repo()
                    .status(&[git::WORK_DIRECTORY_REPO_PATH.clone()])
                    .log_err()
                else {
                    return;
                };

                log::trace!(
                    "computed git statuses for repo {repository_name} in {:?}",
                    t0.elapsed()
                );

                let t0 = Instant::now();
                let mut changed_paths = Vec::new();
                // Clone the snapshot so the lock is not held while we process
                // the (possibly large) status list.
                let snapshot = job_state.lock().snapshot.snapshot.clone();

                let Some(mut repository) = snapshot
                    .repository(path_key)
                    .context(
                        "Tried to update git statuses for a repository that isn't in the snapshot",
                    )
                    .log_err()
                else {
                    return;
                };

                // If the merge heads changed (e.g. a merge started or ended),
                // discard the previously recorded conflicts; they are rebuilt
                // from the fresh statuses below.
                let merge_head_shas = local_repository.repo().merge_head_shas();
                if merge_head_shas != local_repository.current_merge_head_shas {
                    mem::take(&mut repository.current_merge_conflicts);
                }

                // Rebuild the per-path status tree from scratch.
                let mut new_entries_by_path = SumTree::new(&());
                for (repo_path, status) in statuses.entries.iter() {
                    let project_path = repository.work_directory.unrelativize(repo_path);

                    new_entries_by_path.insert_or_replace(
                        StatusEntry {
                            repo_path: repo_path.clone(),
                            status: *status,
                        },
                        &(),
                    );
                    if status.is_conflicted() {
                        repository.current_merge_conflicts.insert(repo_path.clone());
                    }

                    if let Some(path) = project_path {
                        changed_paths.push(path);
                    }
                }

                // Re-acquire the lock and publish the updated repository state.
                repository.statuses_by_path = new_entries_by_path;
                let mut state = job_state.lock();
                state
                    .snapshot
                    .repositories
                    .insert_or_replace(repository, &());
                state.snapshot.git_repositories.update(
                    &local_repository.work_directory_id,
                    |entry| {
                        entry.current_merge_head_shas = merge_head_shas;
                        // First line of .git/MERGE_MSG, if present.
                        entry.merge_message = std::fs::read_to_string(
                            local_repository.dot_git_dir_abs_path.join("MERGE_MSG"),
                        )
                        .ok()
                        .and_then(|merge_msg| Some(merge_msg.lines().next()?.to_owned()));
                        entry.status_scan_id += 1;
                    },
                );

                // Record the affected project paths so the next status update
                // broadcast includes them.
                util::extend_sorted(
                    &mut state.changed_paths,
                    changed_paths,
                    usize::MAX,
                    Ord::cmp,
                );

                log::trace!(
                    "applied git status updates for repo {repository_name} in {:?}",
                    t0.elapsed(),
                );
                tx.send(()).ok();
            }),
        );
        rx
    }
5530
    /// Wait for the FS-watch latency interval between scan-progress
    /// notifications. When `running` is false, pends forever (never resolves),
    /// effectively disabling the timer branch in the caller's select.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        // Under a fake FS in tests, use a simulated delay so the deterministic
        // executor stays in control of timing.
        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(FS_WATCH_LATENCY).await;
    }
5543
5544 fn is_path_private(&self, path: &Path) -> bool {
5545 !self.share_private_files && self.settings.is_path_private(path)
5546 }
5547
5548 async fn next_scan_request(&self) -> Result<ScanRequest> {
5549 let mut request = self.scan_requests_rx.recv().await?;
5550 while let Ok(next_request) = self.scan_requests_rx.try_recv() {
5551 request.relative_paths.extend(next_request.relative_paths);
5552 request.done.extend(next_request.done);
5553 }
5554 Ok(request)
5555 }
5556}
5557
/// Build a diff against the previous snapshot and broadcast it on
/// `status_updates_tx`. Returns `false` only when the receiving side has been
/// dropped; returns `true` when the update was sent, or when there was nothing
/// to report yet while still scanning.
fn send_status_update_inner(
    phase: BackgroundScannerPhase,
    state: Arc<Mutex<BackgroundScannerState>>,
    status_updates_tx: UnboundedSender<ScanState>,
    scanning: bool,
    barrier: SmallVec<[barrier::Sender; 1]>,
) -> bool {
    let mut state = state.lock();
    // While a scan is in progress, suppress empty updates; after scanning,
    // an empty update is still sent (e.g. to release the barrier).
    if state.changed_paths.is_empty() && scanning {
        return true;
    }

    // Swap in the new snapshot as the baseline for the next diff.
    let new_snapshot = state.snapshot.clone();
    let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
    let changes = build_diff(phase, &old_snapshot, &new_snapshot, &state.changed_paths);
    state.changed_paths.clear();

    status_updates_tx
        .unbounded_send(ScanState::Updated {
            snapshot: new_snapshot,
            changes,
            scanning,
            barrier,
        })
        .is_ok()
}
5584
/// Refresh the current branch for `repository`, writing the result back into
/// the shared snapshot's repository entry.
fn update_branches(
    state: &Mutex<BackgroundScannerState>,
    repository: &mut LocalRepositoryEntry,
) -> Result<()> {
    let branches = repository.repo().branches()?;
    // Clone the snapshot so the lock isn't held across the branch lookup.
    let snapshot = state.lock().snapshot.snapshot.clone();
    let mut repository = snapshot
        .repository(repository.work_directory.path_key())
        .context("Missing repository")?;
    // The branch marked as HEAD is the currently checked-out branch.
    repository.current_branch = branches.into_iter().find(|branch| branch.is_head);

    // Re-acquire the lock to publish the updated entry.
    let mut state = state.lock();
    state
        .snapshot
        .repositories
        .insert_or_replace(repository, &());

    Ok(())
}
5604
/// Compute the set of entry changes between two snapshots, restricted to the
/// given event paths (and their subtrees). Walks two path-ordered cursors in
/// lockstep, classifying each divergence as Added/Removed/Updated/Loaded.
fn build_diff(
    phase: BackgroundScannerPhase,
    old_snapshot: &Snapshot,
    new_snapshot: &Snapshot,
    event_paths: &[Arc<Path>],
) -> UpdatedEntriesSet {
    use BackgroundScannerPhase::*;
    use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

    // Identify which paths have changed. Use the known set of changed
    // parent paths to optimize the search.
    let mut changes = Vec::new();
    let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>(&());
    let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>(&());
    let mut last_newly_loaded_dir_path = None;
    old_paths.next(&());
    new_paths.next(&());
    for path in event_paths {
        let path = PathKey(path.clone());
        // Advance each cursor to the first entry at or after `path`.
        if old_paths.item().map_or(false, |e| e.path < path.0) {
            old_paths.seek_forward(&path, Bias::Left, &());
        }
        if new_paths.item().map_or(false, |e| e.path < path.0) {
            new_paths.seek_forward(&path, Bias::Left, &());
        }
        loop {
            match (old_paths.item(), new_paths.item()) {
                (Some(old_entry), Some(new_entry)) => {
                    // Both cursors have moved past `path` and its subtree:
                    // this event path is fully processed.
                    if old_entry.path > path.0
                        && new_entry.path > path.0
                        && !old_entry.path.starts_with(&path.0)
                        && !new_entry.path.starts_with(&path.0)
                    {
                        break;
                    }

                    match Ord::cmp(&old_entry.path, &new_entry.path) {
                        // Present only in the old snapshot: removed.
                        Ordering::Less => {
                            changes.push((old_entry.path.clone(), old_entry.id, Removed));
                            old_paths.next(&());
                        }
                        // Present in both: compare ids and contents.
                        Ordering::Equal => {
                            if phase == EventsReceivedDuringInitialScan {
                                if old_entry.id != new_entry.id {
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                }
                                // If the worktree was not fully initialized when this event was generated,
                                // we can't know whether this entry was added during the scan or whether
                                // it was merely updated.
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    AddedOrUpdated,
                                ));
                            } else if old_entry.id != new_entry.id {
                                // Same path, different id: replaced entry.
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                changes.push((new_entry.path.clone(), new_entry.id, Added));
                            } else if old_entry != new_entry {
                                if old_entry.kind.is_unloaded() {
                                    // Entry transitioned from unloaded to
                                    // loaded; its descendants are also "Loaded".
                                    last_newly_loaded_dir_path = Some(&new_entry.path);
                                    changes.push((new_entry.path.clone(), new_entry.id, Loaded));
                                } else {
                                    changes.push((new_entry.path.clone(), new_entry.id, Updated));
                                }
                            }
                            old_paths.next(&());
                            new_paths.next(&());
                        }
                        // Present only in the new snapshot: added or loaded.
                        Ordering::Greater => {
                            let is_newly_loaded = phase == InitialScan
                                || last_newly_loaded_dir_path
                                    .as_ref()
                                    .map_or(false, |dir| new_entry.path.starts_with(dir));
                            changes.push((
                                new_entry.path.clone(),
                                new_entry.id,
                                if is_newly_loaded { Loaded } else { Added },
                            ));
                            new_paths.next(&());
                        }
                    }
                }
                // Old snapshot has trailing entries: all removed.
                (Some(old_entry), None) => {
                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                    old_paths.next(&());
                }
                // New snapshot has trailing entries: added or loaded.
                (None, Some(new_entry)) => {
                    let is_newly_loaded = phase == InitialScan
                        || last_newly_loaded_dir_path
                            .as_ref()
                            .map_or(false, |dir| new_entry.path.starts_with(dir));
                    changes.push((
                        new_entry.path.clone(),
                        new_entry.id,
                        if is_newly_loaded { Loaded } else { Added },
                    ));
                    new_paths.next(&());
                }
                (None, None) => break,
            }
        }
    }

    changes.into()
}
5710
/// Move the path whose final component equals `file` to the front of
/// `child_paths`, preserving the relative order of all other elements.
/// Paths without a final component (e.g. ending in `..`) are ignored instead
/// of panicking, and the shift is done in a single rotation.
fn swap_to_front(child_paths: &mut Vec<PathBuf>, file: &OsStr) {
    if let Some(position) = child_paths
        .iter()
        .position(|path| path.file_name() == Some(file))
    {
        // One O(position) rotation instead of remove + insert (two shifts).
        child_paths[..=position].rotate_right(1);
    }
}
5720
5721fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
5722 let mut result = root_char_bag;
5723 result.extend(
5724 path.to_string_lossy()
5725 .chars()
5726 .map(|c| c.to_ascii_lowercase()),
5727 );
5728 result
5729}
5730
/// A git repository paired with a set of repo-relative paths queued for it.
#[derive(Debug)]
struct RepoPaths {
    repo: Arc<dyn GitRepository>,
    entry: RepositoryEntry,
    // Kept sorted so insertion/removal can use binary search.
    repo_paths: Vec<RepoPath>,
}
5738
5739impl RepoPaths {
5740 fn add_path(&mut self, repo_path: RepoPath) {
5741 match self.repo_paths.binary_search(&repo_path) {
5742 Ok(_) => {}
5743 Err(ix) => self.repo_paths.insert(ix, repo_path),
5744 }
5745 }
5746
5747 fn remove_repo_path(&mut self, repo_path: &RepoPath) {
5748 match self.repo_paths.binary_search(&repo_path) {
5749 Ok(ix) => {
5750 self.repo_paths.remove(ix);
5751 }
5752 Err(_) => {}
5753 }
5754 }
5755}
5756
/// A queued request to scan one directory during a background scan.
#[derive(Debug)]
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Sender for enqueueing child-directory jobs onto the same queue.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories — presumably used to detect symlink
    // cycles while descending; TODO confirm at the consumer.
    ancestor_inodes: TreeSet<u64>,
    is_external: bool,
}
5766
/// A queued request to recompute ignore statuses under a directory after a
/// gitignore change.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Sender for enqueueing follow-up ignore-status jobs.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
    // Sender for scheduling rescans of directories whose ignore status changed.
    scan_queue: Sender<ScanJob>,
}
5773
/// Test-only helpers for flushing pending filesystem events on a worktree.
pub trait WorktreeModelHandle {
    /// Wait until all FS events currently in flight for the worktree's root
    /// have been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;

    /// Like `flush_fs_events`, but flushes events in the root repository's
    /// `.git` folder, which may live outside the worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events_in_root_git_repository<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
5787
impl WorktreeModelHandle for Entity<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        // Temporary file created and removed purely to generate FS events.
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create the sentinel and wait for it to appear in the worktree.
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_some())
                .await;

            // Remove it and wait for the removal to be observed, guaranteeing
            // the event stream has caught up past both mutations.
            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();
            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_none())
                .await;

            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }

    // This function is similar to flush_fs_events, except that it waits for events to be flushed in
    // the .git folder of the root repository.
    // The reason for its existence is that a repository's .git folder might live *outside* of the
    // worktree and thus its FS events might go through a different path.
    // In order to flush those, we need to create artificial events in the .git folder and wait
    // for the repository to be reloaded.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events_in_root_git_repository<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path, mut git_dir_scan_id) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            let root_entry = tree.root_git_entry().unwrap();
            let local_repo_entry = tree.get_local_repo(&root_entry).unwrap();
            (
                tree.fs.clone(),
                local_repo_entry.dot_git_dir_abs_path.clone(),
                local_repo_entry.git_dir_scan_id,
            )
        });

        // Detect a repository reload by watching the scan id increase.
        let scan_id_increased = |tree: &mut Worktree, git_dir_scan_id: &mut usize| {
            let root_entry = tree.root_git_entry().unwrap();
            let local_repo_entry = tree
                .as_local()
                .unwrap()
                .get_local_repo(&root_entry)
                .unwrap();

            if local_repo_entry.git_dir_scan_id > *git_dir_scan_id {
                *git_dir_scan_id = local_repo_entry.git_dir_scan_id;
                true
            } else {
                false
            }
        };

        async move {
            // Each sentinel mutation in the .git folder must trigger a reload.
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| {
                scan_id_increased(tree, &mut git_dir_scan_id)
            })
            .await;

            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| {
                scan_id_increased(tree, &mut git_dir_scan_id)
            })
            .await;

            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
5894
/// Accumulated counts for a position in the entries tree, used as a cursor
/// dimension when traversing worktree entries.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest path seen so far (the current position).
    max_path: &'a Path,
    // Total entries up to this position.
    count: usize,
    non_ignored_count: usize,
    // File entries only (subset of `count`).
    file_count: usize,
    non_ignored_file_count: usize,
}
5903
5904impl<'a> TraversalProgress<'a> {
5905 fn count(&self, include_files: bool, include_dirs: bool, include_ignored: bool) -> usize {
5906 match (include_files, include_dirs, include_ignored) {
5907 (true, true, true) => self.count,
5908 (true, true, false) => self.non_ignored_count,
5909 (true, false, true) => self.file_count,
5910 (true, false, false) => self.non_ignored_file_count,
5911 (false, true, true) => self.count - self.file_count,
5912 (false, true, false) => self.non_ignored_count - self.non_ignored_file_count,
5913 (false, false, _) => 0,
5914 }
5915 }
5916}
5917
/// Lets `TraversalProgress` be accumulated along a cursor over entry summaries.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    // Fold one subtree summary into the running totals.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.non_ignored_count += summary.non_ignored_count;
        self.file_count += summary.file_count;
        self.non_ignored_file_count += summary.non_ignored_file_count;
    }
}
5931
impl<'a> Default for TraversalProgress<'a> {
    /// Progress positioned before the first entry: empty path, zero counts.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}
5943
/// A borrowed worktree entry paired with its aggregated git status summary.
#[derive(Debug, Clone, Copy)]
pub struct GitEntryRef<'a> {
    pub entry: &'a Entry,
    pub git_summary: GitSummary,
}
5949
5950impl<'a> GitEntryRef<'a> {
5951 pub fn to_owned(&self) -> GitEntry {
5952 GitEntry {
5953 entry: self.entry.clone(),
5954 git_summary: self.git_summary,
5955 }
5956 }
5957}
5958
5959impl<'a> Deref for GitEntryRef<'a> {
5960 type Target = Entry;
5961
5962 fn deref(&self) -> &Self::Target {
5963 &self.entry
5964 }
5965}
5966
impl<'a> AsRef<Entry> for GitEntryRef<'a> {
    // Expose the underlying entry for APIs taking `impl AsRef<Entry>`.
    fn as_ref(&self) -> &Entry {
        self.entry
    }
}
5972
/// An owned worktree entry paired with its aggregated git status summary.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct GitEntry {
    pub entry: Entry,
    pub git_summary: GitSummary,
}
5978
5979impl GitEntry {
5980 pub fn to_ref(&self) -> GitEntryRef {
5981 GitEntryRef {
5982 entry: &self.entry,
5983 git_summary: self.git_summary,
5984 }
5985 }
5986}
5987
impl Deref for GitEntry {
    type Target = Entry;

    // Allow `GitEntry` to be used wherever an `&Entry` is needed.
    fn deref(&self) -> &Self::Target {
        &self.entry
    }
}
5995
impl AsRef<Entry> for GitEntry {
    // Expose the underlying entry for APIs taking `impl AsRef<Entry>`.
    fn as_ref(&self) -> &Entry {
        &self.entry
    }
}
6001
/// Walks the worktree entries and their associated git statuses.
pub struct GitTraversal<'a> {
    traversal: Traversal<'a>,
    // Git summary for the entry the traversal is currently parked on, if any.
    current_entry_summary: Option<GitSummary>,
    // The repository containing the current entry, plus a status cursor
    // positioned within that repository's statuses.
    repo_location: Option<(
        &'a RepositoryEntry,
        Cursor<'a, StatusEntry, PathProgress<'a>>,
    )>,
}
6011
impl<'a> GitTraversal<'a> {
    /// Recompute `current_entry_summary` (and, if needed, `repo_location`) for
    /// the entry the inner traversal is currently on. `reset` forces the
    /// status cursor to be rebuilt even if the repository did not change.
    fn synchronize_statuses(&mut self, reset: bool) {
        self.current_entry_summary = None;

        let Some(entry) = self.traversal.cursor.item() else {
            return;
        };

        let Some(repo) = self.traversal.snapshot.repository_for_path(&entry.path) else {
            self.repo_location = None;
            return;
        };

        // Update our state if we changed repositories.
        if reset || self.repo_location.as_ref().map(|(prev_repo, _)| prev_repo) != Some(&repo) {
            self.repo_location = Some((repo, repo.statuses_by_path.cursor::<PathProgress>(&())));
        }

        let Some((repo, statuses)) = &mut self.repo_location else {
            return;
        };

        let repo_path = repo.relativize(&entry.path).unwrap();

        if entry.is_dir() {
            // For a directory, sum the statuses of everything beneath it.
            // Clone so the shared cursor's position is not disturbed.
            let mut statuses = statuses.clone();
            statuses.seek_forward(&PathTarget::Path(repo_path.as_ref()), Bias::Left, &());
            let summary =
                statuses.summary(&PathTarget::Successor(repo_path.as_ref()), Bias::Left, &());

            self.current_entry_summary = Some(summary);
        } else if entry.is_file() {
            // For a file entry, park the cursor on the corresponding status
            if statuses.seek_forward(&PathTarget::Path(repo_path.as_ref()), Bias::Left, &()) {
                // TODO: Investigate statuses.item() being None here.
                self.current_entry_summary = statuses.item().map(|item| item.status.into());
            } else {
                self.current_entry_summary = Some(GitSummary::UNCHANGED);
            }
        }
    }

    /// Advance to the next entry, keeping the git summary in sync.
    pub fn advance(&mut self) -> bool {
        self.advance_by(1)
    }

    /// Advance by `count` entries, keeping the git summary in sync.
    pub fn advance_by(&mut self, count: usize) -> bool {
        let found = self.traversal.advance_by(count);
        self.synchronize_statuses(false);
        found
    }

    /// Skip the current entry's descendants and move to its next sibling.
    pub fn advance_to_sibling(&mut self) -> bool {
        let found = self.traversal.advance_to_sibling();
        self.synchronize_statuses(false);
        found
    }

    /// Move back to the current entry's parent directory. Resets the status
    /// cursor since the traversal moved backwards.
    pub fn back_to_parent(&mut self) -> bool {
        let found = self.traversal.back_to_parent();
        self.synchronize_statuses(true);
        found
    }

    pub fn start_offset(&self) -> usize {
        self.traversal.start_offset()
    }

    pub fn end_offset(&self) -> usize {
        self.traversal.end_offset()
    }

    /// The current entry with its git summary, or `None` if exhausted.
    /// Entries without a computed summary are reported as unchanged.
    pub fn entry(&self) -> Option<GitEntryRef<'a>> {
        let entry = self.traversal.cursor.item()?;
        let git_summary = self.current_entry_summary.unwrap_or(GitSummary::UNCHANGED);
        Some(GitEntryRef { entry, git_summary })
    }
}
6090
6091impl<'a> Iterator for GitTraversal<'a> {
6092 type Item = GitEntryRef<'a>;
6093 fn next(&mut self) -> Option<Self::Item> {
6094 if let Some(item) = self.entry() {
6095 self.advance();
6096 Some(item)
6097 } else {
6098 None
6099 }
6100 }
6101}
6102
/// An ordered walk over a snapshot's entries, optionally filtered by entry
/// kind and ignore status.
#[derive(Debug)]
pub struct Traversal<'a> {
    snapshot: &'a Snapshot,
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_files: bool,
    include_dirs: bool,
}
6111
impl<'a> Traversal<'a> {
    /// Create a traversal positioned at `start_path` (or the first included
    /// entry after it, given the filters).
    fn new(
        snapshot: &'a Snapshot,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        start_path: &Path,
    ) -> Self {
        let mut cursor = snapshot.entries_by_path.cursor(&());
        cursor.seek(&TraversalTarget::path(start_path), Bias::Left, &());
        let mut traversal = Self {
            snapshot,
            cursor,
            include_files,
            include_dirs,
            include_ignored,
        };
        // If the seek landed on an entry excluded by the filters (offsets
        // equal means the entry doesn't count), move to the first included one.
        if traversal.end_offset() == traversal.start_offset() {
            traversal.next();
        }
        traversal
    }

    /// Wrap this traversal so each entry also carries its git status summary.
    pub fn with_git_statuses(self) -> GitTraversal<'a> {
        let mut this = GitTraversal {
            traversal: self,
            current_entry_summary: None,
            repo_location: None,
        };
        this.synchronize_statuses(true);
        this
    }

    pub fn advance(&mut self) -> bool {
        self.advance_by(1)
    }

    /// Advance past `count` included entries by seeking on the filtered count.
    pub fn advance_by(&mut self, count: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + count,
                include_dirs: self.include_dirs,
                include_files: self.include_files,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skip the current entry's subtree and land on the next entry matching
    /// the filters; returns false when the traversal is exhausted.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor
                .seek_forward(&TraversalTarget::successor(&entry.path), Bias::Left, &());
            if let Some(entry) = self.cursor.item() {
                if (self.include_files || !entry.is_file())
                    && (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored || entry.is_always_included)
                {
                    return true;
                }
            }
        }
        false
    }

    /// Seek back to the current entry's parent directory.
    pub fn back_to_parent(&mut self) -> bool {
        let Some(parent_path) = self.cursor.item().and_then(|entry| entry.path.parent()) else {
            return false;
        };
        self.cursor
            .seek(&TraversalTarget::path(parent_path), Bias::Left, &())
    }

    /// The entry the traversal is currently parked on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Filtered index of the current entry (entries strictly before it).
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_files, self.include_dirs, self.include_ignored)
    }

    /// Filtered index just past the current entry.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_files, self.include_dirs, self.include_ignored)
    }
}
6202
6203impl<'a> Iterator for Traversal<'a> {
6204 type Item = &'a Entry;
6205
6206 fn next(&mut self) -> Option<Self::Item> {
6207 if let Some(item) = self.entry() {
6208 self.advance();
6209 Some(item)
6210 } else {
6211 None
6212 }
6213 }
6214}
6215
/// A seek target over path-ordered trees: either an exact path, or the first
/// position after a path's entire subtree.
#[derive(Debug, Clone, Copy)]
enum PathTarget<'a> {
    Path(&'a Path),
    Successor(&'a Path),
}
6221
6222impl<'a> PathTarget<'a> {
6223 fn cmp_path(&self, other: &Path) -> Ordering {
6224 match self {
6225 PathTarget::Path(path) => path.cmp(&other),
6226 PathTarget::Successor(path) => {
6227 if other.starts_with(path) {
6228 Ordering::Greater
6229 } else {
6230 Ordering::Equal
6231 }
6232 }
6233 }
6234 }
6235}
6236
// Allow `PathTarget` to drive cursors tracking `PathProgress`.
impl<'a, 'b, S: Summary> SeekTarget<'a, PathSummary<S>, PathProgress<'a>> for PathTarget<'b> {
    fn cmp(&self, cursor_location: &PathProgress<'a>, _: &S::Context) -> Ordering {
        self.cmp_path(&cursor_location.max_path)
    }
}
6242
// Allow `PathTarget` to drive cursors tracking `TraversalProgress`.
impl<'a, 'b, S: Summary> SeekTarget<'a, PathSummary<S>, TraversalProgress<'a>> for PathTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &S::Context) -> Ordering {
        self.cmp_path(&cursor_location.max_path)
    }
}
6248
// Allow `PathTarget` to drive cursors that also accumulate a `GitSummary`;
// only the path component participates in the comparison.
impl<'a, 'b> SeekTarget<'a, PathSummary<GitSummary>, (TraversalProgress<'a>, GitSummary)>
    for PathTarget<'b>
{
    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitSummary), _: &()) -> Ordering {
        self.cmp_path(&cursor_location.0.max_path)
    }
}
6256
/// A seek target for traversals: either a path position or a filtered
/// entry-count position.
#[derive(Debug)]
enum TraversalTarget<'a> {
    Path(PathTarget<'a>),
    Count {
        // Target number of entries, counted under the given filters.
        count: usize,
        include_files: bool,
        include_ignored: bool,
        include_dirs: bool,
    },
}
6267
impl<'a> TraversalTarget<'a> {
    /// Target the entry at exactly `path`.
    fn path(path: &'a Path) -> Self {
        Self::Path(PathTarget::Path(path))
    }

    /// Target the first entry after `path`'s entire subtree.
    fn successor(path: &'a Path) -> Self {
        Self::Path(PathTarget::Successor(path))
    }

    /// Compare this target against a cursor's accumulated progress.
    fn cmp_progress(&self, progress: &TraversalProgress) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp_path(&progress.max_path),
            TraversalTarget::Count {
                count,
                include_files,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &progress.count(*include_files, *include_dirs, *include_ignored),
            ),
        }
    }
}
6292
// Allow `TraversalTarget` to drive cursors over entry summaries.
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        self.cmp_progress(cursor_location)
    }
}
6298
// Allow `TraversalTarget` to drive cursors over plain path summaries.
impl<'a, 'b> SeekTarget<'a, PathSummary<Unit>, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        self.cmp_progress(cursor_location)
    }
}
6304
/// Filter options for iterating a directory's immediate children.
pub struct ChildEntriesOptions {
    pub include_files: bool,
    pub include_dirs: bool,
    pub include_ignored: bool,
}
6310
/// Iterates the immediate children of `parent_path` within a traversal.
pub struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
6315
impl<'a> ChildEntriesIter<'a> {
    /// Upgrade this iterator so each child also carries its git status summary.
    pub fn with_git_statuses(self) -> ChildEntriesGitIter<'a> {
        ChildEntriesGitIter {
            parent_path: self.parent_path,
            traversal: self.traversal.with_git_statuses(),
        }
    }
}
6324
/// Like `ChildEntriesIter`, but yields entries with git status summaries.
pub struct ChildEntriesGitIter<'a> {
    parent_path: &'a Path,
    traversal: GitTraversal<'a>,
}
6329
6330impl<'a> Iterator for ChildEntriesIter<'a> {
6331 type Item = &'a Entry;
6332
6333 fn next(&mut self) -> Option<Self::Item> {
6334 if let Some(item) = self.traversal.entry() {
6335 if item.path.starts_with(self.parent_path) {
6336 self.traversal.advance_to_sibling();
6337 return Some(item);
6338 }
6339 }
6340 None
6341 }
6342}
6343
6344impl<'a> Iterator for ChildEntriesGitIter<'a> {
6345 type Item = GitEntryRef<'a>;
6346
6347 fn next(&mut self) -> Option<Self::Item> {
6348 if let Some(item) = self.traversal.entry() {
6349 if item.path.starts_with(self.parent_path) {
6350 self.traversal.advance_to_sibling();
6351 return Some(item);
6352 }
6353 }
6354 None
6355 }
6356}
6357
/// Serialize a worktree entry into its protobuf representation.
impl<'a> From<&'a Entry> for proto::Entry {
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.as_ref().to_proto(),
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            is_ignored: entry.is_ignored,
            is_external: entry.is_external,
            is_fifo: entry.is_fifo,
            size: Some(entry.size),
            canonical_path: entry
                .canonical_path
                .as_ref()
                .map(|path| path.as_ref().to_proto()),
        }
    }
}
6377
/// Deserialize a protobuf entry, rebuilding the locally-derived fields (char
/// bag, always-included flag) from the worktree's root char bag and matcher.
impl<'a> TryFrom<(&'a CharBag, &PathMatcher, proto::Entry)> for Entry {
    type Error = anyhow::Error;

    fn try_from(
        (root_char_bag, always_included, entry): (&'a CharBag, &PathMatcher, proto::Entry),
    ) -> Result<Self> {
        let kind = if entry.is_dir {
            EntryKind::Dir
        } else {
            EntryKind::File
        };

        let path = Arc::<Path>::from_proto(entry.path);
        let char_bag = char_bag_for_path(*root_char_bag, &path);
        let is_always_included = always_included.is_match(path.as_ref());
        Ok(Entry {
            id: ProjectEntryId::from_proto(entry.id),
            kind,
            path,
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            size: entry.size.unwrap_or(0),
            canonical_path: entry
                .canonical_path
                .map(|path_string| Box::from(PathBuf::from_proto(path_string))),
            is_ignored: entry.is_ignored,
            is_always_included,
            is_external: entry.is_external,
            // Privacy is a local concern and is not transmitted over proto.
            is_private: false,
            char_bag,
            is_fifo: entry.is_fifo,
        })
    }
}
6412
/// Decode a git file status from proto. Prefers the structured `status`
/// variant; falls back to the legacy `simple_status` code when the variant is
/// absent (presumably for compatibility with older peers — TODO confirm).
fn status_from_proto(
    simple_status: i32,
    status: Option<proto::GitFileStatus>,
) -> anyhow::Result<FileStatus> {
    use proto::git_file_status::Variant;

    // Legacy path: map the bare status code to an equivalent structured status.
    let Some(variant) = status.and_then(|status| status.variant) else {
        let code = proto::GitStatus::from_i32(simple_status)
            .ok_or_else(|| anyhow!("Invalid git status code: {simple_status}"))?;
        let result = match code {
            proto::GitStatus::Added => TrackedStatus {
                worktree_status: StatusCode::Added,
                index_status: StatusCode::Unmodified,
            }
            .into(),
            proto::GitStatus::Modified => TrackedStatus {
                worktree_status: StatusCode::Modified,
                index_status: StatusCode::Unmodified,
            }
            .into(),
            proto::GitStatus::Conflict => UnmergedStatus {
                first_head: UnmergedStatusCode::Updated,
                second_head: UnmergedStatusCode::Updated,
            }
            .into(),
            proto::GitStatus::Deleted => TrackedStatus {
                worktree_status: StatusCode::Deleted,
                index_status: StatusCode::Unmodified,
            }
            .into(),
            _ => return Err(anyhow!("Invalid code for simple status: {simple_status}")),
        };
        return Ok(result);
    };

    let result = match variant {
        Variant::Untracked(_) => FileStatus::Untracked,
        Variant::Ignored(_) => FileStatus::Ignored,
        Variant::Unmerged(unmerged) => {
            // Decode both heads of the merge with the same mapping.
            let [first_head, second_head] =
                [unmerged.first_head, unmerged.second_head].map(|head| {
                    let code = proto::GitStatus::from_i32(head)
                        .ok_or_else(|| anyhow!("Invalid git status code: {head}"))?;
                    let result = match code {
                        proto::GitStatus::Added => UnmergedStatusCode::Added,
                        proto::GitStatus::Updated => UnmergedStatusCode::Updated,
                        proto::GitStatus::Deleted => UnmergedStatusCode::Deleted,
                        _ => return Err(anyhow!("Invalid code for unmerged status: {code:?}")),
                    };
                    Ok(result)
                });
            let [first_head, second_head] = [first_head?, second_head?];
            UnmergedStatus {
                first_head,
                second_head,
            }
            .into()
        }
        Variant::Tracked(tracked) => {
            // Decode index and worktree statuses with the same mapping.
            let [index_status, worktree_status] = [tracked.index_status, tracked.worktree_status]
                .map(|status| {
                    let code = proto::GitStatus::from_i32(status)
                        .ok_or_else(|| anyhow!("Invalid git status code: {status}"))?;
                    let result = match code {
                        proto::GitStatus::Modified => StatusCode::Modified,
                        proto::GitStatus::TypeChanged => StatusCode::TypeChanged,
                        proto::GitStatus::Added => StatusCode::Added,
                        proto::GitStatus::Deleted => StatusCode::Deleted,
                        proto::GitStatus::Renamed => StatusCode::Renamed,
                        proto::GitStatus::Copied => StatusCode::Copied,
                        proto::GitStatus::Unmodified => StatusCode::Unmodified,
                        _ => return Err(anyhow!("Invalid code for tracked status: {code:?}")),
                    };
                    Ok(result)
                });
            let [index_status, worktree_status] = [index_status?, worktree_status?];
            TrackedStatus {
                index_status,
                worktree_status,
            }
            .into()
        }
    };
    Ok(result)
}
6498
6499fn status_to_proto(status: FileStatus) -> proto::GitFileStatus {
6500 use proto::git_file_status::{Tracked, Unmerged, Variant};
6501
6502 let variant = match status {
6503 FileStatus::Untracked => Variant::Untracked(Default::default()),
6504 FileStatus::Ignored => Variant::Ignored(Default::default()),
6505 FileStatus::Unmerged(UnmergedStatus {
6506 first_head,
6507 second_head,
6508 }) => Variant::Unmerged(Unmerged {
6509 first_head: unmerged_status_to_proto(first_head),
6510 second_head: unmerged_status_to_proto(second_head),
6511 }),
6512 FileStatus::Tracked(TrackedStatus {
6513 index_status,
6514 worktree_status,
6515 }) => Variant::Tracked(Tracked {
6516 index_status: tracked_status_to_proto(index_status),
6517 worktree_status: tracked_status_to_proto(worktree_status),
6518 }),
6519 };
6520 proto::GitFileStatus {
6521 variant: Some(variant),
6522 }
6523}
6524
6525fn unmerged_status_to_proto(code: UnmergedStatusCode) -> i32 {
6526 match code {
6527 UnmergedStatusCode::Added => proto::GitStatus::Added as _,
6528 UnmergedStatusCode::Deleted => proto::GitStatus::Deleted as _,
6529 UnmergedStatusCode::Updated => proto::GitStatus::Updated as _,
6530 }
6531}
6532
6533fn tracked_status_to_proto(code: StatusCode) -> i32 {
6534 match code {
6535 StatusCode::Added => proto::GitStatus::Added as _,
6536 StatusCode::Deleted => proto::GitStatus::Deleted as _,
6537 StatusCode::Modified => proto::GitStatus::Modified as _,
6538 StatusCode::Renamed => proto::GitStatus::Renamed as _,
6539 StatusCode::TypeChanged => proto::GitStatus::TypeChanged as _,
6540 StatusCode::Copied => proto::GitStatus::Copied as _,
6541 StatusCode::Unmodified => proto::GitStatus::Unmodified as _,
6542 }
6543}
6544
/// An identifier for a worktree entry, unique within a project for the
/// lifetime of that project. Ids are handed out sequentially from a shared
/// atomic counter.
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProjectEntryId(usize);

impl ProjectEntryId {
    /// The largest representable entry id.
    pub const MAX: Self = Self(usize::MAX);
    /// The smallest representable entry id.
    pub const MIN: Self = Self(usize::MIN);

    /// Allocates the next id, atomically post-incrementing `counter`.
    pub fn new(counter: &AtomicUsize) -> Self {
        let id = counter.fetch_add(1, SeqCst);
        ProjectEntryId(id)
    }

    /// Reconstructs an id received over the wire.
    ///
    /// NOTE(review): `id as usize` would silently truncate on a 32-bit
    /// target — presumably fine because ids originate from a local `usize`
    /// counter, but worth confirming for cross-platform collaboration.
    pub fn from_proto(id: u64) -> Self {
        ProjectEntryId(id as usize)
    }

    /// Returns the wire representation of this id.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// Returns the raw counter value backing this id.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
6568
#[cfg(any(test, feature = "test-support"))]
impl CreatedEntry {
    /// Unwraps the entry if it was actually included in the worktree,
    /// returning `None` for an excluded entry.
    pub fn to_included(self) -> Option<Entry> {
        if let CreatedEntry::Included(entry) = self {
            Some(entry)
        } else {
            None
        }
    }
}