1mod ignore;
2mod worktree_settings;
3#[cfg(test)]
4mod worktree_tests;
5
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context as _, Result};
8use clock::ReplicaId;
9use collections::{HashMap, HashSet, VecDeque};
10use fs::{copy_recursive, Fs, MTime, PathEvent, RemoveOptions, Watcher};
11use futures::{
12 channel::{
13 mpsc::{self, UnboundedSender},
14 oneshot,
15 },
16 future::join_all,
17 select_biased,
18 task::Poll,
19 FutureExt as _, Stream, StreamExt,
20};
21use fuzzy::CharBag;
22use git::{
23 repository::{Branch, GitRepository, RepoPath},
24 status::{
25 FileStatus, GitSummary, StatusCode, TrackedStatus, UnmergedStatus, UnmergedStatusCode,
26 },
27 GitHostingProviderRegistry, COMMIT_MESSAGE, DOT_GIT, FSMONITOR_DAEMON, GITIGNORE, INDEX_LOCK,
28};
29use gpui::{
30 App, AppContext as _, AsyncApp, BackgroundExecutor, Context, Entity, EventEmitter, Task,
31};
32use ignore::IgnoreStack;
33use language::DiskState;
34
35use parking_lot::Mutex;
36use paths::local_settings_folder_relative_path;
37use postage::{
38 barrier,
39 prelude::{Sink as _, Stream as _},
40 watch,
41};
42use rpc::{
43 proto::{self, split_worktree_update, FromProto, ToProto},
44 AnyProtoClient,
45};
46pub use settings::WorktreeId;
47use settings::{Settings, SettingsLocation, SettingsStore};
48use smallvec::{smallvec, SmallVec};
49use smol::channel::{self, Sender};
50use std::{
51 any::Any,
52 cmp::Ordering,
53 collections::hash_map,
54 convert::TryFrom,
55 ffi::OsStr,
56 fmt,
57 future::Future,
58 mem::{self},
59 ops::{Deref, DerefMut},
60 path::{Path, PathBuf},
61 pin::Pin,
62 sync::{
63 atomic::{self, AtomicU32, AtomicUsize, Ordering::SeqCst},
64 Arc,
65 },
66 time::{Duration, Instant},
67};
68use sum_tree::{
69 Bias, Cursor, Edit, KeyedItem, SeekTarget, SumTree, Summary, TreeMap, TreeSet, Unit,
70};
71use text::{LineEnding, Rope};
72use util::{
73 paths::{home_dir, PathMatcher, SanitizedPath},
74 ResultExt,
75};
76pub use worktree_settings::WorktreeSettings;
77
/// Debounce latency applied to filesystem watch events before rescanning.
///
/// The value was previously declared twice under complementary
/// `#[cfg(feature = "test-support")]` gates with identical values; the two
/// copies have been collapsed into a single definition. If tests ever need a
/// different latency, reintroduce the cfg split with distinct values.
pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
82
/// A set of local or remote files that are being opened as part of a project.
/// Responsible for tracking related FS (for local)/collab (for remote) events and corresponding updates.
/// Stores git repositories data and the diagnostics for the file(s).
///
/// Has an absolute path, and may be set to be visible in Zed UI or not.
/// May correspond to a directory or a single file.
/// Possible examples:
/// * a drag and dropped file — may be added as an invisible, "ephemeral" entry to the current worktree
/// * a directory opened in Zed — may be added as a visible entry to the current worktree
///
/// Uses [`Entry`] to track the state of each file/directory, can look up absolute paths for entries.
pub enum Worktree {
    /// A worktree backed by the local filesystem, kept up to date by a
    /// background scanner.
    Local(LocalWorktree),
    /// A worktree mirrored from a collaborator; state arrives as
    /// `proto::UpdateWorktree` messages over RPC.
    Remote(RemoteWorktree),
}
98
/// An entry, created in the worktree.
///
/// Returned by [`Worktree::create_entry`].
#[derive(Debug)]
pub enum CreatedEntry {
    /// Got created and indexed by the worktree, receiving a corresponding entry.
    Included(Entry),
    /// Got created, but not indexed due to falling under exclusion filters.
    Excluded { abs_path: PathBuf },
}
107
/// A file loaded from a worktree, together with its text contents.
pub struct LoadedFile {
    pub file: Arc<File>,
    pub text: String,
}
112
/// A file loaded from a worktree as raw bytes, for non-text content.
pub struct LoadedBinaryFile {
    pub file: Arc<File>,
    pub content: Vec<u8>,
}
117
/// A worktree backed by the local filesystem.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Channel for asking the background scanner to rescan specific relative paths.
    scan_requests_tx: channel::Sender<ScanRequest>,
    /// Channel for asking the background scanner to scan everything under a path prefix.
    path_prefixes_to_scan_tx: channel::Sender<PathPrefixScanRequest>,
    /// Tracks whether a background scan is in progress (initialized to true).
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_tasks: Vec<Task<()>>,
    /// Present while snapshot updates are being streamed to an observer.
    update_observer: Option<UpdateObservationState>,
    fs: Arc<dyn Fs>,
    /// Whether the underlying filesystem is case sensitive (assumed true when
    /// detection fails).
    fs_case_sensitive: bool,
    /// Whether this worktree is shown in the UI.
    visible: bool,
    /// Shared counter used to allocate ids for newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    settings: WorktreeSettings,
    /// When true, entries matching the private-path settings are not marked
    /// private (see `Worktree::local`).
    share_private_files: bool,
}
132
/// A request for the background scanner to scan everything under `path`.
pub struct PathPrefixScanRequest {
    path: Arc<Path>,
    /// Barrier senders used to signal completion of the scan to waiters.
    done: SmallVec<[barrier::Sender; 1]>,
}
137
/// A request for the background scanner to rescan specific relative paths.
struct ScanRequest {
    relative_paths: Vec<Arc<Path>>,
    /// Barrier senders used to signal completion of the scan to waiters.
    done: SmallVec<[barrier::Sender; 1]>,
}
142
/// A worktree mirrored from a collaborator over RPC.
pub struct RemoteWorktree {
    /// The latest snapshot synced to the foreground entity.
    snapshot: Snapshot,
    /// Snapshot that updates are applied to on a background task, plus the
    /// queue of raw updates not yet forwarded to the update observer.
    background_snapshot: Arc<Mutex<(Snapshot, Vec<proto::UpdateWorktree>)>>,
    project_id: u64,
    client: AnyProtoClient,
    /// Paths to index even when excluded, taken from the worktree settings.
    file_scan_inclusions: PathMatcher,
    /// Sends incoming updates to the background application task.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    update_observer: Option<mpsc::UnboundedSender<proto::UpdateWorktree>>,
    /// Tasks waiting for a given scan id to be observed by this worktree.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    /// Whether this worktree is shown in the UI.
    visible: bool,
    disconnected: bool,
}
156
/// A cloneable view of a worktree's entries and git repositories. Both local
/// and remote worktrees keep their state in one of these.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root.
    abs_path: SanitizedPath,
    /// File name of the worktree root (empty when the path has none).
    root_name: String,
    /// Character bag of the root name, for fuzzy matching.
    root_char_bag: CharBag,
    /// All entries, ordered by path.
    entries_by_path: SumTree<Entry>,
    /// Entries keyed by id (see `PathEntry`).
    entries_by_id: SumTree<PathEntry>,
    /// Paths that are indexed even when exclusion filters would skip them.
    always_included_entries: Vec<Arc<Path>>,
    /// Git repositories discovered in the worktree.
    repositories: SumTree<RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
180
/// State of a git repository as tracked by a worktree snapshot.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RepositoryEntry {
    /// The git status entries for this repository.
    /// Note that the paths on this repository are relative to the git work directory.
    /// If the .git folder is external to Zed, these paths will be relative to that folder,
    /// and this data structure might reference files external to this worktree.
    ///
    /// For example:
    ///
    ///     my_root_folder/          <-- repository root
    ///       .git
    ///       my_sub_folder_1/
    ///         project_root/        <-- Project root, Zed opened here
    ///           changed_file_1     <-- File with changes, in worktree
    ///       my_sub_folder_2/
    ///         changed_file_2       <-- File with changes, out of worktree
    ///       ...
    ///
    /// With this setup, this field would contain 2 entries, like so:
    /// - my_sub_folder_1/project_root/changed_file_1
    /// - my_sub_folder_2/changed_file_2
    pub(crate) statuses_by_path: SumTree<StatusEntry>,
    /// Project entry id of the repository's working directory.
    work_directory_id: ProjectEntryId,
    pub work_directory: WorkDirectory,
    /// The currently checked out branch, if known.
    pub(crate) branch: Option<Branch>,
    /// Repo paths that currently have merge conflicts.
    pub current_merge_conflicts: TreeSet<RepoPath>,
}
208
/// Lets a [`RepositoryEntry`] be used wherever a [`WorkDirectory`] is expected.
impl Deref for RepositoryEntry {
    type Target = WorkDirectory;

    fn deref(&self) -> &Self::Target {
        &self.work_directory
    }
}
216
impl RepositoryEntry {
    /// The currently checked out branch, if known.
    pub fn branch(&self) -> Option<&Branch> {
        self.branch.as_ref()
    }

    /// Project entry id of the repository's working directory.
    pub fn work_directory_id(&self) -> ProjectEntryId {
        self.work_directory_id
    }

    /// Iterates over all git status entries, in path order.
    pub fn status(&self) -> impl Iterator<Item = StatusEntry> + '_ {
        self.statuses_by_path.iter().cloned()
    }

    /// Number of status entries tracked for this repository.
    pub fn status_len(&self) -> usize {
        self.statuses_by_path.summary().item_summary.count
    }

    /// Aggregated git status summary across all entries.
    pub fn status_summary(&self) -> GitSummary {
        self.statuses_by_path.summary().item_summary
    }

    /// Looks up the status entry for a single repo-relative path.
    pub fn status_for_path(&self, path: &RepoPath) -> Option<StatusEntry> {
        self.statuses_by_path
            .get(&PathKey(path.0.clone()), &())
            .cloned()
    }

    /// Builds the protobuf message describing this repository's full state,
    /// for a peer observing the worktree for the first time (so no removals
    /// are needed).
    pub fn initial_update(&self) -> proto::RepositoryEntry {
        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id.to_proto(),
            branch: self.branch.as_ref().map(|branch| branch.name.to_string()),
            branch_summary: self.branch.as_ref().map(branch_to_proto),
            updated_statuses: self
                .statuses_by_path
                .iter()
                .map(|entry| entry.to_proto())
                .collect(),
            removed_statuses: Default::default(),
            current_merge_conflicts: self
                .current_merge_conflicts
                .iter()
                .map(|repo_path| repo_path.to_proto())
                .collect(),
        }
    }

    /// Builds an incremental protobuf update by diffing `self` against `old`.
    ///
    /// Performs a sorted merge-join over the two status lists: paths present
    /// only in `self` (or whose status changed) become `updated_statuses`,
    /// and paths present only in `old` become `removed_statuses`.
    pub fn build_update(&self, old: &Self) -> proto::RepositoryEntry {
        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
        let mut removed_statuses: Vec<String> = Vec::new();

        let mut new_statuses = self.statuses_by_path.iter().peekable();
        let mut old_statuses = old.statuses_by_path.iter().peekable();

        let mut current_new_entry = new_statuses.next();
        let mut current_old_entry = old_statuses.next();
        // Both iterators yield entries sorted by repo path, so advance the
        // two sides in lockstep, comparing the frontmost path of each.
        loop {
            match (current_new_entry, current_old_entry) {
                (Some(new_entry), Some(old_entry)) => {
                    match new_entry.repo_path.cmp(&old_entry.repo_path) {
                        Ordering::Less => {
                            // Path only in the new snapshot: added/updated.
                            updated_statuses.push(new_entry.to_proto());
                            current_new_entry = new_statuses.next();
                        }
                        Ordering::Equal => {
                            // Path in both: emit only if the status changed.
                            if new_entry.status != old_entry.status {
                                updated_statuses.push(new_entry.to_proto());
                            }
                            current_old_entry = old_statuses.next();
                            current_new_entry = new_statuses.next();
                        }
                        Ordering::Greater => {
                            // Path only in the old snapshot: removed.
                            removed_statuses.push(old_entry.repo_path.as_ref().to_proto());
                            current_old_entry = old_statuses.next();
                        }
                    }
                }
                (None, Some(old_entry)) => {
                    removed_statuses.push(old_entry.repo_path.as_ref().to_proto());
                    current_old_entry = old_statuses.next();
                }
                (Some(new_entry), None) => {
                    updated_statuses.push(new_entry.to_proto());
                    current_new_entry = new_statuses.next();
                }
                (None, None) => break,
            }
        }

        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id.to_proto(),
            branch: self.branch.as_ref().map(|branch| branch.name.to_string()),
            branch_summary: self.branch.as_ref().map(branch_to_proto),
            updated_statuses,
            removed_statuses,
            current_merge_conflicts: self
                .current_merge_conflicts
                .iter()
                .map(|path| path.as_ref().to_proto())
                .collect(),
        }
    }
}
319
320pub fn branch_to_proto(branch: &git::repository::Branch) -> proto::Branch {
321 proto::Branch {
322 is_head: branch.is_head,
323 name: branch.name.to_string(),
324 unix_timestamp: branch
325 .most_recent_commit
326 .as_ref()
327 .map(|commit| commit.commit_timestamp as u64),
328 upstream: branch.upstream.as_ref().map(|upstream| proto::GitUpstream {
329 ref_name: upstream.ref_name.to_string(),
330 tracking: upstream
331 .tracking
332 .as_ref()
333 .map(|upstream| proto::UpstreamTracking {
334 ahead: upstream.ahead as u64,
335 behind: upstream.behind as u64,
336 }),
337 }),
338 most_recent_commit: branch
339 .most_recent_commit
340 .as_ref()
341 .map(|commit| proto::CommitSummary {
342 sha: commit.sha.to_string(),
343 subject: commit.subject.to_string(),
344 commit_timestamp: commit.commit_timestamp,
345 }),
346 }
347}
348
349pub fn proto_to_branch(proto: &proto::Branch) -> git::repository::Branch {
350 git::repository::Branch {
351 is_head: proto.is_head,
352 name: proto.name.clone().into(),
353 upstream: proto
354 .upstream
355 .as_ref()
356 .map(|upstream| git::repository::Upstream {
357 ref_name: upstream.ref_name.to_string().into(),
358 tracking: upstream.tracking.as_ref().map(|tracking| {
359 git::repository::UpstreamTracking {
360 ahead: tracking.ahead as u32,
361 behind: tracking.behind as u32,
362 }
363 }),
364 }),
365 most_recent_commit: proto.most_recent_commit.as_ref().map(|commit| {
366 git::repository::CommitSummary {
367 sha: commit.sha.to_string().into(),
368 subject: commit.subject.to_string().into(),
369 commit_timestamp: commit.commit_timestamp,
370 }
371 }),
372 }
373}
374
/// This path corresponds to the 'content path' of a repository in relation
/// to Zed's project root.
/// In the majority of the cases, this is the folder that contains the .git folder.
/// But if a sub-folder of a git repository is opened, this corresponds to the
/// project root and the .git folder is located in a parent directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub enum WorkDirectory {
    /// The repository root is inside the worktree.
    InProject {
        /// Path of the repository root, relative to the worktree root.
        relative_path: Arc<Path>,
    },
    /// The repository root is a parent directory of the worktree root.
    AboveProject {
        /// Absolute path of the repository root.
        absolute_path: Arc<Path>,
        /// Path of the worktree root, relative to the repository root.
        location_in_repo: Arc<Path>,
    },
}
390
impl WorkDirectory {
    /// Test helper: builds an in-project work directory from a relative path.
    #[cfg(test)]
    fn in_project(path: &str) -> Self {
        let path = Path::new(path);
        Self::InProject {
            relative_path: path.into(),
        }
    }

    /// Test helper: canonicalizes the absolute path of an above-project work
    /// directory; in-project work directories are returned unchanged.
    #[cfg(test)]
    fn canonicalize(&self) -> Self {
        match self {
            WorkDirectory::InProject { relative_path } => WorkDirectory::InProject {
                relative_path: relative_path.clone(),
            },
            WorkDirectory::AboveProject {
                absolute_path,
                location_in_repo,
            } => WorkDirectory::AboveProject {
                absolute_path: absolute_path.canonicalize().unwrap().into(),
                location_in_repo: location_in_repo.clone(),
            },
        }
    }

    /// Whether the repository root lies outside (above) the worktree root.
    pub fn is_above_project(&self) -> bool {
        match self {
            WorkDirectory::InProject { .. } => false,
            WorkDirectory::AboveProject { .. } => true,
        }
    }

    /// Sort key used for sum-tree storage. Above-project repositories sort
    /// at the worktree root (the empty path).
    fn path_key(&self) -> PathKey {
        match self {
            WorkDirectory::InProject { relative_path } => PathKey(relative_path.clone()),
            WorkDirectory::AboveProject { .. } => PathKey(Path::new("").into()),
        }
    }

    /// Returns true if the given path is a child of the work directory.
    ///
    /// Note that the path may not be a member of this repository, if there
    /// is a repository in a directory between these two paths
    /// external .git folder in a parent folder of the project root.
    #[track_caller]
    pub fn directory_contains(&self, path: impl AsRef<Path>) -> bool {
        let path = path.as_ref();
        debug_assert!(path.is_relative());
        match self {
            WorkDirectory::InProject { relative_path } => path.starts_with(relative_path),
            // An above-project repository contains the entire worktree.
            WorkDirectory::AboveProject { .. } => true,
        }
    }

    /// relativize returns the given project path relative to the root folder of the
    /// repository.
    /// If the root of the repository (and its .git folder) are located in a parent folder
    /// of the project root folder, then the returned RepoPath is relative to the root
    /// of the repository and not a valid path inside the project.
    pub fn relativize(&self, path: &Path) -> Result<RepoPath> {
        // path is assumed to be relative to worktree root.
        debug_assert!(path.is_relative());
        match self {
            WorkDirectory::InProject { relative_path } => Ok(path
                .strip_prefix(relative_path)
                .map_err(|_| {
                    anyhow!(
                        "could not relativize {:?} against {:?}",
                        path,
                        relative_path
                    )
                })?
                .into()),
            WorkDirectory::AboveProject {
                location_in_repo, ..
            } => {
                // Avoid joining a `/` to location_in_repo in the case of a single-file worktree.
                if path == Path::new("") {
                    Ok(RepoPath(location_in_repo.clone()))
                } else {
                    Ok(location_in_repo.join(path).into())
                }
            }
        }
    }

    /// This is the opposite operation to `relativize` above
    pub fn unrelativize(&self, path: &RepoPath) -> Option<Arc<Path>> {
        match self {
            WorkDirectory::InProject { relative_path } => Some(relative_path.join(path).into()),
            WorkDirectory::AboveProject {
                location_in_repo, ..
            } => {
                // If we fail to strip the prefix, that means this status entry is
                // external to this worktree, and we definitely won't have an entry_id
                path.strip_prefix(location_in_repo).ok().map(Into::into)
            }
        }
    }

    /// Human-readable name for the work directory: for above-project
    /// repositories, one `../` per component of `location_in_repo`, followed
    /// by the repository folder name.
    pub fn display_name(&self) -> String {
        match self {
            WorkDirectory::InProject { relative_path } => relative_path.display().to_string(),
            WorkDirectory::AboveProject {
                absolute_path,
                location_in_repo,
            } => {
                let num_of_dots = location_in_repo.components().count();

                "../".repeat(num_of_dots)
                    + &absolute_path
                        .file_name()
                        .map(|s| s.to_string_lossy())
                        .unwrap_or_default()
                    + "/"
            }
        }
    }
}
510
511impl Default for WorkDirectory {
512 fn default() -> Self {
513 Self::InProject {
514 relative_path: Arc::from(Path::new("")),
515 }
516 }
517}
518
/// Newtype over the [`ProjectEntryId`] of a repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
521
/// Lets a [`WorkDirectoryEntry`] be used wherever a [`ProjectEntryId`] is
/// expected.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
529
530impl From<ProjectEntryId> for WorkDirectoryEntry {
531 fn from(value: ProjectEntryId) -> Self {
532 WorkDirectoryEntry(value)
533 }
534}
535
/// Local-only extension of [`Snapshot`]: adds gitignore state, live git
/// repository handles, and a handle to the root directory.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    /// The file handle of the root dir
    /// (so we can find it after it's been moved)
    root_file_handle: Option<Arc<dyn fs::FileHandle>>,
}
549
/// Mutable state owned by the background scanner while it runs.
struct BackgroundScannerState {
    snapshot: LocalSnapshot,
    /// Entry ids of directories that have been scanned.
    scanned_dirs: HashSet<ProjectEntryId>,
    /// Path prefixes queued for scanning.
    path_prefixes_to_scan: HashSet<Arc<Path>>,
    /// Individual paths queued for scanning.
    paths_to_scan: HashSet<Arc<Path>>,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entries: HashMap<u64, Entry>,
    changed_paths: Vec<Arc<Path>>,
    prev_snapshot: Snapshot,
    git_hosting_provider_registry: Option<Arc<GitHostingProviderRegistry>>,
    /// In-flight repository scan tasks, keyed by work directory path.
    repository_scans: HashMap<PathKey, Task<()>>,
}
565
/// Local-only state for a git repository discovered in the worktree,
/// including a live handle to the underlying repository.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    /// Project entry id of the repository's working directory.
    pub(crate) work_directory_id: ProjectEntryId,
    pub(crate) work_directory: WorkDirectory,
    // Scan ids tracking when the .git dir and statuses were last scanned.
    pub(crate) git_dir_scan_id: usize,
    pub(crate) status_scan_id: usize,
    /// Handle to the underlying git repository implementation.
    pub(crate) repo_ptr: Arc<dyn GitRepository>,
    /// Absolute path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) dot_git_dir_abs_path: Arc<Path>,
    /// Absolute path to the .git file, if we're in a git worktree.
    pub(crate) dot_git_worktree_abs_path: Option<Arc<Path>>,
    /// SHAs of the current merge heads (populated during a merge).
    pub current_merge_head_shas: Vec<String>,
}
580
/// Stores [`LocalRepositoryEntry`] values in a [`SumTree`], summarized by the
/// work directory's path key.
impl sum_tree::Item for LocalRepositoryEntry {
    type Summary = PathSummary<Unit>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.work_directory.path_key().0,
            item_summary: Unit,
        }
    }
}
591
/// Keys [`LocalRepositoryEntry`] values by their work directory path so they
/// can be looked up directly in a [`SumTree`].
impl KeyedItem for LocalRepositoryEntry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        self.work_directory.path_key()
    }
}
599
impl LocalRepositoryEntry {
    /// Handle to the underlying git repository implementation.
    pub fn repo(&self) -> &Arc<dyn GitRepository> {
        &self.repo_ptr
    }
}
605
/// Lets a [`LocalRepositoryEntry`] be used wherever a [`WorkDirectory`] is
/// expected.
impl Deref for LocalRepositoryEntry {
    type Target = WorkDirectory;

    fn deref(&self) -> &Self::Target {
        &self.work_directory
    }
}
613
/// Exposes the shared [`Snapshot`] API on a [`LocalSnapshot`].
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
621
/// Mutable access to the shared [`Snapshot`] inside a [`LocalSnapshot`].
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
627
/// Progress updates reported by the background scanner.
#[derive(Debug)]
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced an updated snapshot.
    Updated {
        snapshot: LocalSnapshot,
        changes: UpdatedEntriesSet,
        /// Dropped to signal waiters that the scan they requested finished.
        barrier: SmallVec<[barrier::Sender; 1]>,
        /// Whether the scanner is still mid-scan.
        scanning: bool,
    },
    /// The worktree root itself was moved (or removed when `None`).
    RootUpdated {
        new_path: Option<SanitizedPath>,
    },
}
641
/// State kept while an observer is subscribed to a local worktree's snapshot
/// updates (see [`Worktree::observe_updates`]).
struct UpdateObservationState {
    /// Sends each new snapshot plus its entry/repository changes.
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    /// Signals that the observer is ready for more updates.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
648
/// Events emitted by a [`Worktree`].
#[derive(Clone)]
pub enum Event {
    /// Entries were added, removed, or modified.
    UpdatedEntries(UpdatedEntriesSet),
    /// Git repository state changed.
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
    /// An entry was deleted; emitted once per deleted entry id.
    DeletedEntry(ProjectEntryId),
}
655
/// Relative path of the worktree root, used for root-level settings lookups.
const EMPTY_PATH: &str = "";

impl EventEmitter<Event> for Worktree {}
659
660impl Worktree {
    /// Creates a local worktree rooted at `path`, stat-ing the root and
    /// starting the background scanner that keeps the snapshot up to date.
    ///
    /// `visible` controls whether the worktree is shown in the UI, and
    /// `next_entry_id` is the shared counter used to allocate entry ids.
    /// Returns an error if the worktree path cannot be stat-ed.
    pub async fn local(
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncApp,
    ) -> Result<Entity<Self>> {
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        // Fall back to case-sensitive behavior if detection fails.
        let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });

        // Keep a handle to the root dir so it can be located again if moved.
        let root_file_handle = fs.open_handle(&abs_path).await.log_err();

        cx.new(move |cx: &mut Context<Worktree>| {
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot::new(
                    cx.entity_id().as_u64(),
                    abs_path
                        .file_name()
                        .map_or(String::new(), |f| f.to_string_lossy().to_string()),
                    abs_path.clone(),
                ),
                root_file_handle,
            };

            let worktree_id = snapshot.id();
            let settings_location = Some(SettingsLocation {
                worktree_id,
                path: Path::new(EMPTY_PATH),
            });

            let settings = WorktreeSettings::get(settings_location, cx).clone();
            // Restart the background scanners whenever this worktree's
            // settings change, since scan filters may have changed.
            cx.observe_global::<SettingsStore>(move |this, cx| {
                if let Self::Local(this) = this {
                    let settings = WorktreeSettings::get(settings_location, cx).clone();
                    if this.settings != settings {
                        this.settings = settings;
                        this.restart_background_scanners(cx);
                    }
                }
            })
            .detach();

            let share_private_files = false;
            // Seed the snapshot with the root entry immediately, rather than
            // waiting for the background scanner to discover it.
            if let Some(metadata) = metadata {
                let mut entry = Entry::new(
                    Arc::from(Path::new("")),
                    &metadata,
                    &next_entry_id,
                    snapshot.root_char_bag,
                    None,
                );
                if !metadata.is_dir {
                    entry.is_private = !share_private_files
                        && settings.is_path_private(abs_path.file_name().unwrap().as_ref());
                }
                snapshot.insert_entry(entry, fs.as_ref());
            }

            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
            let mut worktree = LocalWorktree {
                share_private_files,
                next_entry_id,
                snapshot,
                is_scanning: watch::channel_with(true),
                update_observer: None,
                scan_requests_tx,
                path_prefixes_to_scan_tx,
                _background_scanner_tasks: Vec::new(),
                fs,
                fs_case_sensitive,
                visible,
                settings,
            };
            worktree.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
            Worktree::Local(worktree)
        })
    }
751
    /// Creates a worktree that mirrors a collaborator's worktree.
    ///
    /// Incoming `proto::UpdateWorktree` messages are applied to a background
    /// copy of the snapshot and then synced to the foreground entity, which
    /// also forwards the raw updates to any registered update observer.
    pub fn remote(
        project_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: AnyProtoClient,
        cx: &mut App,
    ) -> Entity<Self> {
        cx.new(|cx: &mut Context<Self>| {
            let snapshot = Snapshot::new(
                worktree.id,
                worktree.root_name,
                Arc::<Path>::from_proto(worktree.abs_path),
            );

            let background_snapshot = Arc::new(Mutex::new((snapshot.clone(), Vec::new())));
            let (background_updates_tx, mut background_updates_rx) = mpsc::unbounded();
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            let worktree_id = snapshot.id();
            let settings_location = Some(SettingsLocation {
                worktree_id,
                path: Path::new(EMPTY_PATH),
            });

            let settings = WorktreeSettings::get(settings_location, cx).clone();
            let worktree = RemoteWorktree {
                client,
                project_id,
                replica_id,
                snapshot,
                file_scan_inclusions: settings.file_scan_inclusions.clone(),
                background_snapshot: background_snapshot.clone(),
                updates_tx: Some(background_updates_tx),
                update_observer: None,
                snapshot_subscriptions: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            };

            // Apply updates to a separate snapshot in a background task, then
            // send them to a foreground task which updates the model.
            cx.background_executor()
                .spawn(async move {
                    while let Some(update) = background_updates_rx.next().await {
                        {
                            let mut lock = background_snapshot.lock();
                            if let Err(error) = lock
                                .0
                                .apply_remote_update(update.clone(), &settings.file_scan_inclusions)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            // Queue the raw update so the foreground task can
                            // forward it to any update observer.
                            lock.1.push(update);
                        }
                        snapshot_updated_tx.send(()).await.ok();
                    }
                })
                .detach();

            // On the foreground task, update to the latest snapshot and notify
            // any update observer of all updates that led to that snapshot.
            cx.spawn(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_remote_mut().unwrap();
                        {
                            let mut lock = this.background_snapshot.lock();
                            this.snapshot = lock.0.clone();
                            if let Some(tx) = &this.update_observer {
                                for update in lock.1.drain(..) {
                                    tx.unbounded_send(update).ok();
                                }
                            }
                        };
                        cx.emit(Event::UpdatedEntries(Arc::default()));
                        cx.notify();
                        // Wake any tasks that were waiting for a particular
                        // scan id to be observed.
                        while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                            if this.observed_snapshot(*scan_id) {
                                let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                let _ = tx.send(());
                            } else {
                                break;
                            }
                        }
                    })?;
                }
                anyhow::Ok(())
            })
            .detach();

            Worktree::Remote(worktree)
        })
    }
845
846 pub fn as_local(&self) -> Option<&LocalWorktree> {
847 if let Worktree::Local(worktree) = self {
848 Some(worktree)
849 } else {
850 None
851 }
852 }
853
854 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
855 if let Worktree::Remote(worktree) = self {
856 Some(worktree)
857 } else {
858 None
859 }
860 }
861
862 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
863 if let Worktree::Local(worktree) = self {
864 Some(worktree)
865 } else {
866 None
867 }
868 }
869
870 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
871 if let Worktree::Remote(worktree) = self {
872 Some(worktree)
873 } else {
874 None
875 }
876 }
877
878 pub fn is_local(&self) -> bool {
879 matches!(self, Worktree::Local(_))
880 }
881
882 pub fn is_remote(&self) -> bool {
883 !self.is_local()
884 }
885
886 pub fn settings_location(&self, _: &Context<Self>) -> SettingsLocation<'static> {
887 SettingsLocation {
888 worktree_id: self.id(),
889 path: Path::new(EMPTY_PATH),
890 }
891 }
892
893 pub fn snapshot(&self) -> Snapshot {
894 match self {
895 Worktree::Local(worktree) => worktree.snapshot.snapshot.clone(),
896 Worktree::Remote(worktree) => worktree.snapshot.clone(),
897 }
898 }
899
900 pub fn scan_id(&self) -> usize {
901 match self {
902 Worktree::Local(worktree) => worktree.snapshot.scan_id,
903 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
904 }
905 }
906
907 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
908 proto::WorktreeMetadata {
909 id: self.id().to_proto(),
910 root_name: self.root_name().to_string(),
911 visible: self.is_visible(),
912 abs_path: self.abs_path().to_proto(),
913 }
914 }
915
916 pub fn completed_scan_id(&self) -> usize {
917 match self {
918 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
919 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
920 }
921 }
922
923 pub fn is_visible(&self) -> bool {
924 match self {
925 Worktree::Local(worktree) => worktree.visible,
926 Worktree::Remote(worktree) => worktree.visible,
927 }
928 }
929
930 pub fn replica_id(&self) -> ReplicaId {
931 match self {
932 Worktree::Local(_) => 0,
933 Worktree::Remote(worktree) => worktree.replica_id,
934 }
935 }
936
937 pub fn abs_path(&self) -> Arc<Path> {
938 match self {
939 Worktree::Local(worktree) => worktree.abs_path.clone().into(),
940 Worktree::Remote(worktree) => worktree.abs_path.clone().into(),
941 }
942 }
943
944 pub fn root_file(&self, cx: &Context<Self>) -> Option<Arc<File>> {
945 let entry = self.root_entry()?;
946 Some(File::for_entry(entry.clone(), cx.entity()))
947 }
948
    /// Starts streaming worktree updates to `callback`, invoked with each
    /// `proto::UpdateWorktree` message. The callback's future resolves to a
    /// bool (presumably whether the update was delivered — confirm against
    /// the Local/Remote implementations).
    pub fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        match self {
            Worktree::Local(this) => this.observe_updates(project_id, cx, callback),
            Worktree::Remote(this) => this.observe_updates(project_id, cx, callback),
        }
    }
959
960 pub fn stop_observing_updates(&mut self) {
961 match self {
962 Worktree::Local(this) => {
963 this.update_observer.take();
964 }
965 Worktree::Remote(this) => {
966 this.update_observer.take();
967 }
968 }
969 }
970
971 #[cfg(any(test, feature = "test-support"))]
972 pub fn has_update_observer(&self) -> bool {
973 match self {
974 Worktree::Local(this) => this.update_observer.is_some(),
975 Worktree::Remote(this) => this.update_observer.is_some(),
976 }
977 }
978
979 pub fn load_file(&self, path: &Path, cx: &Context<Worktree>) -> Task<Result<LoadedFile>> {
980 match self {
981 Worktree::Local(this) => this.load_file(path, cx),
982 Worktree::Remote(_) => {
983 Task::ready(Err(anyhow!("remote worktrees can't yet load files")))
984 }
985 }
986 }
987
988 pub fn load_staged_file(&self, path: &Path, cx: &App) -> Task<Result<Option<String>>> {
989 match self {
990 Worktree::Local(this) => {
991 let path = Arc::from(path);
992 let snapshot = this.snapshot();
993 cx.background_executor().spawn(async move {
994 if let Some(repo) = snapshot.repository_for_path(&path) {
995 if let Some(repo_path) = repo.relativize(&path).log_err() {
996 if let Some(git_repo) =
997 snapshot.git_repositories.get(&repo.work_directory_id)
998 {
999 return Ok(git_repo.repo_ptr.load_index_text(&repo_path));
1000 }
1001 }
1002 }
1003 Err(anyhow!("No repository found for {path:?}"))
1004 })
1005 }
1006 Worktree::Remote(_) => {
1007 Task::ready(Err(anyhow!("remote worktrees can't yet load staged files")))
1008 }
1009 }
1010 }
1011
1012 pub fn load_committed_file(&self, path: &Path, cx: &App) -> Task<Result<Option<String>>> {
1013 match self {
1014 Worktree::Local(this) => {
1015 let path = Arc::from(path);
1016 let snapshot = this.snapshot();
1017 cx.background_executor().spawn(async move {
1018 if let Some(repo) = snapshot.repository_for_path(&path) {
1019 if let Some(repo_path) = repo.relativize(&path).log_err() {
1020 if let Some(git_repo) =
1021 snapshot.git_repositories.get(&repo.work_directory_id)
1022 {
1023 return Ok(git_repo.repo_ptr.load_committed_text(&repo_path));
1024 }
1025 }
1026 }
1027 Err(anyhow!("No repository found for {path:?}"))
1028 })
1029 }
1030 Worktree::Remote(_) => Task::ready(Err(anyhow!(
1031 "remote worktrees can't yet load committed files"
1032 ))),
1033 }
1034 }
1035
1036 pub fn load_binary_file(
1037 &self,
1038 path: &Path,
1039 cx: &Context<Worktree>,
1040 ) -> Task<Result<LoadedBinaryFile>> {
1041 match self {
1042 Worktree::Local(this) => this.load_binary_file(path, cx),
1043 Worktree::Remote(_) => {
1044 Task::ready(Err(anyhow!("remote worktrees can't yet load binary files")))
1045 }
1046 }
1047 }
1048
1049 pub fn write_file(
1050 &self,
1051 path: &Path,
1052 text: Rope,
1053 line_ending: LineEnding,
1054 cx: &Context<Worktree>,
1055 ) -> Task<Result<Arc<File>>> {
1056 match self {
1057 Worktree::Local(this) => this.write_file(path, text, line_ending, cx),
1058 Worktree::Remote(_) => {
1059 Task::ready(Err(anyhow!("remote worktree can't yet write files")))
1060 }
1061 }
1062 }
1063
    /// Creates a file or directory at `path`, relative to the worktree root.
    ///
    /// Local worktrees create the entry directly; remote worktrees send a
    /// `CreateProjectEntry` request to the host. If the new path falls under
    /// exclusion filters, [`CreatedEntry::Excluded`] is returned with the
    /// absolute path instead of an indexed entry.
    pub fn create_entry(
        &mut self,
        path: impl Into<Arc<Path>>,
        is_directory: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let path: Arc<Path> = path.into();
        let worktree_id = self.id();
        match self {
            Worktree::Local(this) => this.create_entry(path, is_directory, cx),
            Worktree::Remote(this) => {
                let project_id = this.project_id;
                let request = this.client.request(proto::CreateProjectEntry {
                    worktree_id: worktree_id.to_proto(),
                    project_id,
                    path: path.as_ref().to_proto(),
                    is_directory,
                });
                cx.spawn(move |this, mut cx| async move {
                    let response = request.await?;
                    match response.entry {
                        // The host indexed the entry; insert it and wait for
                        // our snapshot to reach the host's scan id.
                        Some(entry) => this
                            .update(&mut cx, |worktree, cx| {
                                worktree.as_remote_mut().unwrap().insert_entry(
                                    entry,
                                    response.worktree_scan_id as usize,
                                    cx,
                                )
                            })?
                            .await
                            .map(CreatedEntry::Included),
                        // No entry was produced: the path was excluded from
                        // indexing on the host; report its absolute path.
                        None => {
                            let abs_path = this.update(&mut cx, |worktree, _| {
                                worktree
                                    .absolutize(&path)
                                    .with_context(|| format!("absolutizing {path:?}"))
                            })??;
                            Ok(CreatedEntry::Excluded { abs_path })
                        }
                    }
                })
            }
        }
    }
1108
    /// Delete the entry with `entry_id`, optionally moving it to the trash.
    ///
    /// Returns `None` when the entry does not exist. Emits
    /// `Event::DeletedEntry` for the entry and every descendant before the
    /// returned task (which performs the actual deletion) completes.
    pub fn delete_entry(
        &mut self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &mut Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let task = match self {
            Worktree::Local(this) => this.delete_entry(entry_id, trash, cx),
            Worktree::Remote(this) => this.delete_entry(entry_id, trash, cx),
        }?;

        let entry = match self {
            Worktree::Local(ref this) => this.entry_for_id(entry_id),
            Worktree::Remote(ref this) => this.entry_for_id(entry_id),
        }?;

        // Collect the id of the deleted entry plus all entries beneath it so
        // observers hear about the whole subtree.
        let mut ids = vec![entry_id];
        let path = &*entry.path;

        self.get_children_ids_recursive(path, &mut ids);

        for id in ids {
            cx.emit(Event::DeletedEntry(id));
        }
        Some(task)
    }
1135
1136 fn get_children_ids_recursive(&self, path: &Path, ids: &mut Vec<ProjectEntryId>) {
1137 let children_iter = self.child_entries(path);
1138 for child in children_iter {
1139 ids.push(child.id);
1140 self.get_children_ids_recursive(&child.path, ids);
1141 }
1142 }
1143
1144 pub fn rename_entry(
1145 &mut self,
1146 entry_id: ProjectEntryId,
1147 new_path: impl Into<Arc<Path>>,
1148 cx: &Context<Self>,
1149 ) -> Task<Result<CreatedEntry>> {
1150 let new_path = new_path.into();
1151 match self {
1152 Worktree::Local(this) => this.rename_entry(entry_id, new_path, cx),
1153 Worktree::Remote(this) => this.rename_entry(entry_id, new_path, cx),
1154 }
1155 }
1156
    /// Copy the entry with `entry_id` to `new_path` (worktree-relative).
    ///
    /// `relative_worktree_source_path`, when set, overrides the source path
    /// used for the copy. Remote worktrees send a `CopyProjectEntry` RPC and
    /// wait for the replica to reach the host's scan id; `Ok(None)` means no
    /// entry was produced (e.g. the target was excluded).
    pub fn copy_entry(
        &mut self,
        entry_id: ProjectEntryId,
        relative_worktree_source_path: Option<PathBuf>,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Self>,
    ) -> Task<Result<Option<Entry>>> {
        let new_path: Arc<Path> = new_path.into();
        match self {
            Worktree::Local(this) => {
                this.copy_entry(entry_id, relative_worktree_source_path, new_path, cx)
            }
            Worktree::Remote(this) => {
                let relative_worktree_source_path = relative_worktree_source_path
                    .map(|relative_worktree_source_path| relative_worktree_source_path.to_proto());
                let response = this.client.request(proto::CopyProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                    relative_worktree_source_path,
                    new_path: new_path.to_proto(),
                });
                cx.spawn(move |this, mut cx| async move {
                    let response = response.await?;
                    match response.entry {
                        // Insert the host's entry and wait for our snapshot
                        // to catch up to the host's scan.
                        Some(entry) => this
                            .update(&mut cx, |worktree, cx| {
                                worktree.as_remote_mut().unwrap().insert_entry(
                                    entry,
                                    response.worktree_scan_id as usize,
                                    cx,
                                )
                            })?
                            .await
                            .map(Some),
                        None => Ok(None),
                    }
                })
            }
        }
    }
1197
1198 pub fn copy_external_entries(
1199 &mut self,
1200 target_directory: PathBuf,
1201 paths: Vec<Arc<Path>>,
1202 overwrite_existing_files: bool,
1203 cx: &Context<Worktree>,
1204 ) -> Task<Result<Vec<ProjectEntryId>>> {
1205 match self {
1206 Worktree::Local(this) => {
1207 this.copy_external_entries(target_directory, paths, overwrite_existing_files, cx)
1208 }
1209 _ => Task::ready(Err(anyhow!(
1210 "Copying external entries is not supported for remote worktrees"
1211 ))),
1212 }
1213 }
1214
    /// Ensure the children of the directory entry with `entry_id` are
    /// scanned ("expanding" it).
    ///
    /// Remote worktrees send an `ExpandProjectEntry` RPC and then wait until
    /// the local snapshot reaches the host's scan id. Returns `None` when
    /// the local implementation reports no such entry.
    pub fn expand_entry(
        &mut self,
        entry_id: ProjectEntryId,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        match self {
            Worktree::Local(this) => this.expand_entry(entry_id, cx),
            Worktree::Remote(this) => {
                let response = this.client.request(proto::ExpandProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                });
                Some(cx.spawn(move |this, mut cx| async move {
                    let response = response.await?;
                    // Block until our replica has the host's scan applied.
                    this.update(&mut cx, |this, _| {
                        this.as_remote_mut()
                            .unwrap()
                            .wait_for_snapshot(response.worktree_scan_id as usize)
                    })?
                    .await?;
                    Ok(())
                }))
            }
        }
    }
1240
    /// Recursively scan everything beneath the entry with `entry_id`
    /// (expand-all), mirroring `expand_entry` but for the whole subtree.
    ///
    /// Remote worktrees send an `ExpandAllForProjectEntry` RPC and wait for
    /// the local snapshot to reach the host's scan id.
    pub fn expand_all_for_entry(
        &mut self,
        entry_id: ProjectEntryId,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        match self {
            Worktree::Local(this) => this.expand_all_for_entry(entry_id, cx),
            Worktree::Remote(this) => {
                let response = this.client.request(proto::ExpandAllForProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                });
                Some(cx.spawn(move |this, mut cx| async move {
                    let response = response.await?;
                    // Block until our replica has the host's scan applied.
                    this.update(&mut cx, |this, _| {
                        this.as_remote_mut()
                            .unwrap()
                            .wait_for_snapshot(response.worktree_scan_id as usize)
                    })?
                    .await?;
                    Ok(())
                }))
            }
        }
    }
1266
    /// RPC handler for `CreateProjectEntry`: creates the requested entry and
    /// responds with the created entry (when tracked) plus the scan id that
    /// was current when the request was handled.
    pub async fn handle_create_entry(
        this: Entity<Self>,
        request: proto::CreateProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        // Capture the scan id and kick off the creation in one update.
        let (scan_id, entry) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.create_entry(
                    Arc::<Path>::from_proto(request.path),
                    request.is_directory,
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            // Excluded paths produce no entry in the response.
            entry: match &entry.await? {
                CreatedEntry::Included(entry) => Some(entry.into()),
                CreatedEntry::Excluded { .. } => None,
            },
            worktree_scan_id: scan_id as u64,
        })
    }
1290
    /// RPC handler for `DeleteProjectEntry`: deletes (or trashes) the entry
    /// and responds with the scan id that was current when the request was
    /// handled. Errors when the entry id is unknown.
    pub async fn handle_delete_entry(
        this: Entity<Self>,
        request: proto::DeleteProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.delete_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    request.use_trash,
                    cx,
                ),
            )
        })?;
        task.ok_or_else(|| anyhow!("invalid entry"))?.await?;
        Ok(proto::ProjectEntryResponse {
            entry: None,
            worktree_scan_id: scan_id as u64,
        })
    }
1312
    /// RPC handler for `ExpandProjectEntry`: expands the entry, then reports
    /// the scan id observed after the expansion finished so the requester
    /// can wait for a matching snapshot.
    pub async fn handle_expand_entry(
        this: Entity<Self>,
        request: proto::ExpandProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ExpandProjectEntryResponse> {
        let task = this.update(&mut cx, |this, cx| {
            this.expand_entry(ProjectEntryId::from_proto(request.entry_id), cx)
        })?;
        task.ok_or_else(|| anyhow!("no such entry"))?.await?;
        // Read the scan id only after the expansion completed.
        let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
        Ok(proto::ExpandProjectEntryResponse {
            worktree_scan_id: scan_id as u64,
        })
    }
1327
    /// RPC handler for `ExpandAllForProjectEntry`: recursively expands the
    /// entry's subtree, then reports the scan id observed after the work
    /// finished so the requester can wait for a matching snapshot.
    pub async fn handle_expand_all_for_entry(
        this: Entity<Self>,
        request: proto::ExpandAllForProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ExpandAllForProjectEntryResponse> {
        let task = this.update(&mut cx, |this, cx| {
            this.expand_all_for_entry(ProjectEntryId::from_proto(request.entry_id), cx)
        })?;
        task.ok_or_else(|| anyhow!("no such entry"))?.await?;
        // Read the scan id only after the expansion completed.
        let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
        Ok(proto::ExpandAllForProjectEntryResponse {
            worktree_scan_id: scan_id as u64,
        })
    }
1342
    /// RPC handler for `RenameProjectEntry`: renames the entry and responds
    /// with the renamed entry (when tracked) plus the scan id that was
    /// current when the request was handled.
    pub async fn handle_rename_entry(
        this: Entity<Self>,
        request: proto::RenameProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.rename_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    Arc::<Path>::from_proto(request.new_path),
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            // Excluded destinations produce no entry in the response.
            entry: match &task.await? {
                CreatedEntry::Included(entry) => Some(entry.into()),
                CreatedEntry::Excluded { .. } => None,
            },
            worktree_scan_id: scan_id as u64,
        })
    }
1366
    /// RPC handler for `CopyProjectEntry`: copies the entry and responds
    /// with the new entry (when one was produced) plus the scan id that was
    /// current when the request was handled.
    pub async fn handle_copy_entry(
        this: Entity<Self>,
        request: proto::CopyProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            let relative_worktree_source_path = request
                .relative_worktree_source_path
                .map(PathBuf::from_proto);
            (
                this.scan_id(),
                this.copy_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    relative_worktree_source_path,
                    PathBuf::from_proto(request.new_path),
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            entry: task.await?.as_ref().map(|e| e.into()),
            worktree_scan_id: scan_id as u64,
        })
    }
1391}
1392
1393impl LocalWorktree {
    /// The filesystem implementation this worktree reads from and writes to.
    pub fn fs(&self) -> &Arc<dyn Fs> {
        &self.fs
    }
1397
    /// Whether `path` is considered private by the worktree settings, unless
    /// the user has opted into sharing private files.
    pub fn is_path_private(&self, path: &Path) -> bool {
        !self.share_private_files && self.settings.is_path_private(path)
    }
1401
    /// Tear down and restart the background scanner, replacing the request
    /// channels and re-scanning always-included entries.
    fn restart_background_scanners(&mut self, cx: &Context<Worktree>) {
        // Fresh channels: the old scanner's receivers are dropped when the
        // scanner tasks are replaced below.
        let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
        let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
        self.scan_requests_tx = scan_requests_tx;
        self.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;

        self.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
        let always_included_entries = mem::take(&mut self.snapshot.always_included_entries);
        log::debug!(
            "refreshing entries for the following always included paths: {:?}",
            always_included_entries
        );

        // Cleans up old always included entries to ensure they get updated properly. Otherwise,
        // nested always included entries may not get updated and will result in out-of-date info.
        self.refresh_entries_for_paths(always_included_entries);
    }
1419
    /// Spawn the two long-lived tasks that keep this worktree in sync with
    /// the filesystem:
    ///
    /// 1. a background task running `BackgroundScanner` over an FS watch
    ///    stream, and
    /// 2. a foreground task that applies the scanner's `ScanState` updates
    ///    back onto the worktree entity.
    ///
    /// Both task handles are stored in `_background_scanner_tasks` so they
    /// are dropped (cancelled) together when replaced or when the worktree
    /// is dropped.
    fn start_background_scanner(
        &mut self,
        scan_requests_rx: channel::Receiver<ScanRequest>,
        path_prefixes_to_scan_rx: channel::Receiver<PathPrefixScanRequest>,
        cx: &Context<Worktree>,
    ) {
        let snapshot = self.snapshot();
        let share_private_files = self.share_private_files;
        let next_entry_id = self.next_entry_id.clone();
        let fs = self.fs.clone();
        let git_hosting_provider_registry = GitHostingProviderRegistry::try_global(cx);
        let settings = self.settings.clone();
        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
        let background_scanner = cx.background_executor().spawn({
            let abs_path = snapshot.abs_path.as_path().to_path_buf();
            let background = cx.background_executor().clone();
            async move {
                let (events, watcher) = fs.watch(&abs_path, FS_WATCH_LATENCY).await;
                // Case sensitivity affects rename/overwrite handling; assume
                // sensitive if detection fails.
                let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
                    log::error!("Failed to determine whether filesystem is case sensitive: {e:#}");
                    true
                });

                let mut scanner = BackgroundScanner {
                    fs,
                    fs_case_sensitive,
                    status_updates_tx: scan_states_tx,
                    executor: background,
                    scan_requests_rx,
                    path_prefixes_to_scan_rx,
                    next_entry_id,
                    state: Arc::new(Mutex::new(BackgroundScannerState {
                        prev_snapshot: snapshot.snapshot.clone(),
                        snapshot,
                        scanned_dirs: Default::default(),
                        path_prefixes_to_scan: Default::default(),
                        paths_to_scan: Default::default(),
                        removed_entries: Default::default(),
                        changed_paths: Default::default(),
                        repository_scans: HashMap::default(),
                        git_hosting_provider_registry,
                    })),
                    phase: BackgroundScannerPhase::InitialScan,
                    share_private_files,
                    settings,
                    watcher,
                };

                scanner
                    .run(Box::pin(
                        events.map(|events| events.into_iter().map(Into::into).collect()),
                    ))
                    .await;
            }
        });
        // Forward scanner state back onto the entity; stops when either the
        // scanner or the worktree goes away.
        let scan_state_updater = cx.spawn(|this, mut cx| async move {
            while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
                this.update(&mut cx, |this, cx| {
                    let this = this.as_local_mut().unwrap();
                    match state {
                        ScanState::Started => {
                            *this.is_scanning.0.borrow_mut() = true;
                        }
                        ScanState::Updated {
                            snapshot,
                            changes,
                            barrier,
                            scanning,
                        } => {
                            *this.is_scanning.0.borrow_mut() = scanning;
                            this.set_snapshot(snapshot, changes, cx);
                            // Dropping the barrier wakes anyone waiting on
                            // this scan.
                            drop(barrier);
                        }
                        ScanState::RootUpdated { new_path } => {
                            this.update_abs_path_and_refresh(new_path, cx);
                        }
                    }
                    cx.notify();
                })
                .ok();
            }
        });
        self._background_scanner_tasks = vec![background_scanner, scan_state_updater];
        self.is_scanning = watch::channel_with(true);
    }
1505
    /// Replace the worktree's snapshot with `new_snapshot`, forwarding the
    /// entry and repository deltas to any remote observer and emitting the
    /// corresponding events.
    fn set_snapshot(
        &mut self,
        new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut Context<Worktree>,
    ) {
        // Compute repo deltas against the outgoing snapshot before swapping.
        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);
        self.snapshot = new_snapshot;

        if let Some(share) = self.update_observer.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((
                    self.snapshot.clone(),
                    entry_changes.clone(),
                    repo_changes.clone(),
                ))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }
1533
    /// Diff the git repositories of two snapshots, producing the set of work
    /// directories whose repository state changed between them.
    ///
    /// Both `git_repositories` maps are walked in parallel in ascending
    /// entry-id order (a sorted-merge join), so each repository is visited
    /// exactly once: ids only in the new snapshot are additions, ids only in
    /// the old one are removals, and matching ids are compared by scan ids.
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();

        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        // Only in the new snapshot: repository was added.
                        Ordering::Less => {
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        // Present in both: changed only if a scan id moved.
                        Ordering::Equal => {
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id
                                || new_repo.status_scan_id != old_repo.status_scan_id
                            {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repositories
                                        .get(&PathKey(entry.path.clone()), &())
                                        .cloned();
                                    changes.push((
                                        entry.path.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        // Only in the old snapshot: repository was removed.
                        Ordering::Greater => {
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repositories
                                    .get(&PathKey(entry.path.clone()), &())
                                    .cloned();
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                // Old side exhausted: the rest are additions.
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                // New side exhausted: the rest are removals.
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repositories
                            .get(&PathKey(entry.path.clone()), &())
                            .cloned();
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Helper to turn the peeked `(&K, &V)` into owned values.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
1631
1632 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1633 let mut is_scanning_rx = self.is_scanning.1.clone();
1634 async move {
1635 let mut is_scanning = *is_scanning_rx.borrow();
1636 while is_scanning {
1637 if let Some(value) = is_scanning_rx.recv().await {
1638 is_scanning = value;
1639 } else {
1640 break;
1641 }
1642 }
1643 }
1644 }
1645
    /// A clone of the current local snapshot of this worktree.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
1649
    /// A clone of the worktree settings in effect for this worktree.
    pub fn settings(&self) -> WorktreeSettings {
        self.settings.clone()
    }
1653
    /// Look up the local repository state for the given repository entry by
    /// its work directory id.
    pub fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory_id)
    }
1657
    /// Load the raw bytes at `path`, refreshing the worktree entry for it in
    /// parallel.
    ///
    /// If the path is excluded (no entry after the refresh), a detached
    /// `File` with `entry_id: None` is synthesized from FS metadata so the
    /// caller still gets a usable handle.
    fn load_binary_file(
        &self,
        path: &Path,
        cx: &Context<Worktree>,
    ) -> Task<Result<LoadedBinaryFile>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Refresh the entry concurrently with loading the bytes.
        let entry = self.refresh_entry(path.clone(), None, cx);
        let is_private = self.is_path_private(path.as_ref());

        let worktree = cx.weak_entity();
        cx.background_executor().spawn(async move {
            let abs_path = abs_path?;
            let content = fs.load_bytes(&abs_path).await?;

            let worktree = worktree
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            let file = match entry.await? {
                Some(entry) => File::for_entry(entry, worktree),
                // Excluded path: build a File from FS metadata instead of a
                // worktree entry.
                None => {
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    Arc::new(File {
                        entry_id: None,
                        worktree,
                        path,
                        disk_state: DiskState::Present {
                            mtime: metadata.mtime,
                        },
                        is_local: true,
                        is_private,
                    })
                }
            };

            Ok(LoadedBinaryFile { file, content })
        })
    }
1705
    /// Load the text at `path`, refreshing the worktree entry for it in
    /// parallel.
    ///
    /// Mirrors `load_binary_file`: excluded paths yield a detached `File`
    /// (`entry_id: None`) built from FS metadata.
    fn load_file(&self, path: &Path, cx: &Context<Worktree>) -> Task<Result<LoadedFile>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Refresh the entry concurrently with loading the text.
        let entry = self.refresh_entry(path.clone(), None, cx);
        let is_private = self.is_path_private(path.as_ref());

        cx.spawn(|this, _cx| async move {
            let abs_path = abs_path?;
            let text = fs.load(&abs_path).await?;

            let worktree = this
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            let file = match entry.await? {
                Some(entry) => File::for_entry(entry, worktree),
                // Excluded path: build a File from FS metadata instead of a
                // worktree entry.
                None => {
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    Arc::new(File {
                        entry_id: None,
                        worktree,
                        path,
                        disk_state: DiskState::Present {
                            mtime: metadata.mtime,
                        },
                        is_local: true,
                        is_private,
                    })
                }
            };

            Ok(LoadedFile { file, text })
        })
    }
1748
1749 /// Find the lowest path in the worktree's datastructures that is an ancestor
1750 fn lowest_ancestor(&self, path: &Path) -> PathBuf {
1751 let mut lowest_ancestor = None;
1752 for path in path.ancestors() {
1753 if self.entry_for_path(path).is_some() {
1754 lowest_ancestor = Some(path.to_path_buf());
1755 break;
1756 }
1757 }
1758
1759 lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
1760 }
1761
    /// Create a file (empty) or directory at `path` on disk, then refresh
    /// the new entry and every intermediate directory below the lowest
    /// pre-existing ancestor so the snapshot picks them all up.
    ///
    /// Excluded paths are still written to disk but reported as
    /// `CreatedEntry::Excluded` with their absolute path.
    fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let path = path.into();
        let abs_path = match self.absolutize(&path) {
            Ok(path) => path,
            Err(e) => return Task::ready(Err(e.context(format!("absolutizing path {path:?}")))),
        };
        let path_excluded = self.settings.is_path_excluded(&abs_path);
        let fs = self.fs.clone();
        let task_abs_path = abs_path.clone();
        let write = cx.background_executor().spawn(async move {
            if is_dir {
                fs.create_dir(&task_abs_path)
                    .await
                    .with_context(|| format!("creating directory {task_abs_path:?}"))
            } else {
                fs.save(&task_abs_path, &Rope::default(), LineEnding::default())
                    .await
                    .with_context(|| format!("creating file {task_abs_path:?}"))
            }
        });

        // Capture this before the write so we know which new intermediate
        // directories will need refreshing afterwards.
        let lowest_ancestor = self.lowest_ancestor(&path);
        cx.spawn(|this, mut cx| async move {
            write.await?;
            if path_excluded {
                return Ok(CreatedEntry::Excluded { abs_path });
            }

            // Refresh every newly-created intermediate directory between the
            // lowest pre-existing ancestor and the new path.
            let (result, refreshes) = this.update(&mut cx, |this, cx| {
                let mut refreshes = Vec::new();
                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
                for refresh_path in refresh_paths.ancestors() {
                    if refresh_path == Path::new("") {
                        continue;
                    }
                    let refresh_full_path = lowest_ancestor.join(refresh_path);

                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
                        refresh_full_path.into(),
                        None,
                        cx,
                    ));
                }
                (
                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
                    refreshes,
                )
            })?;
            for refresh in refreshes {
                refresh.await.log_err();
            }

            Ok(result
                .await?
                .map(CreatedEntry::Included)
                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
        })
    }
1825
    /// Save `text` to `path` on disk, then refresh the entry and return the
    /// resulting `File` handle.
    ///
    /// If the path is excluded (no entry after the refresh), a detached
    /// `File` with `entry_id: None` is synthesized from FS metadata.
    fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &Context<Worktree>,
    ) -> Task<Result<Arc<File>>> {
        let path = path.into();
        let fs = self.fs.clone();
        let is_private = self.is_path_private(&path);
        let Ok(abs_path) = self.absolutize(&path) else {
            return Task::ready(Err(anyhow!("invalid path {path:?}")));
        };

        let write = cx.background_executor().spawn({
            let fs = fs.clone();
            let abs_path = abs_path.clone();
            async move { fs.save(&abs_path, &text, line_ending).await }
        });

        cx.spawn(move |this, mut cx| async move {
            write.await?;
            // Only refresh the entry after the write has landed on disk.
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut()
                        .unwrap()
                        .refresh_entry(path.clone(), None, cx)
                })?
                .await?;
            let worktree = this.upgrade().ok_or_else(|| anyhow!("worktree dropped"))?;
            if let Some(entry) = entry {
                Ok(File::for_entry(entry, worktree))
            } else {
                // Excluded path: build a File from FS metadata instead of a
                // worktree entry.
                let metadata = fs
                    .metadata(&abs_path)
                    .await
                    .with_context(|| {
                        format!("Fetching metadata after saving the excluded buffer {abs_path:?}")
                    })?
                    .with_context(|| {
                        format!("Excluded buffer {path:?} got removed during saving")
                    })?;
                Ok(Arc::new(File {
                    worktree,
                    path,
                    disk_state: DiskState::Present {
                        mtime: metadata.mtime,
                    },
                    entry_id: None,
                    is_local: true,
                    is_private,
                }))
            }
        })
    }
1881
    /// Delete the entry with `entry_id` from disk (file or directory,
    /// trashed or removed outright), then rescan its path so the snapshot
    /// reflects the removal. Returns `None` when the entry does not exist.
    fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let fs = self.fs.clone();

        let delete = cx.background_executor().spawn(async move {
            if entry.is_file() {
                if trash {
                    fs.trash_file(&abs_path?, Default::default()).await?;
                } else {
                    fs.remove_file(&abs_path?, Default::default()).await?;
                }
            } else if trash {
                fs.trash_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            } else {
                fs.remove_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(entry.path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let path = delete.await?;
            // Rescan the deleted path so the snapshot drops the entry.
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entries_for_paths(vec![path])
            })?
            .recv()
            .await;
            Ok(())
        }))
    }
1933
    /// Rename an entry.
    ///
    /// `new_path` is the new relative path to the worktree root.
    /// If the root entry is renamed then `new_path` is the new root name instead.
    ///
    /// Returns `CreatedEntry::Excluded` when the destination falls outside
    /// the tracked worktree entries.
    fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Err(anyhow!("no entry to rename for id {entry_id:?}"))),
        };
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);

        // Renaming the root entry renames the worktree's own directory, so
        // resolve the new path against the root's parent instead.
        let is_root_entry = self.root_entry().is_some_and(|e| e.id == entry_id);
        let abs_new_path = if is_root_entry {
            let Some(root_parent_path) = self.abs_path().parent() else {
                return Task::ready(Err(anyhow!("no parent for path {:?}", self.abs_path)));
            };
            root_parent_path.join(&new_path)
        } else {
            let Ok(absolutize_path) = self.absolutize(&new_path) else {
                return Task::ready(Err(anyhow!("absolutizing path {new_path:?}")));
            };
            absolutize_path
        };
        let abs_path = abs_new_path.clone();
        let fs = self.fs.clone();
        let case_sensitive = self.fs_case_sensitive;
        let rename = cx.background_executor().spawn(async move {
            let abs_old_path = abs_old_path?;
            let abs_new_path = abs_new_path;

            let abs_old_path_lower = abs_old_path.to_str().map(|p| p.to_lowercase());
            let abs_new_path_lower = abs_new_path.to_str().map(|p| p.to_lowercase());

            // If we're on a case-insensitive FS and we're doing a case-only rename (i.e. `foobar` to `FOOBAR`)
            // we want to overwrite, because otherwise we run into a file-already-exists error.
            let overwrite = !case_sensitive
                && abs_old_path != abs_new_path
                && abs_old_path_lower == abs_new_path_lower;

            fs.rename(
                &abs_old_path,
                &abs_new_path,
                fs::RenameOptions {
                    overwrite,
                    ..Default::default()
                },
            )
            .await
            .with_context(|| format!("Renaming {abs_old_path:?} into {abs_new_path:?}"))
        });

        cx.spawn(|this, mut cx| async move {
            rename.await?;
            Ok(this
                .update(&mut cx, |this, cx| {
                    let local = this.as_local_mut().unwrap();
                    if is_root_entry {
                        // We eagerly update `abs_path` and refresh this worktree.
                        // Otherwise, the FS watcher would do it on the `RootUpdated` event,
                        // but with a noticeable delay, so we handle it proactively.
                        local.update_abs_path_and_refresh(
                            Some(SanitizedPath::from(abs_path.clone())),
                            cx,
                        );
                        Task::ready(Ok(this.root_entry().cloned()))
                    } else {
                        local.refresh_entry(new_path.clone(), Some(old_path), cx)
                    }
                })?
                .await?
                .map(CreatedEntry::Included)
                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
        })
    }
2014
    /// Recursively copy the entry with `entry_id` to `new_path`
    /// (worktree-relative), then refresh the destination entry.
    ///
    /// `relative_worktree_source_path`, when set, overrides the copy source.
    /// Returns `Ok(None)` when the source entry does not exist or the
    /// destination is excluded from the worktree.
    fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        relative_worktree_source_path: Option<PathBuf>,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Ok(None)),
        };
        let new_path = new_path.into();
        let abs_old_path =
            if let Some(relative_worktree_source_path) = relative_worktree_source_path {
                Ok(self.abs_path().join(relative_worktree_source_path))
            } else {
                self.absolutize(&old_path)
            };
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background_executor().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path?,
                &abs_new_path?,
                Default::default(),
            )
            .await
        });

        cx.spawn(|this, mut cx| async move {
            copy.await?;
            // Refresh the destination only after the copy completed.
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })?
            .await
        })
    }
2055
    /// Copy external files/directories into `target_directory` inside this
    /// worktree, refresh the copied paths, and return the ids of the
    /// resulting entries.
    ///
    /// Sources whose destination would equal the source itself are skipped,
    /// and copy/refresh failures are logged rather than propagated.
    pub fn copy_external_entries(
        &self,
        target_directory: PathBuf,
        paths: Vec<Arc<Path>>,
        overwrite_existing_files: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<Vec<ProjectEntryId>>> {
        let worktree_path = self.abs_path().clone();
        let fs = self.fs.clone();
        // Pair each source with its destination (target dir + file name).
        let paths = paths
            .into_iter()
            .filter_map(|source| {
                let file_name = source.file_name()?;
                let mut target = target_directory.clone();
                target.push(file_name);

                // Do not allow copying the same file to itself.
                if source.as_ref() != target.as_path() {
                    Some((source, target))
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        // Worktree-relative destinations, used for the refresh and the final
        // entry-id lookup.
        let paths_to_refresh = paths
            .iter()
            .filter_map(|(_, target)| Some(target.strip_prefix(&worktree_path).ok()?.into()))
            .collect::<Vec<_>>();

        cx.spawn(|this, cx| async move {
            cx.background_executor()
                .spawn(async move {
                    for (source, target) in paths {
                        copy_recursive(
                            fs.as_ref(),
                            &source,
                            &target,
                            fs::CopyOptions {
                                overwrite: overwrite_existing_files,
                                ..Default::default()
                            },
                        )
                        .await
                        .with_context(|| {
                            anyhow!("Failed to copy file from {source:?} to {target:?}")
                        })?;
                    }
                    Ok::<(), anyhow::Error>(())
                })
                .await
                .log_err();
            let mut refresh = cx.read_entity(
                &this.upgrade().with_context(|| "Dropped worktree")?,
                |this, _| {
                    Ok::<postage::barrier::Receiver, anyhow::Error>(
                        this.as_local()
                            .with_context(|| "Worktree is not local")?
                            .refresh_entries_for_paths(paths_to_refresh.clone()),
                    )
                },
            )??;

            // Wait for the rescan of the copied paths to finish.
            cx.background_executor()
                .spawn(async move {
                    refresh.next().await;
                    Ok::<(), anyhow::Error>(())
                })
                .await
                .log_err();

            let this = this.upgrade().with_context(|| "Dropped worktree")?;
            cx.read_entity(&this, |this, _| {
                paths_to_refresh
                    .iter()
                    .filter_map(|path| Some(this.entry_for_path(path)?.id))
                    .collect()
            })
        })
    }
2136
2137 fn expand_entry(
2138 &self,
2139 entry_id: ProjectEntryId,
2140 cx: &Context<Worktree>,
2141 ) -> Option<Task<Result<()>>> {
2142 let path = self.entry_for_id(entry_id)?.path.clone();
2143 let mut refresh = self.refresh_entries_for_paths(vec![path]);
2144 Some(cx.background_executor().spawn(async move {
2145 refresh.next().await;
2146 Ok(())
2147 }))
2148 }
2149
2150 fn expand_all_for_entry(
2151 &self,
2152 entry_id: ProjectEntryId,
2153 cx: &Context<Worktree>,
2154 ) -> Option<Task<Result<()>>> {
2155 let path = self.entry_for_id(entry_id).unwrap().path.clone();
2156 let mut rx = self.add_path_prefix_to_scan(path.clone());
2157 Some(cx.background_executor().spawn(async move {
2158 rx.next().await;
2159 Ok(())
2160 }))
2161 }
2162
2163 fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
2164 let (tx, rx) = barrier::channel();
2165 self.scan_requests_tx
2166 .try_send(ScanRequest {
2167 relative_paths: paths,
2168 done: smallvec![tx],
2169 })
2170 .ok();
2171 rx
2172 }
2173
2174 pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) -> barrier::Receiver {
2175 let (tx, rx) = barrier::channel();
2176 self.path_prefixes_to_scan_tx
2177 .try_send(PathPrefixScanRequest {
2178 path: path_prefix,
2179 done: smallvec![tx],
2180 })
2181 .ok();
2182 rx
2183 }
2184
    /// Queues a rescan of `path` (and of `old_path`, when the entry was
    /// renamed) and resolves to the freshly-scanned entry.
    ///
    /// Returns `Ok(None)` without scanning when the path is excluded by the
    /// worktree settings; fails if the entry cannot be found after the scan
    /// completes (e.g. it was deleted in the meantime).
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        if self.settings.is_path_excluded(&path) {
            return Task::ready(Ok(None));
        }
        // For a rename, refresh both the old and the new location so the
        // stale entry is removed and the new one is discovered.
        let paths = if let Some(old_path) = old_path.as_ref() {
            vec![old_path.clone(), path.clone()]
        } else {
            vec![path.clone()]
        };
        let t0 = Instant::now();
        let mut refresh = self.refresh_entries_for_paths(paths);
        cx.spawn(move |this, mut cx| async move {
            // Wait for the background scanner to finish processing the request.
            refresh.recv().await;
            log::trace!("refreshed entry {path:?} in {:?}", t0.elapsed());
            let new_entry = this.update(&mut cx, |this, _| {
                this.entry_for_path(path)
                    .cloned()
                    .ok_or_else(|| anyhow!("failed to read path after update"))
            })??;
            Ok(Some(new_entry))
        })
    }
2212
    /// Begins streaming this worktree's state to a remote peer via `callback`.
    ///
    /// If an observer is already installed, this only nudges the resume
    /// channel (in case the stream was paused) and returns. Otherwise it
    /// spawns a background task that sends one full initial update followed
    /// by incremental updates; whenever `callback` returns `false` the stream
    /// pauses until the resume channel is signaled again.
    fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: Send + Future<Output = bool>,
    {
        if let Some(observer) = self.update_observer.as_mut() {
            *observer.resume_updates.borrow_mut() = ();
            return;
        }

        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
        let (snapshots_tx, mut snapshots_rx) =
            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
        // Seed the channel with the current snapshot so the peer receives a
        // full initial update before any incremental ones.
        snapshots_tx
            .unbounded_send((self.snapshot(), Arc::default(), Arc::default()))
            .ok();

        let worktree_id = cx.entity_id().as_u64();
        let _maintain_remote_snapshot = cx.background_executor().spawn(async move {
            let mut is_first = true;
            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
                let update = if is_first {
                    is_first = false;
                    snapshot.build_initial_update(project_id, worktree_id)
                } else {
                    snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes)
                };

                // Large updates are split into bounded-size chunks.
                for update in proto::split_worktree_update(update) {
                    // Discard any stale resume signal before sending.
                    let _ = resume_updates_rx.try_recv();
                    loop {
                        let result = callback(update.clone());
                        if result.await {
                            break;
                        } else {
                            // The peer refused the update; retry after the
                            // resume channel fires, or stop if it has closed.
                            log::info!("waiting to resume updates");
                            if resume_updates_rx.next().await.is_none() {
                                return Some(());
                            }
                        }
                    }
                }
            }
            Some(())
        });

        self.update_observer = Some(UpdateObservationState {
            snapshots_tx,
            resume_updates: resume_updates_tx,
            _maintain_remote_snapshot,
        });
    }
2265
    /// Enables sharing of entries that are normally kept private, restarting
    /// the background scanners so the snapshot is rebuilt with the new
    /// visibility rules.
    pub fn share_private_files(&mut self, cx: &Context<Worktree>) {
        self.share_private_files = true;
        self.restart_background_scanners(cx);
    }
2270
2271 fn update_abs_path_and_refresh(
2272 &mut self,
2273 new_path: Option<SanitizedPath>,
2274 cx: &Context<Worktree>,
2275 ) {
2276 if let Some(new_path) = new_path {
2277 self.snapshot.git_repositories = Default::default();
2278 self.snapshot.ignores_by_parent_abs_path = Default::default();
2279 let root_name = new_path
2280 .as_path()
2281 .file_name()
2282 .map_or(String::new(), |f| f.to_string_lossy().to_string());
2283 self.snapshot.update_abs_path(new_path, root_name);
2284 }
2285 self.restart_background_scanners(cx);
2286 }
2287}
2288
2289impl RemoteWorktree {
    /// The remote project id this worktree belongs to.
    pub fn project_id(&self) -> u64 {
        self.project_id
    }

    /// A handle to the RPC client used to reach the host.
    pub fn client(&self) -> AnyProtoClient {
        self.client.clone()
    }

    /// Marks this worktree as disconnected: drops the update channel so no
    /// further remote updates are accepted, and drops all pending snapshot
    /// subscriptions so their waiters resolve with an error.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
2303
2304 pub fn update_from_remote(&self, update: proto::UpdateWorktree) {
2305 if let Some(updates_tx) = &self.updates_tx {
2306 updates_tx
2307 .unbounded_send(update)
2308 .expect("consumer runs to completion");
2309 }
2310 }
2311
    /// Streams this remote worktree's state onward to another peer. Sends a
    /// full initial update first, then forwards each update received from the
    /// host, until `callback` returns `false` or the update channel closes.
    fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        let (tx, mut rx) = mpsc::unbounded();
        let initial_update = self
            .snapshot
            .build_initial_update(project_id, self.id().to_proto());
        self.update_observer = Some(tx);
        cx.spawn(|this, mut cx| async move {
            let mut update = initial_update;
            'outer: loop {
                // SSH projects use a special project ID of 0, and we need to
                // remap it to the correct one here.
                update.project_id = project_id;

                // Large updates are split into bounded-size chunks; stop
                // streaming entirely if the peer rejects one.
                for chunk in split_worktree_update(update) {
                    if !callback(chunk).await {
                        break 'outer;
                    }
                }

                if let Some(next_update) = rx.next().await {
                    update = next_update;
                } else {
                    break;
                }
            }
            // Tear down the observer once streaming stops.
            this.update(&mut cx, |this, _| {
                let this = this.as_remote_mut().unwrap();
                this.update_observer.take();
            })
        })
        .detach();
    }
2348
    /// Whether a scan with the given id (or a later one) has already been
    /// fully applied to this worktree.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }
2352
    /// Returns a future that resolves once a scan with id >= `scan_id` has
    /// been observed. Resolves immediately if already observed; resolves with
    /// an error (via the dropped sender) if the worktree has disconnected.
    pub fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender causes `rx.await` below to fail.
            drop(tx);
        } else {
            // Keep the subscription list sorted by scan id so completed scans
            // can wake the right subscribers in order.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
2373
    /// Inserts an entry received from the host once the scan that produced it
    /// has been observed, updating both the shared background snapshot and
    /// this worktree's published snapshot.
    fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &Context<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let snapshot = &mut worktree.background_snapshot.lock().0;
                let entry = snapshot.insert_entry(entry, &worktree.file_scan_inclusions);
                // Publish the mutated background snapshot.
                worktree.snapshot = snapshot.clone();
                entry
            })?
        })
    }
2392
    /// Asks the host to delete (or trash) the entry, then removes it from the
    /// local snapshots once the host scan reflecting the deletion has been
    /// observed.
    fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let response = self.client.request(proto::DeleteProjectEntry {
            project_id: self.project_id,
            entry_id: entry_id.to_proto(),
            use_trash: trash,
        });
        Some(cx.spawn(move |this, mut cx| async move {
            let response = response.await?;
            let scan_id = response.worktree_scan_id as usize;

            // Wait for the host's deletion scan to arrive before mutating.
            this.update(&mut cx, move |this, _| {
                this.as_remote_mut().unwrap().wait_for_snapshot(scan_id)
            })?
            .await?;

            this.update(&mut cx, |this, _| {
                let this = this.as_remote_mut().unwrap();
                let snapshot = &mut this.background_snapshot.lock().0;
                snapshot.delete_entry(entry_id);
                // Publish the mutated background snapshot.
                this.snapshot = snapshot.clone();
            })
        }))
    }
2421
    /// Asks the host to rename the entry. Resolves to the new entry when the
    /// destination is included in the worktree, or to just the absolute path
    /// when the destination is excluded from scanning.
    fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let new_path: Arc<Path> = new_path.into();
        let response = self.client.request(proto::RenameProjectEntry {
            project_id: self.project_id,
            entry_id: entry_id.to_proto(),
            new_path: new_path.as_ref().to_proto(),
        });
        cx.spawn(move |this, mut cx| async move {
            let response = response.await?;
            match response.entry {
                // The host returned the renamed entry: wait for the matching
                // scan and insert it locally.
                Some(entry) => this
                    .update(&mut cx, |this, cx| {
                        this.as_remote_mut().unwrap().insert_entry(
                            entry,
                            response.worktree_scan_id as usize,
                            cx,
                        )
                    })?
                    .await
                    .map(CreatedEntry::Included),
                // No entry means the destination is excluded; report only its
                // absolute path.
                None => {
                    let abs_path = this.update(&mut cx, |worktree, _| {
                        worktree
                            .absolutize(&new_path)
                            .with_context(|| format!("absolutizing {new_path:?}"))
                    })??;
                    Ok(CreatedEntry::Excluded { abs_path })
                }
            }
        })
    }
2458}
2459
2460impl Snapshot {
2461 pub fn new(id: u64, root_name: String, abs_path: Arc<Path>) -> Self {
2462 Snapshot {
2463 id: WorktreeId::from_usize(id as usize),
2464 abs_path: abs_path.into(),
2465 root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
2466 root_name,
2467 always_included_entries: Default::default(),
2468 entries_by_path: Default::default(),
2469 entries_by_id: Default::default(),
2470 repositories: Default::default(),
2471 scan_id: 1,
2472 completed_scan_id: 0,
2473 }
2474 }
2475
    /// The id of this worktree.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    // TODO:
    // Consider the following:
    //
    // ```rust
    // let abs_path: Arc<Path> = snapshot.abs_path(); // e.g. "C:\Users\user\Desktop\project"
    // let some_non_trimmed_path = Path::new("\\\\?\\C:\\Users\\user\\Desktop\\project\\main.rs");
    // // The caller perform some actions here:
    // some_non_trimmed_path.strip_prefix(abs_path); // This fails
    // some_non_trimmed_path.starts_with(abs_path); // This fails too
    // ```
    //
    // This is definitely a bug, but it's not clear if we should handle it here or not.
    /// The absolute path of this worktree's root on the host filesystem.
    pub fn abs_path(&self) -> &Arc<Path> {
        self.abs_path.as_path()
    }
2495
2496 fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
2497 let mut updated_entries = self
2498 .entries_by_path
2499 .iter()
2500 .map(proto::Entry::from)
2501 .collect::<Vec<_>>();
2502 updated_entries.sort_unstable_by_key(|e| e.id);
2503
2504 let mut updated_repositories = self
2505 .repositories
2506 .iter()
2507 .map(|repository| repository.initial_update())
2508 .collect::<Vec<_>>();
2509 updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
2510
2511 proto::UpdateWorktree {
2512 project_id,
2513 worktree_id,
2514 abs_path: self.abs_path().to_proto(),
2515 root_name: self.root_name().to_string(),
2516 updated_entries,
2517 removed_entries: Vec::new(),
2518 scan_id: self.scan_id as u64,
2519 is_last_update: self.completed_scan_id == self.scan_id,
2520 updated_repositories,
2521 removed_repositories: Vec::new(),
2522 }
2523 }
2524
2525 pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
2526 if path
2527 .components()
2528 .any(|component| !matches!(component, std::path::Component::Normal(_)))
2529 {
2530 return Err(anyhow!("invalid path"));
2531 }
2532 if path.file_name().is_some() {
2533 Ok(self.abs_path.as_path().join(path))
2534 } else {
2535 Ok(self.abs_path.as_path().to_path_buf())
2536 }
2537 }
2538
    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
2542
    /// Converts a protobuf entry and inserts it into both trees, evicting any
    /// stale record left at the entry's previous path when it has moved.
    fn insert_entry(
        &mut self,
        entry: proto::Entry,
        always_included_paths: &PathMatcher,
    ) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, always_included_paths, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        if let Some(old_entry) = old_entry {
            // The entry moved: drop the stale record at its previous path.
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
2564
    /// Removes the entry with the given id, along with every entry beneath
    /// its path, from both trees. Returns the removed entry's path, or `None`
    /// if no such entry exists.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>(&());
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and all of its descendants, removing
            // their id records as we go...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the remainder unchanged.
            new_entries_by_path.append(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }
2585
    /// The git status of the file at `path`, if it lies inside a repository
    /// known to this snapshot.
    pub fn status_for_file(&self, path: impl AsRef<Path>) -> Option<FileStatus> {
        let path = path.as_ref();
        self.repository_for_path(path).and_then(|repo| {
            // `repository_for_path` only returns repos whose work directory
            // contains `path`, so relativizing is expected not to fail here.
            let repo_path = repo.relativize(path).unwrap();
            repo.statuses_by_path
                .get(&PathKey(repo_path.0), &())
                .map(|entry| entry.status)
        })
    }
2595
2596 fn update_abs_path(&mut self, abs_path: SanitizedPath, root_name: String) {
2597 self.abs_path = abs_path;
2598 if root_name != self.root_name {
2599 self.root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
2600 self.root_name = root_name;
2601 }
2602 }
2603
    /// Applies an `UpdateWorktree` message received from the host, mutating
    /// this snapshot's entries and repositories in place.
    pub(crate) fn apply_remote_update(
        &mut self,
        mut update: proto::UpdateWorktree,
        always_included_paths: &PathMatcher,
    ) -> Result<()> {
        log::debug!(
            "applying remote worktree update. {} entries updated, {} removed",
            update.updated_entries.len(),
            update.removed_entries.len()
        );
        self.update_abs_path(
            SanitizedPath::from(PathBuf::from_proto(update.abs_path)),
            update.root_name,
        );

        // Accumulate all entry mutations and apply them in one batch below.
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();

        for entry_id in update.removed_entries {
            let entry_id = ProjectEntryId::from_proto(entry_id);
            entries_by_id_edits.push(Edit::Remove(entry_id));
            if let Some(entry) = self.entry_for_id(entry_id) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, always_included_paths, entry))?;
            // If the entry moved, remove its record at the old path.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            // If a different entry occupied this path, remove it by id.
            if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
                if old_entry.id != entry.id {
                    entries_by_id_edits.push(Edit::Remove(old_entry.id));
                }
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Drop repositories the host reported as removed.
        update.removed_repositories.sort_unstable();
        self.repositories.retain(&(), |entry: &RepositoryEntry| {
            update
                .removed_repositories
                .binary_search(&entry.work_directory_id.to_proto())
                .is_err()
        });

        for repository in update.updated_repositories {
            let work_directory_id = ProjectEntryId::from_proto(repository.work_directory_id);
            if let Some(work_dir_entry) = self.entry_for_id(work_directory_id) {
                let conflicted_paths = TreeSet::from_ordered_entries(
                    repository
                        .current_merge_conflicts
                        .into_iter()
                        .map(|path| RepoPath(Path::new(&path).into())),
                );

                if self
                    .repositories
                    .contains(&PathKey(work_dir_entry.path.clone()), &())
                {
                    // Known repository: apply incremental status edits.
                    let edits = repository
                        .removed_statuses
                        .into_iter()
                        .map(|path| Edit::Remove(PathKey(FromProto::from_proto(path))))
                        .chain(repository.updated_statuses.into_iter().filter_map(
                            |updated_status| {
                                Some(Edit::Insert(updated_status.try_into().log_err()?))
                            },
                        ))
                        .collect::<Vec<_>>();

                    self.repositories
                        .update(&PathKey(work_dir_entry.path.clone()), &(), |repo| {
                            repo.branch = repository.branch_summary.as_ref().map(proto_to_branch);
                            repo.statuses_by_path.edit(edits, &());
                            repo.current_merge_conflicts = conflicted_paths
                        });
                } else {
                    // New repository: build its status tree from scratch.
                    let statuses = SumTree::from_iter(
                        repository
                            .updated_statuses
                            .into_iter()
                            .filter_map(|updated_status| updated_status.try_into().log_err()),
                        &(),
                    );

                    self.repositories.insert_or_replace(
                        RepositoryEntry {
                            work_directory_id,
                            // When syncing repository entries from a peer, we don't need
                            // the location_in_repo field, since git operations don't happen locally
                            // anyway.
                            work_directory: WorkDirectory::InProject {
                                relative_path: work_dir_entry.path.clone(),
                            },
                            branch: repository.branch_summary.as_ref().map(proto_to_branch),
                            statuses_by_path: statuses,
                            current_merge_conflicts: conflicted_paths,
                        },
                        &(),
                    );
                }
            } else {
                log::error!(
                    "no work directory entry for repository {:?}",
                    repository.work_directory_id
                )
            }
        }

        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }
2731
    /// Total number of entries (files and directories), including ignored ones.
    pub fn entry_count(&self) -> usize {
        self.entries_by_path.summary().count
    }

    /// Number of non-ignored entries.
    pub fn visible_entry_count(&self) -> usize {
        self.entries_by_path.summary().non_ignored_count
    }

    /// Number of directories, including ignored ones.
    pub fn dir_count(&self) -> usize {
        let summary = self.entries_by_path.summary();
        summary.count - summary.file_count
    }

    /// Number of non-ignored directories.
    pub fn visible_dir_count(&self) -> usize {
        let summary = self.entries_by_path.summary();
        summary.non_ignored_count - summary.non_ignored_file_count
    }

    /// Number of files, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored files.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().non_ignored_file_count
    }
2757
    /// Creates a traversal positioned at the `start_offset`-th entry, where
    /// only entries matching the `include_*` filters are counted.
    fn traverse_from_offset(
        &self,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor(&());
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_files,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            snapshot: self,
            cursor,
            include_files,
            include_dirs,
            include_ignored,
        }
    }
2784
    /// Creates a traversal positioned at `path`, filtered by the `include_*`
    /// flags.
    pub fn traverse_from_path(
        &self,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        Traversal::new(self, include_files, include_dirs, include_ignored, path)
    }

    /// Iterates over files only, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(true, false, include_ignored, start)
    }

    /// Iterates over directories only, starting at the `start`-th directory.
    pub fn directories(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, true, include_ignored, start)
    }

    /// Iterates over all entries, starting at the `start`-th entry.
    pub fn entries(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(true, true, include_ignored, start)
    }
2806
    /// Collects the status entries of the repository rooted at `work_dir`,
    /// if such a repository exists in this snapshot.
    #[cfg(any(feature = "test-support", test))]
    pub fn git_status(&self, work_dir: &Path) -> Option<Vec<StatusEntry>> {
        self.repositories
            .get(&PathKey(work_dir.into()), &())
            .map(|repo| repo.status().collect())
    }
2813
    /// All repositories known to this snapshot, ordered by work-directory path.
    pub fn repositories(&self) -> &SumTree<RepositoryEntry> {
        &self.repositories
    }

    /// Get the repository whose work directory corresponds to the given path.
    pub(crate) fn repository(&self, work_directory: PathKey) -> Option<RepositoryEntry> {
        self.repositories.get(&work_directory, &()).cloned()
    }

    /// Get the repository whose work directory contains the given path.
    ///
    /// Because repositories are ordered by path, `.last()` selects the
    /// deepest containing repository when repositories are nested.
    #[track_caller]
    pub fn repository_for_path(&self, path: &Path) -> Option<&RepositoryEntry> {
        self.repositories
            .iter()
            .filter(|repo| repo.work_directory.directory_contains(path))
            .last()
    }
2831
    /// Given an ordered iterator of entries, returns an iterator of those entries,
    /// along with their containing git repository.
    #[track_caller]
    pub fn entries_with_repositories<'a>(
        &'a self,
        entries: impl 'a + Iterator<Item = &'a Entry>,
    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
        // Since both `entries` and `repositories` are ordered by path, we can
        // maintain a stack of repositories containing the current entry.
        let mut containing_repos = Vec::<&RepositoryEntry>::new();
        let mut repositories = self.repositories().iter().peekable();
        entries.map(move |entry| {
            // Pop repositories that no longer contain the current entry.
            while let Some(repository) = containing_repos.last() {
                if repository.directory_contains(&entry.path) {
                    break;
                } else {
                    containing_repos.pop();
                }
            }
            // Push upcoming repositories that do contain the current entry.
            while let Some(repository) = repositories.peek() {
                if repository.directory_contains(&entry.path) {
                    containing_repos.push(repositories.next().unwrap());
                } else {
                    break;
                }
            }
            // The innermost containing repository is on top of the stack.
            let repo = containing_repos.last().copied();
            (entry, repo)
        })
    }
2860
2861 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
2862 let empty_path = Path::new("");
2863 self.entries_by_path
2864 .cursor::<()>(&())
2865 .filter(move |entry| entry.path.as_ref() != empty_path)
2866 .map(|entry| &entry.path)
2867 }
2868
2869 pub fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
2870 let options = ChildEntriesOptions {
2871 include_files: true,
2872 include_dirs: true,
2873 include_ignored: true,
2874 };
2875 self.child_entries_with_options(parent_path, options)
2876 }
2877
    /// Iterates over the direct children of `parent_path`, filtered by the
    /// given options.
    pub fn child_entries_with_options<'a>(
        &'a self,
        parent_path: &'a Path,
        options: ChildEntriesOptions,
    ) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor(&());
        // Position the cursor just past the parent entry, on its first child.
        cursor.seek(&TraversalTarget::path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            snapshot: self,
            cursor,
            include_files: options.include_files,
            include_dirs: options.include_dirs,
            include_ignored: options.include_ignored,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }
2897
    /// The entry for the worktree root (stored under the empty relative
    /// path), if it has been scanned.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The worktree's absolute path, but only when the root entry is a
    /// directory.
    /// TODO: what's the difference between `root_dir` and `abs_path`?
    /// is there any? if so, document it.
    pub fn root_dir(&self) -> Option<Arc<Path>> {
        self.root_entry()
            .filter(|entry| entry.is_dir())
            .map(|_| self.abs_path().clone())
    }

    /// The file name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
2913
2914 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
2915 self.repositories
2916 .get(&PathKey(Path::new("").into()), &())
2917 .map(|entry| entry.to_owned())
2918 }
2919
2920 pub fn git_entry(&self, work_directory_path: Arc<Path>) -> Option<RepositoryEntry> {
2921 self.repositories
2922 .get(&PathKey(work_directory_path), &())
2923 .map(|entry| entry.to_owned())
2924 }
2925
    /// Iterates over all repository entries in this snapshot.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repositories.iter()
    }

    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }
2933
2934 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
2935 let path = path.as_ref();
2936 debug_assert!(path.is_relative());
2937 self.traverse_from_path(true, true, true, path)
2938 .entry()
2939 .and_then(|entry| {
2940 if entry.path.as_ref() == path {
2941 Some(entry)
2942 } else {
2943 None
2944 }
2945 })
2946 }
2947
    /// Looks up an entry by id, resolving it through its recorded path.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }
2952
2953 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
2954 self.entry_for_path(path.as_ref()).map(|e| e.inode)
2955 }
2956}
2957
2958impl LocalSnapshot {
2959 pub fn local_repo_for_path(&self, path: &Path) -> Option<&LocalRepositoryEntry> {
2960 let repository_entry = self.repository_for_path(path)?;
2961 let work_directory_id = repository_entry.work_directory_id();
2962 self.git_repositories.get(&work_directory_id)
2963 }
2964
    /// Builds an incremental `UpdateWorktree` message describing the given
    /// entry and repository changes relative to an earlier snapshot state.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updated_repositories = Vec::new();
        let mut removed_repositories = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        for (work_dir_path, change) in repo_changes.iter() {
            let new_repo = self.repositories.get(&PathKey(work_dir_path.clone()), &());
            // Compare the repository's old and new presence to decide between
            // an incremental update, a full initial update, or a removal.
            match (&change.old_repository, new_repo) {
                (Some(old_repo), Some(new_repo)) => {
                    updated_repositories.push(new_repo.build_update(old_repo));
                }
                (None, Some(new_repo)) => {
                    updated_repositories.push(new_repo.initial_update());
                }
                (Some(old_repo), None) => {
                    removed_repositories.push(old_repo.work_directory_id.to_proto());
                }
                _ => {}
            }
        }

        // Sort by id so the receiver can apply deterministically and use
        // binary search.
        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);
        removed_repositories.sort_unstable();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        // An entry that reappears in `updated_entries` is not truly removed.
        // TODO - optimize, knowing that removed_entries are sorted.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_proto(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }
3022
    /// Inserts an entry into the snapshot. A `.gitignore` file additionally
    /// has its rules loaded and cached; a pending directory keeps the kind of
    /// any previously scanned entry at the same path.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.as_path().join(&entry.path);
            // Parse and cache the ignore rules for this file's directory.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Don't downgrade a directory that was already scanned back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            if removed.id != entry.id {
                // A different entry occupied this path; drop its id record.
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
3068
3069 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
3070 let mut inodes = TreeSet::default();
3071 for ancestor in path.ancestors().skip(1) {
3072 if let Some(entry) = self.entry_for_path(ancestor) {
3073 inodes.insert(entry.inode);
3074 }
3075 }
3076 inodes
3077 }
3078
    /// Builds the stack of gitignore rules that apply to `abs_path`, walking
    /// up its ancestors until a directory containing `.git` is reached.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for (index, ancestor) in abs_path.ancestors().enumerate() {
            // Skip index 0 (`abs_path` itself): only ancestor directories can
            // contribute ignore rules for it.
            if index > 0 {
                if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                    new_ignores.push((ancestor, Some(ignore.clone())));
                } else {
                    new_ignores.push((ancestor, None));
                }
            }
            // Stop collecting at the repository boundary.
            if ancestor.join(*DOT_GIT).exists() {
                break;
            }
        }

        // Apply rules from the outermost ancestor downward; once any ancestor
        // directory is itself ignored, everything beneath it is too.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
3110
    /// Directories that were scanned despite being external or ignored,
    /// i.e. directories that were explicitly expanded.
    #[cfg(test)]
    pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
        self.entries_by_path
            .cursor::<()>(&())
            .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
    }
3117
    /// Asserts the internal consistency of the snapshot: the two entry trees
    /// agree, traversals visit entries in tree order, and (when `git_state`
    /// is set) gitignore bookkeeping matches entries in the tree.
    #[cfg(test)]
    pub fn check_invariants(&self, git_state: bool) {
        use pretty_assertions::assert_eq;

        // Both trees must contain exactly the same (path, id) pairs.
        assert_eq!(
            self.entries_by_path
                .cursor::<()>(&())
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>(&())
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        // The file traversal must visit exactly the file entries, and the
        // visible traversal exactly the non-ignored/always-included files.
        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>(&()) {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if (!entry.is_ignored && !entry.is_external) || entry.is_always_included {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // Walking the tree parent-before-children must reproduce the same
        // order as iterating entries_by_path directly.
        let mut bfs_paths = Vec::new();
        let mut stack = self
            .root_entry()
            .map(|e| e.path.as_ref())
            .into_iter()
            .collect::<Vec<_>>();
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>(&())
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        let dfs_paths_via_traversal = self
            .entries(true, 0)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        if git_state {
            // Every cached gitignore must have a matching directory entry and
            // a `.gitignore` entry inside it.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path = ignore_parent_abs_path
                    .strip_prefix(self.abs_path.as_path())
                    .unwrap();
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(*GITIGNORE))
                    .is_some());
            }
        }
    }
3189
    /// Asserts that `repositories` and `git_repositories` describe the same
    /// set of repositories.
    #[cfg(test)]
    fn check_git_invariants(&self) {
        let dotgit_paths = self
            .git_repositories
            .iter()
            .map(|repo| repo.1.dot_git_dir_abs_path.clone())
            .collect::<HashSet<_>>();
        let work_dir_paths = self
            .repositories
            .iter()
            .map(|repo| repo.work_directory.path_key())
            .collect::<HashSet<_>>();
        assert_eq!(dotgit_paths.len(), work_dir_paths.len());
        assert_eq!(self.repositories.iter().count(), work_dir_paths.len());
        assert_eq!(self.git_repositories.iter().count(), work_dir_paths.len());
        for entry in self.repositories.iter() {
            // Every repository entry must have a matching local repository.
            self.git_repositories.get(&entry.work_directory_id).unwrap();
        }
    }
3209
3210 #[cfg(test)]
3211 pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3212 let mut paths = Vec::new();
3213 for entry in self.entries_by_path.cursor::<()>(&()) {
3214 if include_ignored || !entry.is_ignored {
3215 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3216 }
3217 }
3218 paths.sort_by(|a, b| a.0.cmp(b.0));
3219 paths
3220 }
3221}
3222
3223impl BackgroundScannerState {
    /// Whether the background scanner should descend into the given directory
    /// entry now, rather than deferring it until it is explicitly expanded.
    fn should_scan_directory(&self, entry: &Entry) -> bool {
        // Regular (non-external, non-ignored or always-included) directories
        // are scanned eagerly; `.git` and the local settings folder are
        // always scanned; otherwise, scan only if this directory was scanned
        // before or was explicitly requested via paths or prefixes.
        (!entry.is_external && (!entry.is_ignored || entry.is_always_included))
            || entry.path.file_name() == Some(*DOT_GIT)
            || entry.path.file_name() == Some(local_settings_folder_relative_path().as_os_str())
            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
            || self
                .paths_to_scan
                .iter()
                .any(|p| p.starts_with(&entry.path))
            || self
                .path_prefixes_to_scan
                .iter()
                .any(|p| entry.path.starts_with(p))
    }
3238
3239 fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
3240 let path = entry.path.clone();
3241 let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
3242 let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);
3243
3244 if !ancestor_inodes.contains(&entry.inode) {
3245 ancestor_inodes.insert(entry.inode);
3246 scan_job_tx
3247 .try_send(ScanJob {
3248 abs_path,
3249 path,
3250 ignore_stack,
3251 scan_queue: scan_job_tx.clone(),
3252 ancestor_inodes,
3253 is_external: entry.is_external,
3254 })
3255 .unwrap();
3256 }
3257 }
3258
3259 fn reuse_entry_id(&mut self, entry: &mut Entry) {
3260 if let Some(mtime) = entry.mtime {
3261 // If an entry with the same inode was removed from the worktree during this scan,
3262 // then it *might* represent the same file or directory. But the OS might also have
3263 // re-used the inode for a completely different file or directory.
3264 //
3265 // Conditionally reuse the old entry's id:
3266 // * if the mtime is the same, the file was probably been renamed.
3267 // * if the path is the same, the file may just have been updated
3268 if let Some(removed_entry) = self.removed_entries.remove(&entry.inode) {
3269 if removed_entry.mtime == Some(mtime) || removed_entry.path == entry.path {
3270 entry.id = removed_entry.id;
3271 }
3272 } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
3273 entry.id = existing_entry.id;
3274 }
3275 }
3276 }
3277
3278 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs, watcher: &dyn Watcher) -> Entry {
3279 self.reuse_entry_id(&mut entry);
3280 let entry = self.snapshot.insert_entry(entry, fs);
3281 if entry.path.file_name() == Some(&DOT_GIT) {
3282 self.insert_git_repository(entry.path.clone(), fs, watcher);
3283 }
3284
3285 #[cfg(test)]
3286 self.snapshot.check_invariants(false);
3287
3288 entry
3289 }
3290
3291 fn populate_dir(
3292 &mut self,
3293 parent_path: &Arc<Path>,
3294 entries: impl IntoIterator<Item = Entry>,
3295 ignore: Option<Arc<Gitignore>>,
3296 ) {
3297 let mut parent_entry = if let Some(parent_entry) = self
3298 .snapshot
3299 .entries_by_path
3300 .get(&PathKey(parent_path.clone()), &())
3301 {
3302 parent_entry.clone()
3303 } else {
3304 log::warn!(
3305 "populating a directory {:?} that has been removed",
3306 parent_path
3307 );
3308 return;
3309 };
3310
3311 match parent_entry.kind {
3312 EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
3313 EntryKind::Dir => {}
3314 _ => return,
3315 }
3316
3317 if let Some(ignore) = ignore {
3318 let abs_parent_path = self.snapshot.abs_path.as_path().join(parent_path).into();
3319 self.snapshot
3320 .ignores_by_parent_abs_path
3321 .insert(abs_parent_path, (ignore, false));
3322 }
3323
3324 let parent_entry_id = parent_entry.id;
3325 self.scanned_dirs.insert(parent_entry_id);
3326 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
3327 let mut entries_by_id_edits = Vec::new();
3328
3329 for entry in entries {
3330 entries_by_id_edits.push(Edit::Insert(PathEntry {
3331 id: entry.id,
3332 path: entry.path.clone(),
3333 is_ignored: entry.is_ignored,
3334 scan_id: self.snapshot.scan_id,
3335 }));
3336 entries_by_path_edits.push(Edit::Insert(entry));
3337 }
3338
3339 self.snapshot
3340 .entries_by_path
3341 .edit(entries_by_path_edits, &());
3342 self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
3343
3344 if let Err(ix) = self.changed_paths.binary_search(parent_path) {
3345 self.changed_paths.insert(ix, parent_path.clone());
3346 }
3347
3348 #[cfg(test)]
3349 self.snapshot.check_invariants(false);
3350 }
3351
3352 fn remove_path(&mut self, path: &Path) {
3353 let mut new_entries;
3354 let removed_entries;
3355 {
3356 let mut cursor = self
3357 .snapshot
3358 .entries_by_path
3359 .cursor::<TraversalProgress>(&());
3360 new_entries = cursor.slice(&TraversalTarget::path(path), Bias::Left, &());
3361 removed_entries = cursor.slice(&TraversalTarget::successor(path), Bias::Left, &());
3362 new_entries.append(cursor.suffix(&()), &());
3363 }
3364 self.snapshot.entries_by_path = new_entries;
3365
3366 let mut removed_ids = Vec::with_capacity(removed_entries.summary().count);
3367 for entry in removed_entries.cursor::<()>(&()) {
3368 match self.removed_entries.entry(entry.inode) {
3369 hash_map::Entry::Occupied(mut e) => {
3370 let prev_removed_entry = e.get_mut();
3371 if entry.id > prev_removed_entry.id {
3372 *prev_removed_entry = entry.clone();
3373 }
3374 }
3375 hash_map::Entry::Vacant(e) => {
3376 e.insert(entry.clone());
3377 }
3378 }
3379
3380 if entry.path.file_name() == Some(&GITIGNORE) {
3381 let abs_parent_path = self
3382 .snapshot
3383 .abs_path
3384 .as_path()
3385 .join(entry.path.parent().unwrap());
3386 if let Some((_, needs_update)) = self
3387 .snapshot
3388 .ignores_by_parent_abs_path
3389 .get_mut(abs_parent_path.as_path())
3390 {
3391 *needs_update = true;
3392 }
3393 }
3394
3395 if let Err(ix) = removed_ids.binary_search(&entry.id) {
3396 removed_ids.insert(ix, entry.id);
3397 }
3398 }
3399
3400 self.snapshot.entries_by_id.edit(
3401 removed_ids.iter().map(|&id| Edit::Remove(id)).collect(),
3402 &(),
3403 );
3404 self.snapshot
3405 .git_repositories
3406 .retain(|id, _| removed_ids.binary_search(id).is_err());
3407 self.snapshot.repositories.retain(&(), |repository| {
3408 !repository.work_directory.path_key().0.starts_with(path)
3409 });
3410
3411 #[cfg(test)]
3412 self.snapshot.check_invariants(false);
3413 }
3414
3415 fn insert_git_repository(
3416 &mut self,
3417 dot_git_path: Arc<Path>,
3418 fs: &dyn Fs,
3419 watcher: &dyn Watcher,
3420 ) -> Option<LocalRepositoryEntry> {
3421 let work_dir_path: Arc<Path> = match dot_git_path.parent() {
3422 Some(parent_dir) => {
3423 // Guard against repositories inside the repository metadata
3424 if parent_dir.iter().any(|component| component == *DOT_GIT) {
3425 log::info!(
3426 "not building git repository for nested `.git` directory, `.git` path in the worktree: {dot_git_path:?}"
3427 );
3428 return None;
3429 };
3430 log::info!(
3431 "building git repository, `.git` path in the worktree: {dot_git_path:?}"
3432 );
3433
3434 parent_dir.into()
3435 }
3436 None => {
3437 // `dot_git_path.parent().is_none()` means `.git` directory is the opened worktree itself,
3438 // no files inside that directory are tracked by git, so no need to build the repo around it
3439 log::info!(
3440 "not building git repository for the worktree itself, `.git` path in the worktree: {dot_git_path:?}"
3441 );
3442 return None;
3443 }
3444 };
3445
3446 self.insert_git_repository_for_path(
3447 WorkDirectory::InProject {
3448 relative_path: work_dir_path,
3449 },
3450 dot_git_path,
3451 fs,
3452 watcher,
3453 )
3454 }
3455
3456 fn insert_git_repository_for_path(
3457 &mut self,
3458 work_directory: WorkDirectory,
3459 dot_git_path: Arc<Path>,
3460 fs: &dyn Fs,
3461 watcher: &dyn Watcher,
3462 ) -> Option<LocalRepositoryEntry> {
3463 let work_dir_id = self
3464 .snapshot
3465 .entry_for_path(work_directory.path_key().0)
3466 .map(|entry| entry.id)?;
3467
3468 if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
3469 return None;
3470 }
3471
3472 let dot_git_abs_path = self.snapshot.abs_path.as_path().join(&dot_git_path);
3473
3474 let t0 = Instant::now();
3475 let repository = fs.open_repo(&dot_git_abs_path)?;
3476
3477 let repository_path = repository.path();
3478 watcher.add(&repository_path).log_err()?;
3479
3480 let actual_dot_git_dir_abs_path = repository.main_repository_path();
3481 let dot_git_worktree_abs_path = if actual_dot_git_dir_abs_path == dot_git_abs_path {
3482 None
3483 } else {
3484 // The two paths could be different because we opened a git worktree.
3485 // When that happens:
3486 //
3487 // * `dot_git_abs_path` is a file that points to the worktree-subdirectory in the actual
3488 // .git directory.
3489 //
3490 // * `repository_path` is the worktree-subdirectory.
3491 //
3492 // * `actual_dot_git_dir_abs_path` is the path to the actual .git directory. In git
3493 // documentation this is called the "commondir".
3494 watcher.add(&dot_git_abs_path).log_err()?;
3495 Some(Arc::from(dot_git_abs_path))
3496 };
3497
3498 log::trace!("constructed libgit2 repo in {:?}", t0.elapsed());
3499
3500 if let Some(git_hosting_provider_registry) = self.git_hosting_provider_registry.clone() {
3501 git_hosting_providers::register_additional_providers(
3502 git_hosting_provider_registry,
3503 repository.clone(),
3504 );
3505 }
3506
3507 self.snapshot.repositories.insert_or_replace(
3508 RepositoryEntry {
3509 work_directory_id: work_dir_id,
3510 work_directory: work_directory.clone(),
3511 branch: None,
3512 statuses_by_path: Default::default(),
3513 current_merge_conflicts: Default::default(),
3514 },
3515 &(),
3516 );
3517
3518 let local_repository = LocalRepositoryEntry {
3519 work_directory_id: work_dir_id,
3520 work_directory: work_directory.clone(),
3521 git_dir_scan_id: 0,
3522 status_scan_id: 0,
3523 repo_ptr: repository.clone(),
3524 dot_git_dir_abs_path: actual_dot_git_dir_abs_path.into(),
3525 dot_git_worktree_abs_path,
3526 current_merge_head_shas: Default::default(),
3527 };
3528
3529 self.snapshot
3530 .git_repositories
3531 .insert(work_dir_id, local_repository.clone());
3532
3533 Some(local_repository)
3534 }
3535}
3536
3537async fn is_git_dir(path: &Path, fs: &dyn Fs) -> bool {
3538 if path.file_name() == Some(&*DOT_GIT) {
3539 return true;
3540 }
3541
3542 // If we're in a bare repository, we are not inside a `.git` folder. In a
3543 // bare repository, the root folder contains what would normally be in the
3544 // `.git` folder.
3545 let head_metadata = fs.metadata(&path.join("HEAD")).await;
3546 if !matches!(head_metadata, Ok(Some(_))) {
3547 return false;
3548 }
3549 let config_metadata = fs.metadata(&path.join("config")).await;
3550 matches!(config_metadata, Ok(Some(_)))
3551}
3552
3553async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
3554 let contents = fs.load(abs_path).await?;
3555 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
3556 let mut builder = GitignoreBuilder::new(parent);
3557 for line in contents.lines() {
3558 builder.add_line(Some(abs_path.into()), line)?;
3559 }
3560 Ok(builder.build()?)
3561}
3562
/// A `Worktree` dereferences to the snapshot shared by both variants.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}

/// A local worktree dereferences to its richer local snapshot.
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

/// Debug output delegates to the snapshot's `Debug` implementation.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
3595
3596impl fmt::Debug for Snapshot {
3597 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3598 struct EntriesById<'a>(&'a SumTree<PathEntry>);
3599 struct EntriesByPath<'a>(&'a SumTree<Entry>);
3600
3601 impl<'a> fmt::Debug for EntriesByPath<'a> {
3602 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3603 f.debug_map()
3604 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
3605 .finish()
3606 }
3607 }
3608
3609 impl<'a> fmt::Debug for EntriesById<'a> {
3610 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3611 f.debug_list().entries(self.0.iter()).finish()
3612 }
3613 }
3614
3615 f.debug_struct("Snapshot")
3616 .field("id", &self.id)
3617 .field("root_name", &self.root_name)
3618 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
3619 .field("entries_by_id", &EntriesById(&self.entries_by_id))
3620 .finish()
3621 }
3622}
3623
/// A handle to a file within a [`Worktree`].
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree that contains (or contained) this file.
    pub worktree: Entity<Worktree>,
    /// The file's path relative to the worktree root.
    pub path: Arc<Path>,
    /// Whether the file exists on disk, and its mtime if so.
    pub disk_state: DiskState,
    /// The worktree entry backing this file, if one exists.
    pub entry_id: Option<ProjectEntryId>,
    /// Whether the containing worktree is local (as opposed to remote).
    pub is_local: bool,
    /// Whether the file is considered private (e.g. a `.env` file).
    pub is_private: bool,
}
3633
impl language::File for File {
    /// Returns `self` as a local file when the containing worktree is local.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn disk_state(&self) -> DiskState {
        self.disk_state
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a display path: the worktree's root name (or, for invisible
    /// worktrees, its absolute path, `~`-abbreviated when under the home
    /// directory) joined with the worktree-relative path.
    fn full_path(&self, cx: &App) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(home_dir().as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(home_dir().as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path denotes the worktree root itself.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a App) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn worktree_id(&self, cx: &App) -> WorktreeId {
        self.worktree.read(cx).id()
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission over RPC.
    fn to_proto(&self, cx: &App) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.read(cx).id().to_proto(),
            entry_id: self.entry_id.map(|id| id.to_proto()),
            path: self.path.as_ref().to_proto(),
            mtime: self.disk_state.mtime().map(|time| time.into()),
            is_deleted: self.disk_state == DiskState::Deleted,
        }
    }

    fn is_private(&self) -> bool {
        self.is_private
    }
}
3705
impl language::LocalFile for File {
    /// The file's absolute path on disk.
    fn abs_path(&self, cx: &App) -> PathBuf {
        let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
        // An empty relative path denotes the worktree root itself.
        if self.path.as_ref() == Path::new("") {
            worktree_path.as_path().to_path_buf()
        } else {
            worktree_path.as_path().join(&self.path)
        }
    }

    /// Loads the file's text content on a background thread.
    fn load(&self, cx: &App) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background_executor()
            .spawn(async move { fs.load(&abs_path?).await })
    }

    /// Loads the file's raw bytes on a background thread.
    fn load_bytes(&self, cx: &App) -> Task<Result<Vec<u8>>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background_executor()
            .spawn(async move { fs.load_bytes(&abs_path?).await })
    }
}
3732
3733impl File {
3734 pub fn for_entry(entry: Entry, worktree: Entity<Worktree>) -> Arc<Self> {
3735 Arc::new(Self {
3736 worktree,
3737 path: entry.path.clone(),
3738 disk_state: if let Some(mtime) = entry.mtime {
3739 DiskState::Present { mtime }
3740 } else {
3741 DiskState::New
3742 },
3743 entry_id: Some(entry.id),
3744 is_local: true,
3745 is_private: entry.is_private,
3746 })
3747 }
3748
3749 pub fn from_proto(
3750 proto: rpc::proto::File,
3751 worktree: Entity<Worktree>,
3752 cx: &App,
3753 ) -> Result<Self> {
3754 let worktree_id = worktree
3755 .read(cx)
3756 .as_remote()
3757 .ok_or_else(|| anyhow!("not remote"))?
3758 .id();
3759
3760 if worktree_id.to_proto() != proto.worktree_id {
3761 return Err(anyhow!("worktree id does not match file"));
3762 }
3763
3764 let disk_state = if proto.is_deleted {
3765 DiskState::Deleted
3766 } else {
3767 if let Some(mtime) = proto.mtime.map(&Into::into) {
3768 DiskState::Present { mtime }
3769 } else {
3770 DiskState::New
3771 }
3772 };
3773
3774 Ok(Self {
3775 worktree,
3776 path: Arc::<Path>::from_proto(proto.path),
3777 disk_state,
3778 entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
3779 is_local: false,
3780 is_private: false,
3781 })
3782 }
3783
3784 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
3785 file.and_then(|f| f.as_any().downcast_ref())
3786 }
3787
3788 pub fn worktree_id(&self, cx: &App) -> WorktreeId {
3789 self.worktree.read(cx).id()
3790 }
3791
3792 pub fn project_entry_id(&self, _: &App) -> Option<ProjectEntryId> {
3793 match self.disk_state {
3794 DiskState::Deleted => None,
3795 _ => self.entry_id,
3796 }
3797 }
3798}
3799
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable id for this entry; reused across scans when the entry appears
    /// to be the same file (see `BackgroundScannerState::reuse_entry_id`).
    pub id: ProjectEntryId,
    /// Whether this is a file or a directory, and the directory's load state.
    pub kind: EntryKind,
    /// The entry's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// The entry's filesystem inode.
    pub inode: u64,
    /// Modification time; `None` when the entry is not present on disk.
    pub mtime: Option<MTime>,

    /// The entry's canonicalized path, when it differs from `path`
    /// (presumably populated for symlinked entries — verify at call sites).
    pub canonical_path: Option<Box<Path>>,
    /// Whether this entry is ignored by Git.
    ///
    /// We only scan ignored entries once the directory is expanded and
    /// exclude them from searches.
    pub is_ignored: bool,

    /// Whether this entry is always included in searches.
    ///
    /// This is used for entries that are always included in searches, even
    /// if they are ignored by git. Overridden by file_scan_exclusions.
    pub is_always_included: bool,

    /// Whether this entry's canonical path is outside of the worktree.
    /// This means the entry is only accessible from the worktree root via a
    /// symlink.
    ///
    /// We only scan entries outside of the worktree once the symlinked
    /// directory is expanded. External entries are treated like gitignored
    /// entries in that they are not included in searches.
    pub is_external: bool,

    /// Whether this entry is considered to be a `.env` file.
    pub is_private: bool,
    /// The entry's size on disk, in bytes.
    pub size: u64,
    /// The set of characters in the entry's path, used for fuzzy matching.
    pub char_bag: CharBag,
    /// Whether the file is a FIFO (named pipe).
    pub is_fifo: bool,
}

/// The kind of a worktree [`Entry`], including a directory's scan state.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose contents have not been scanned.
    UnloadedDir,
    /// A directory that has been discovered but not yet populated with its
    /// children.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A regular file.
    File,
}
3845
/// Describes how a filesystem entry changed between snapshots.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}

/// Notification that a git repository within the worktree changed.
#[derive(Debug)]
pub struct GitRepositoryChange {
    /// The previous state of the repository, if it already existed.
    pub old_repository: Option<RepositoryEntry>,
}

/// A batch of entry changes: (path, entry id, kind of change).
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// A batch of repository changes: (path, change).
pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
3870
/// A file's git status, keyed by its path within the repository.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct StatusEntry {
    pub repo_path: RepoPath,
    pub status: FileStatus,
}

impl StatusEntry {
    /// Whether the entry is staged, if known.
    pub fn is_staged(&self) -> Option<bool> {
        self.status.is_staged()
    }

    /// Serializes the status for RPC, deriving a single "simple" status code
    /// alongside the full status.
    fn to_proto(&self) -> proto::StatusEntry {
        // For tracked files, prefer the worktree status over the index status
        // when the worktree side has a modification.
        let simple_status = match self.status {
            FileStatus::Ignored | FileStatus::Untracked => proto::GitStatus::Added as i32,
            FileStatus::Unmerged { .. } => proto::GitStatus::Conflict as i32,
            FileStatus::Tracked(TrackedStatus {
                index_status,
                worktree_status,
            }) => tracked_status_to_proto(if worktree_status != StatusCode::Unmodified {
                worktree_status
            } else {
                index_status
            }),
        };

        proto::StatusEntry {
            repo_path: self.repo_path.as_ref().to_proto(),
            simple_status,
            status: Some(status_to_proto(self.status)),
        }
    }
}

impl TryFrom<proto::StatusEntry> for StatusEntry {
    type Error = anyhow::Error;

    fn try_from(value: proto::StatusEntry) -> Result<Self, Self::Error> {
        let repo_path = RepoPath(Arc::<Path>::from_proto(value.repo_path));
        let status = status_from_proto(value.simple_status, value.status)?;
        Ok(Self { repo_path, status })
    }
}
3913
/// Seek dimension tracking the maximum path seen so far (borrowed form).
#[derive(Clone, Debug)]
struct PathProgress<'a> {
    max_path: &'a Path,
}

/// Summary for path-keyed sum-tree items, combining the rightmost path in a
/// range with an item-specific summary `S`.
#[derive(Clone, Debug)]
pub struct PathSummary<S> {
    /// The largest path in the summarized range.
    max_path: Arc<Path>,
    /// The aggregated item summary for the range.
    item_summary: S,
}

impl<S: Summary> Summary for PathSummary<S> {
    type Context = S::Context;

    fn zero(cx: &Self::Context) -> Self {
        Self {
            max_path: Path::new("").into(),
            item_summary: S::zero(cx),
        }
    }

    fn add_summary(&mut self, rhs: &Self, cx: &Self::Context) {
        // Items are ordered by path, so the right-hand summary holds the max.
        self.max_path = rhs.max_path.clone();
        self.item_summary.add_summary(&rhs.item_summary, cx);
    }
}

impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for PathProgress<'a> {
    fn zero(_: &<PathSummary<S> as Summary>::Context) -> Self {
        Self {
            max_path: Path::new(""),
        }
    }

    fn add_summary(
        &mut self,
        summary: &'a PathSummary<S>,
        _: &<PathSummary<S> as Summary>::Context,
    ) {
        self.max_path = summary.max_path.as_ref()
    }
}
3956
impl sum_tree::Item for RepositoryEntry {
    type Summary = PathSummary<Unit>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.work_directory.path_key().0,
            item_summary: Unit,
        }
    }
}

impl sum_tree::KeyedItem for RepositoryEntry {
    type Key = PathKey;

    /// Repositories are keyed by their work directory's path.
    fn key(&self) -> Self::Key {
        self.work_directory.path_key()
    }
}

impl sum_tree::Item for StatusEntry {
    type Summary = PathSummary<GitSummary>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.repo_path.0.clone(),
            item_summary: self.status.summary(),
        }
    }
}

impl sum_tree::KeyedItem for StatusEntry {
    type Key = PathKey;

    /// Status entries are keyed by their repository-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.repo_path.0.clone())
    }
}

/// Accumulates git status counts across a range of status entries.
impl<'a> sum_tree::Dimension<'a, PathSummary<GitSummary>> for GitSummary {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<GitSummary>, _: &()) {
        *self += summary.item_summary
    }
}

/// Tracks the rightmost (maximum) path as an owned key.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for PathKey {
    fn zero(_: &S::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<S>, _: &S::Context) {
        self.0 = summary.max_path.clone();
    }
}

/// Tracks the rightmost (maximum) path as a borrowed traversal position.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for TraversalProgress<'a> {
    fn zero(_cx: &S::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<S>, _: &S::Context) {
        self.max_path = summary.max_path.as_ref();
    }
}
4024
impl Entry {
    /// Creates a new entry from filesystem metadata, allocating the next
    /// project entry id.
    fn new(
        path: Arc<Path>,
        metadata: &fs::Metadata,
        next_entry_id: &AtomicUsize,
        root_char_bag: CharBag,
        canonical_path: Option<Box<Path>>,
    ) -> Self {
        let char_bag = char_bag_for_path(root_char_bag, &path);
        Self {
            id: ProjectEntryId::new(next_entry_id),
            // Directories start as `PendingDir` until their children are scanned.
            kind: if metadata.is_dir {
                EntryKind::PendingDir
            } else {
                EntryKind::File
            },
            path,
            inode: metadata.inode,
            mtime: Some(metadata.mtime),
            size: metadata.len,
            canonical_path,
            is_ignored: false,
            is_always_included: false,
            is_external: false,
            is_private: false,
            char_bag,
            is_fifo: metadata.is_fifo,
        }
    }

    /// Whether the entry exists on disk (it has a recorded mtime).
    pub fn is_created(&self) -> bool {
        self.mtime.is_some()
    }

    /// Whether this entry is a directory, in any scan state.
    pub fn is_dir(&self) -> bool {
        self.kind.is_dir()
    }

    /// Whether this entry is a regular file.
    pub fn is_file(&self) -> bool {
        self.kind.is_file()
    }
}
4067
4068impl EntryKind {
4069 pub fn is_dir(&self) -> bool {
4070 matches!(
4071 self,
4072 EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
4073 )
4074 }
4075
4076 pub fn is_unloaded(&self) -> bool {
4077 matches!(self, EntryKind::UnloadedDir)
4078 }
4079
4080 pub fn is_file(&self) -> bool {
4081 matches!(self, EntryKind::File)
4082 }
4083}
4084
4085impl sum_tree::Item for Entry {
4086 type Summary = EntrySummary;
4087
4088 fn summary(&self, _cx: &()) -> Self::Summary {
4089 let non_ignored_count = if (self.is_ignored || self.is_external) && !self.is_always_included
4090 {
4091 0
4092 } else {
4093 1
4094 };
4095 let file_count;
4096 let non_ignored_file_count;
4097 if self.is_file() {
4098 file_count = 1;
4099 non_ignored_file_count = non_ignored_count;
4100 } else {
4101 file_count = 0;
4102 non_ignored_file_count = 0;
4103 }
4104
4105 EntrySummary {
4106 max_path: self.path.clone(),
4107 count: 1,
4108 non_ignored_count,
4109 file_count,
4110 non_ignored_file_count,
4111 }
4112 }
4113}
4114
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and ordered) by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}

/// Aggregate statistics for a range of worktree entries.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The largest path in the range.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries counted as visible (not ignored/external, or always included).
    non_ignored_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// File entries counted as visible.
    non_ignored_file_count: usize,
}

impl Default for EntrySummary {
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}

impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.non_ignored_count += rhs.non_ignored_count;
        self.file_count += rhs.file_count;
        self.non_ignored_file_count += rhs.non_ignored_file_count;
    }
}
4159
/// An item in the id-keyed tree, mapping entry ids back to paths.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this entry was last updated.
    scan_id: usize,
}

/// Shared counter of git status scans still in flight after an FS scan.
#[derive(Debug, Default)]
struct FsScanned {
    status_scans: Arc<AtomicU32>,
}

impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    fn summary(&self, _cx: &()) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}

impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// `PathEntry`s are keyed (and ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}

/// Summary for the id-keyed tree: the largest entry id in a range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}

impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn zero(_cx: &Self::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}

impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
4215
/// Ordering key for path-keyed sum trees.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct PathKey(Arc<Path>);

impl Default for PathKey {
    fn default() -> Self {
        Self(Path::new("").into())
    }
}

impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}

/// Scans a local worktree on a background thread, keeping the shared
/// snapshot in sync with the filesystem and servicing scan requests.
struct BackgroundScanner {
    // Snapshot state shared with the foreground worktree.
    state: Arc<Mutex<BackgroundScannerState>>,
    fs: Arc<dyn Fs>,
    fs_case_sensitive: bool,
    // Channel used to report scan progress back to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: BackgroundExecutor,
    // Incoming requests to (re)scan specific paths.
    scan_requests_rx: channel::Receiver<ScanRequest>,
    // Incoming requests to start scanning everything under a path prefix.
    path_prefixes_to_scan_rx: channel::Receiver<PathPrefixScanRequest>,
    next_entry_id: Arc<AtomicUsize>,
    phase: BackgroundScannerPhase,
    watcher: Arc<dyn Watcher>,
    settings: WorktreeSettings,
    share_private_files: bool,
}

/// Which stage of its lifecycle the background scanner is in.
#[derive(Copy, Clone, PartialEq)]
enum BackgroundScannerPhase {
    /// Performing the initial recursive scan of the worktree.
    InitialScan,
    /// Processing FS events that arrived during the initial scan.
    EventsReceivedDuringInitialScan,
    /// Steady state: processing FS events as they arrive.
    Events,
}
4256
4257impl BackgroundScanner {
4258 async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>) {
4259 // If the worktree root does not contain a git repository, then find
4260 // the git repository in an ancestor directory. Find any gitignore files
4261 // in ancestor directories.
4262 let root_abs_path = self.state.lock().snapshot.abs_path.clone();
4263 let mut containing_git_repository = None;
4264 for (index, ancestor) in root_abs_path.as_path().ancestors().enumerate() {
4265 if index != 0 {
4266 if let Ok(ignore) =
4267 build_gitignore(&ancestor.join(*GITIGNORE), self.fs.as_ref()).await
4268 {
4269 self.state
4270 .lock()
4271 .snapshot
4272 .ignores_by_parent_abs_path
4273 .insert(ancestor.into(), (ignore.into(), false));
4274 }
4275 }
4276
4277 let ancestor_dot_git = ancestor.join(*DOT_GIT);
4278 // Check whether the directory or file called `.git` exists (in the
4279 // case of worktrees it's a file.)
4280 if self
4281 .fs
4282 .metadata(&ancestor_dot_git)
4283 .await
4284 .is_ok_and(|metadata| metadata.is_some())
4285 {
4286 if index != 0 {
4287 // We canonicalize, since the FS events use the canonicalized path.
4288 if let Some(ancestor_dot_git) =
4289 self.fs.canonicalize(&ancestor_dot_git).await.log_err()
4290 {
4291 // We associate the external git repo with our root folder and
4292 // also mark where in the git repo the root folder is located.
4293 let local_repository = self.state.lock().insert_git_repository_for_path(
4294 WorkDirectory::AboveProject {
4295 absolute_path: ancestor.into(),
4296 location_in_repo: root_abs_path
4297 .as_path()
4298 .strip_prefix(ancestor)
4299 .unwrap()
4300 .into(),
4301 },
4302 ancestor_dot_git.clone().into(),
4303 self.fs.as_ref(),
4304 self.watcher.as_ref(),
4305 );
4306
4307 if local_repository.is_some() {
4308 containing_git_repository = Some(ancestor_dot_git)
4309 }
4310 };
4311 }
4312
4313 // Reached root of git repository.
4314 break;
4315 }
4316 }
4317
4318 let (scan_job_tx, scan_job_rx) = channel::unbounded();
4319 {
4320 let mut state = self.state.lock();
4321 state.snapshot.scan_id += 1;
4322 if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
4323 let ignore_stack = state
4324 .snapshot
4325 .ignore_stack_for_abs_path(root_abs_path.as_path(), true);
4326 if ignore_stack.is_abs_path_ignored(root_abs_path.as_path(), true) {
4327 root_entry.is_ignored = true;
4328 state.insert_entry(root_entry.clone(), self.fs.as_ref(), self.watcher.as_ref());
4329 }
4330 state.enqueue_scan_dir(root_abs_path.into(), &root_entry, &scan_job_tx);
4331 }
4332 };
4333
4334 // Perform an initial scan of the directory.
4335 drop(scan_job_tx);
4336 let scans_running = self.scan_dirs(true, scan_job_rx).await;
4337 {
4338 let mut state = self.state.lock();
4339 state.snapshot.completed_scan_id = state.snapshot.scan_id;
4340 }
4341
4342 let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
4343 self.send_status_update(scanning, SmallVec::new());
4344
        // Process any FS events that occurred while performing the initial scan.
4346 // For these events, update events cannot be as precise, because we didn't
4347 // have the previous state loaded yet.
4348 self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
4349 if let Poll::Ready(Some(mut paths)) = futures::poll!(fs_events_rx.next()) {
4350 while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
4351 paths.extend(more_paths);
4352 }
4353 self.process_events(paths.into_iter().map(Into::into).collect())
4354 .await;
4355 }
4356 if let Some(abs_path) = containing_git_repository {
4357 self.process_events(vec![abs_path]).await;
4358 }
4359
4360 // Continue processing events until the worktree is dropped.
4361 self.phase = BackgroundScannerPhase::Events;
4362
4363 loop {
4364 select_biased! {
4365 // Process any path refresh requests from the worktree. Prioritize
4366 // these before handling changes reported by the filesystem.
4367 request = self.next_scan_request().fuse() => {
4368 let Ok(request) = request else { break };
4369 let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
4370 if !self.process_scan_request(request, scanning).await {
4371 return;
4372 }
4373 }
4374
4375 path_prefix_request = self.path_prefixes_to_scan_rx.recv().fuse() => {
4376 let Ok(request) = path_prefix_request else { break };
4377 log::trace!("adding path prefix {:?}", request.path);
4378
4379 let did_scan = self.forcibly_load_paths(&[request.path.clone()]).await;
4380 if did_scan {
4381 let abs_path =
4382 {
4383 let mut state = self.state.lock();
4384 state.path_prefixes_to_scan.insert(request.path.clone());
4385 state.snapshot.abs_path.as_path().join(&request.path)
4386 };
4387
4388 if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
4389 self.process_events(vec![abs_path]).await;
4390 }
4391 }
4392 let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
4393 self.send_status_update(scanning, request.done);
4394 }
4395
4396 paths = fs_events_rx.next().fuse() => {
4397 let Some(mut paths) = paths else { break };
4398 while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
4399 paths.extend(more_paths);
4400 }
4401 self.process_events(paths.into_iter().map(Into::into).collect()).await;
4402 }
4403 }
4404 }
4405 }
4406
4407 async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
4408 log::debug!("rescanning paths {:?}", request.relative_paths);
4409
4410 request.relative_paths.sort_unstable();
4411 self.forcibly_load_paths(&request.relative_paths).await;
4412
4413 let root_path = self.state.lock().snapshot.abs_path.clone();
4414 let root_canonical_path = match self.fs.canonicalize(root_path.as_path()).await {
4415 Ok(path) => SanitizedPath::from(path),
4416 Err(err) => {
4417 log::error!("failed to canonicalize root path: {}", err);
4418 return true;
4419 }
4420 };
4421 let abs_paths = request
4422 .relative_paths
4423 .iter()
4424 .map(|path| {
4425 if path.file_name().is_some() {
4426 root_canonical_path.as_path().join(path).to_path_buf()
4427 } else {
4428 root_canonical_path.as_path().to_path_buf()
4429 }
4430 })
4431 .collect::<Vec<_>>();
4432
4433 {
4434 let mut state = self.state.lock();
4435 let is_idle = state.snapshot.completed_scan_id == state.snapshot.scan_id;
4436 state.snapshot.scan_id += 1;
4437 if is_idle {
4438 state.snapshot.completed_scan_id = state.snapshot.scan_id;
4439 }
4440 }
4441
4442 self.reload_entries_for_paths(
4443 root_path,
4444 root_canonical_path,
4445 &request.relative_paths,
4446 abs_paths,
4447 None,
4448 )
4449 .await;
4450
4451 self.send_status_update(scanning, request.done)
4452 }
4453
    /// Handles a batch of raw FS event paths: detects a possible root rename,
    /// filters out events that are irrelevant (skipped .git internals, paths
    /// outside the root, unloaded directories, excluded paths), reloads the
    /// affected entries, and kicks off follow-up ignore/git-status updates.
    async fn process_events(&self, mut abs_paths: Vec<PathBuf>) {
        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(root_path.as_path()).await {
            Ok(path) => SanitizedPath::from(path),
            Err(err) => {
                // The root could not be canonicalized — it may have been
                // renamed. Ask the root file handle for its current path and,
                // if it differs, report the rename to the worktree.
                let new_path = self
                    .state
                    .lock()
                    .snapshot
                    .root_file_handle
                    .clone()
                    .and_then(|handle| handle.current_path(&self.fs).log_err())
                    .map(SanitizedPath::from)
                    .filter(|new_path| *new_path != root_path);

                if let Some(new_path) = new_path.as_ref() {
                    log::info!(
                        "root renamed from {} to {}",
                        root_path.as_path().display(),
                        new_path.as_path().display()
                    )
                } else {
                    log::warn!("root path could not be canonicalized: {}", err);
                }
                self.status_updates_tx
                    .unbounded_send(ScanState::RootUpdated { new_path })
                    .ok();
                return;
            }
        };

        // Certain directories may have FS changes, but do not lead to git data changes that Zed cares about.
        // Ignore these, to avoid Zed unnecessarily rescanning git metadata.
        let skipped_files_in_dot_git = HashSet::from_iter([*COMMIT_MESSAGE, *INDEX_LOCK]);
        let skipped_dirs_in_dot_git = [*FSMONITOR_DAEMON];

        let mut relative_paths = Vec::with_capacity(abs_paths.len());
        let mut dot_git_abs_paths = Vec::new();
        // Sort so that a directory immediately precedes its descendants, then
        // drop any event whose path lies within an earlier event's path —
        // the ancestor's rescan will cover it.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(b));
        abs_paths.retain(|abs_path| {
            let abs_path = SanitizedPath::from(abs_path);

            let snapshot = &self.state.lock().snapshot;
            {
                let mut is_git_related = false;

                // If the event is inside a git dir, record which one, so the
                // corresponding repository can be refreshed below.
                let dot_git_paths = abs_path.as_path().ancestors().find_map(|ancestor| {
                    if smol::block_on(is_git_dir(ancestor, self.fs.as_ref())) {
                        let path_in_git_dir = abs_path.as_path().strip_prefix(ancestor).expect("stripping off the ancestor");
                        Some((ancestor.to_owned(), path_in_git_dir.to_owned()))
                    } else {
                        None
                    }
                });

                if let Some((dot_git_abs_path, path_in_git_dir)) = dot_git_paths {
                    if skipped_files_in_dot_git.contains(path_in_git_dir.as_os_str()) || skipped_dirs_in_dot_git.iter().any(|skipped_git_subdir| path_in_git_dir.starts_with(skipped_git_subdir)) {
                        log::debug!("ignoring event {abs_path:?} as it's in the .git directory among skipped files or directories");
                        return false;
                    }

                    is_git_related = true;
                    if !dot_git_abs_paths.contains(&dot_git_abs_path) {
                        dot_git_abs_paths.push(dot_git_abs_path);
                    }
                }

                let relative_path: Arc<Path> =
                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                        path.into()
                    } else {
                        if is_git_related {
                            log::debug!(
                                "ignoring event {abs_path:?}, since it's in git dir outside of root path {root_canonical_path:?}",
                            );
                        } else {
                            log::error!(
                                "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
                            );
                        }
                        return false;
                    };

                // A changed .gitignore can affect the status of every
                // repository whose work directory contains it.
                if abs_path.0.file_name() == Some(*GITIGNORE) {
                    for (_, repo) in snapshot.git_repositories.iter().filter(|(_, repo)| repo.directory_contains(&relative_path)) {
                        if !dot_git_abs_paths.iter().any(|dot_git_abs_path| dot_git_abs_path == repo.dot_git_dir_abs_path.as_ref()) {
                            dot_git_abs_paths.push(repo.dot_git_dir_abs_path.to_path_buf());
                        }
                    }
                }

                // Events inside directories that haven't been loaded yet will
                // be picked up whenever those directories are scanned.
                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
                    snapshot
                        .entry_for_path(parent)
                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
                });
                if !parent_dir_is_loaded {
                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
                    return false;
                }

                if self.settings.is_path_excluded(&relative_path) {
                    if !is_git_related {
                        log::debug!("ignoring FS event for excluded path {relative_path:?}");
                    }
                    return false;
                }

                relative_paths.push(relative_path);
                true
            }
        });

        if relative_paths.is_empty() && dot_git_abs_paths.is_empty() {
            return;
        }

        self.state.lock().snapshot.scan_id += 1;

        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        log::debug!("received fs events {:?}", relative_paths);
        self.reload_entries_for_paths(
            root_path,
            root_canonical_path,
            &relative_paths,
            abs_paths,
            Some(scan_job_tx.clone()),
        )
        .await;

        self.update_ignore_statuses(scan_job_tx).await;
        let scans_running = self.scan_dirs(false, scan_job_rx).await;

        let status_update = if !dot_git_abs_paths.is_empty() {
            Some(self.update_git_repositories(dot_git_abs_paths))
        } else {
            None
        };

        // Finish up off-thread: wait for git status updates, mark the scan
        // complete, and send a final status update to the worktree.
        let phase = self.phase;
        let status_update_tx = self.status_updates_tx.clone();
        let state = self.state.clone();
        self.executor
            .spawn(async move {
                if let Some(status_update) = status_update {
                    status_update.await;
                }

                {
                    let mut state = state.lock();
                    state.snapshot.completed_scan_id = state.snapshot.scan_id;
                    for (_, entry) in mem::take(&mut state.removed_entries) {
                        state.scanned_dirs.remove(&entry.id);
                    }
                    #[cfg(test)]
                    state.snapshot.check_git_invariants();
                }
                let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
                send_status_update_inner(phase, state, status_update_tx, scanning, SmallVec::new());
            })
            .detach();
    }
4617
    /// Ensures the given paths are present in the snapshot by synchronously
    /// scanning any of their ancestor directories that are still unloaded.
    ///
    /// Returns `true` if any scanning was actually performed.
    async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        {
            let mut state = self.state.lock();
            let root_path = state.snapshot.abs_path.clone();
            for path in paths {
                // Walk upward from the path itself; scan from the first
                // ancestor that exists in the snapshot but is unloaded (the
                // scan recurses, covering everything beneath it).
                for ancestor in path.ancestors() {
                    if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
                        if entry.kind == EntryKind::UnloadedDir {
                            let abs_path = root_path.as_path().join(ancestor);
                            state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
                            state.paths_to_scan.insert(path.clone());
                            break;
                        }
                    }
                }
            }
            // Close the sender so the drain loop below terminates once all
            // jobs (including ones enqueued recursively) are processed.
            drop(scan_job_tx);
        }
        let scans_running = Arc::new(AtomicU32::new(0));
        while let Ok(job) = scan_job_rx.recv().await {
            self.scan_dir(&scans_running, &job).await.log_err();
        }

        // `paths_to_scan` is only populated above, so it doubles as the
        // "did we scan anything" flag; clear it for the next call.
        !mem::take(&mut self.state.lock().paths_to_scan).is_empty()
    }
4644
    /// Drains `scan_jobs_rx` using a pool of workers (one per CPU), while
    /// still servicing interleaved scan requests and emitting periodic
    /// progress updates.
    ///
    /// Returns a handle whose counter tracks git status scans that may still
    /// be running after the directory scan itself has finished.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) -> FsScanned {
        // If nobody is listening for status updates, the worktree has been
        // dropped; skip the scan entirely.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return FsScanned::default();
        }

        // Starts at 1 to account for this directory scan itself; `scan_dir`
        // increments it further for each scheduled git status update.
        let scans_running = Arc::new(AtomicU32::new(1));
        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.next_scan_request().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race: bump its local count
                                            // and send the update.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, SmallVec::new());
                                        }
                                        Err(count) => {
                                            // Another worker already sent this update;
                                            // adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&scans_running, &job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    });
                }
            })
            .await;

        // The directory scan is done; drop the initial count so the counter
        // now reflects only outstanding git status scans.
        scans_running.fetch_sub(1, atomic::Ordering::Release);
        FsScanned {
            status_scans: scans_running,
        }
    }
4721
4722 fn send_status_update(&self, scanning: bool, barrier: SmallVec<[barrier::Sender; 1]>) -> bool {
4723 send_status_update_inner(
4724 self.phase,
4725 self.state.clone(),
4726 self.status_updates_tx.clone(),
4727 scanning,
4728 barrier,
4729 )
4730 }
4731
    /// Scans a single directory: reads its children, builds entries for them,
    /// registers git repositories / gitignores it discovers, and enqueues
    /// scan jobs for child directories.
    async fn scan_dir(&self, scans_running: &Arc<AtomicU32>, job: &ScanJob) -> Result<()> {
        let root_abs_path;
        let root_char_bag;
        {
            let snapshot = &self.state.lock().snapshot;
            if self.settings.is_path_excluded(&job.path) {
                log::error!("skipping excluded directory {:?}", job.path);
                return Ok(());
            }
            log::debug!("scanning directory {:?}", job.path);
            root_abs_path = snapshot.abs_path().clone();
            root_char_bag = snapshot.root_char_bag;
        }

        let next_entry_id = self.next_entry_id.clone();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        // Canonicalized lazily, only if a symlink child is encountered.
        let mut root_canonical_path = None;
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory entry (in order); `None` marks a directory
        // that must not be recursed into.
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut child_paths = self
            .fs
            .read_dir(&job.abs_path)
            .await?
            .filter_map(|entry| async {
                match entry {
                    Ok(entry) => Some(entry),
                    Err(error) => {
                        log::error!("error processing entry {:?}", error);
                        None
                    }
                }
            })
            .collect::<Vec<_>>()
            .await;

        // Ensure that .git and .gitignore are processed first.
        swap_to_front(&mut child_paths, *GITIGNORE);
        swap_to_front(&mut child_paths, *DOT_GIT);

        let mut git_status_update_jobs = Vec::new();
        for child_abs_path in child_paths {
            let child_abs_path: Arc<Path> = child_abs_path.into();
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();

            if child_name == *DOT_GIT {
                {
                    let mut state = self.state.lock();
                    let repo = state.insert_git_repository(
                        child_path.clone(),
                        self.fs.as_ref(),
                        self.watcher.as_ref(),
                    );
                    if let Some(local_repo) = repo {
                        // Count the pending status update so callers can tell
                        // scanning is still in progress; decremented in the
                        // spawned task below once the update completes.
                        scans_running.fetch_add(1, atomic::Ordering::Release);
                        git_status_update_jobs
                            .push(self.schedule_git_statuses_update(&mut state, local_repo));
                    }
                }
            } else if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }
            }

            if self.settings.is_path_excluded(&child_path) {
                log::debug!("skipping excluded child entry {child_path:?}");
                self.state.lock().remove_path(&child_path);
                continue;
            }

            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {child_abs_path:?}: {err:?}");
                    continue;
                }
            };

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
                None,
            );

            if job.is_external {
                // Everything under an external directory is external.
                child_entry.is_external = true;
            } else if child_metadata.is_symlink {
                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
                    Ok(path) => path,
                    Err(err) => {
                        log::error!(
                            "error reading target of symlink {:?}: {:?}",
                            child_abs_path,
                            err
                        );
                        continue;
                    }
                };

                // lazily canonicalize the root path in order to determine if
                // symlinks point outside of the worktree.
                let root_canonical_path = match &root_canonical_path {
                    Some(path) => path,
                    None => match self.fs.canonicalize(&root_abs_path).await {
                        Ok(path) => root_canonical_path.insert(path),
                        Err(err) => {
                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
                            continue;
                        }
                    },
                };

                if !canonical_path.starts_with(root_canonical_path) {
                    child_entry.is_external = true;
                }

                child_entry.canonical_path = Some(canonical_path.into());
            }

            if child_entry.is_dir() {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_always_included = self.settings.is_path_always_included(&child_path);

                // Avoid recursing until crash in the case of a recursive symlink
                if job.ancestor_inodes.contains(&child_entry.inode) {
                    new_jobs.push(None);
                } else {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path.clone(),
                        path: child_path,
                        is_external: child_entry.is_external,
                        ignore_stack: if child_entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
                child_entry.is_always_included = self.settings.is_path_always_included(&child_path);
            }

            {
                let relative_path = job.path.join(child_name);
                if self.is_path_private(&relative_path) {
                    log::debug!("detected private file: {relative_path:?}");
                    child_entry.is_private = true;
                }
            }

            new_entries.push(child_entry);
        }

        // Await the git status updates off-thread; when they finish,
        // decrement the counter and notify the worktree if anything changed.
        let task_state = self.state.clone();
        let phase = self.phase;
        let status_updates_tx = self.status_updates_tx.clone();
        let scans_running = scans_running.clone();
        self.executor
            .spawn(async move {
                if !git_status_update_jobs.is_empty() {
                    let status_updates = join_all(git_status_update_jobs).await;
                    let status_updated = status_updates
                        .iter()
                        .any(|update_result| update_result.is_ok());
                    scans_running.fetch_sub(status_updates.len() as u32, atomic::Ordering::Release);
                    if status_updated {
                        let scanning = scans_running.load(atomic::Ordering::Acquire) > 0;
                        send_status_update_inner(
                            phase,
                            task_state,
                            status_updates_tx,
                            scanning,
                            SmallVec::new(),
                        );
                    }
                }
            })
            .detach();

        let mut state = self.state.lock();

        // Identify any subdirectories that should not be scanned.
        // `job_ix` tracks the position of the current directory's job within
        // `new_jobs`; deferred directories have their job removed in place.
        let mut job_ix = 0;
        for entry in &mut new_entries {
            state.reuse_entry_id(entry);
            if entry.is_dir() {
                if state.should_scan_directory(entry) {
                    job_ix += 1;
                } else {
                    log::debug!("defer scanning directory {:?}", entry.path);
                    entry.kind = EntryKind::UnloadedDir;
                    new_jobs.remove(job_ix);
                }
            }
            if entry.is_always_included {
                state
                    .snapshot
                    .always_included_entries
                    .push(entry.path.clone());
            }
        }

        state.populate_dir(&job.path, new_entries, new_ignore);
        self.watcher.add(job.abs_path.as_ref()).log_err();

        // Enqueue the surviving child-directory jobs (the `None` slots mark
        // recursive symlinks and are skipped).
        for new_job in new_jobs.into_iter().flatten() {
            job.scan_queue
                .try_send(new_job)
                .expect("channel is unbounded");
        }

        Ok(())
    }
4967
    /// All list arguments should be sorted before calling this function
    ///
    /// Refreshes the snapshot entries for the given paths: fetches fresh
    /// metadata, removes entries for deleted paths, updates git statuses for
    /// paths inside repositories, and re-inserts updated entries (optionally
    /// enqueueing recursive scans via `scan_queue_tx`).
    async fn reload_entries_for_paths(
        &self,
        root_abs_path: SanitizedPath,
        root_canonical_path: SanitizedPath,
        relative_paths: &[Arc<Path>],
        abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) {
        // grab metadata for all requested paths
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| async move {
                    let metadata = self.fs.metadata(abs_path).await?;
                    if let Some(metadata) = metadata {
                        let canonical_path = self.fs.canonicalize(abs_path).await?;

                        // If we're on a case-insensitive filesystem (default on macOS), we want
                        // to only ignore metadata for non-symlink files if their absolute-path matches
                        // the canonical-path.
                        // Because if not, this might be a case-only-renaming (`mv test.txt TEST.TXT`)
                        // and we want to ignore the metadata for the old path (`test.txt`) so it's
                        // treated as removed.
                        if !self.fs_case_sensitive && !metadata.is_symlink {
                            let canonical_file_name = canonical_path.file_name();
                            let file_name = abs_path.file_name();
                            if canonical_file_name != file_name {
                                return Ok(None);
                            }
                        }

                        anyhow::Ok(Some((metadata, SanitizedPath::from(canonical_path))))
                    } else {
                        Ok(None)
                    }
                })
                .collect::<Vec<_>>(),
        )
        .await;

        let mut state = self.state.lock();
        let doing_recursive_update = scan_queue_tx.is_some();

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            if matches!(metadata, Ok(None)) || doing_recursive_update {
                log::trace!("remove path {:?}", path);
                state.remove_path(path);
            }
        }

        // Group all relative paths by their git repository.
        let mut paths_by_git_repo = HashMap::default();
        for relative_path in relative_paths.iter() {
            let repository_data = state
                .snapshot
                .local_repo_for_path(relative_path)
                .zip(state.snapshot.repository_for_path(relative_path));
            if let Some((local_repo, entry)) = repository_data {
                if let Ok(repo_path) = local_repo.relativize(relative_path) {
                    paths_by_git_repo
                        .entry(local_repo.work_directory.clone())
                        .or_insert_with(|| RepoPaths {
                            entry: entry.clone(),
                            repo: local_repo.repo_ptr.clone(),
                            repo_paths: Default::default(),
                        })
                        .add_path(repo_path);
                }
            }
        }

        // For each affected repository, query git for the current status of
        // the changed paths and diff it against the stored statuses.
        for (work_directory, mut paths) in paths_by_git_repo {
            if let Ok(status) = paths.repo.status(&paths.repo_paths) {
                let mut changed_path_statuses = Vec::new();
                let statuses = paths.entry.statuses_by_path.clone();
                let mut cursor = statuses.cursor::<PathProgress>(&());

                for (repo_path, status) in &*status.entries {
                    paths.remove_repo_path(repo_path);
                    if cursor.seek_forward(&PathTarget::Path(repo_path), Bias::Left, &()) {
                        // Skip paths whose stored status already matches.
                        if &cursor.item().unwrap().status == status {
                            continue;
                        }
                    }

                    changed_path_statuses.push(Edit::Insert(StatusEntry {
                        repo_path: repo_path.clone(),
                        status: *status,
                    }));
                }

                // Any queried path git did not report on no longer has a
                // status; remove its stored entry if one exists.
                let mut cursor = statuses.cursor::<PathProgress>(&());
                for path in paths.repo_paths {
                    if cursor.seek_forward(&PathTarget::Path(&path), Bias::Left, &()) {
                        changed_path_statuses.push(Edit::Remove(PathKey(path.0)));
                    }
                }

                if !changed_path_statuses.is_empty() {
                    let work_directory_id = state.snapshot.repositories.update(
                        &work_directory.path_key(),
                        &(),
                        move |repository_entry| {
                            repository_entry
                                .statuses_by_path
                                .edit(changed_path_statuses, &());

                            repository_entry.work_directory_id
                        },
                    );

                    if let Some(work_directory_id) = work_directory_id {
                        let scan_id = state.snapshot.scan_id;
                        state.snapshot.git_repositories.update(
                            &work_directory_id,
                            |local_repository_entry| {
                                local_repository_entry.status_scan_id = scan_id;
                            },
                        );
                    }
                }
            }
        }

        // Re-insert entries for paths that still exist, using the fresh
        // metadata gathered above.
        for (path, metadata) in relative_paths.iter().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.as_path().join(path).into();
            match metadata {
                Ok(Some((metadata, canonical_path))) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let is_external = !canonical_path.starts_with(&root_canonical_path);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                        if metadata.is_symlink {
                            Some(canonical_path.as_path().to_path_buf().into())
                        } else {
                            None
                        },
                    );

                    let is_dir = fs_entry.is_dir();
                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
                    fs_entry.is_external = is_external;
                    fs_entry.is_private = self.is_path_private(path);
                    fs_entry.is_always_included = self.settings.is_path_always_included(path);

                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, is_dir) {
                        if state.should_scan_directory(&fs_entry)
                            || (fs_entry.path.as_os_str().is_empty()
                                && abs_path.file_name() == Some(*DOT_GIT))
                        {
                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
                        } else {
                            fs_entry.kind = EntryKind::UnloadedDir;
                        }
                    }

                    state.insert_entry(fs_entry.clone(), self.fs.as_ref(), self.watcher.as_ref());
                }
                Ok(None) => {
                    self.remove_repo_path(path, &mut state.snapshot);
                }
                Err(err) => {
                    log::error!("error reading file {abs_path:?} on event: {err:#}");
                }
            }
        }

        util::extend_sorted(
            &mut state.changed_paths,
            relative_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );
    }
5151
5152 fn remove_repo_path(&self, path: &Arc<Path>, snapshot: &mut LocalSnapshot) -> Option<()> {
5153 if !path
5154 .components()
5155 .any(|component| component.as_os_str() == *DOT_GIT)
5156 {
5157 if let Some(repository) = snapshot.repository(PathKey(path.clone())) {
5158 snapshot
5159 .git_repositories
5160 .remove(&repository.work_directory_id);
5161 snapshot
5162 .snapshot
5163 .repositories
5164 .remove(&repository.work_directory.path_key(), &());
5165 return Some(());
5166 }
5167 }
5168
5169 Some(())
5170 }
5171
    /// Recomputes ignore statuses for all directories affected by modified
    /// `.gitignore` files, using a pool of workers. Newly un-ignored
    /// directories that need loading are enqueued onto `scan_job_tx`.
    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
        let mut ignores_to_update = Vec::new();
        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        let prev_snapshot;
        {
            let snapshot = &mut self.state.lock().snapshot;
            let abs_path = snapshot.abs_path.clone();
            // Collect the parent directories whose gitignore changed, and
            // drop records for gitignore files that no longer exist.
            snapshot
                .ignores_by_parent_abs_path
                .retain(|parent_abs_path, (_, needs_update)| {
                    if let Ok(parent_path) = parent_abs_path.strip_prefix(abs_path.as_path()) {
                        if *needs_update {
                            *needs_update = false;
                            if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                                ignores_to_update.push(parent_abs_path.clone());
                            }
                        }

                        let ignore_path = parent_path.join(*GITIGNORE);
                        if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                            return false;
                        }
                    }
                    true
                });

            // Sort so ancestors precede descendants, then skip any path that
            // is contained within the previous one — updating the ancestor
            // recursively covers it.
            ignores_to_update.sort_unstable();
            let mut ignores_to_update = ignores_to_update.into_iter().peekable();
            while let Some(parent_abs_path) = ignores_to_update.next() {
                while ignores_to_update
                    .peek()
                    .map_or(false, |p| p.starts_with(&parent_abs_path))
                {
                    ignores_to_update.next().unwrap();
                }

                let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
                ignore_queue_tx
                    .send_blocking(UpdateIgnoreStatusJob {
                        abs_path: parent_abs_path,
                        ignore_stack,
                        ignore_queue: ignore_queue_tx.clone(),
                        scan_queue: scan_job_tx.clone(),
                    })
                    .unwrap();
            }

            prev_snapshot = snapshot.clone();
        }
        // Drop the root sender; the queue drains once all jobs (including
        // those enqueued recursively via `ignore_queue`) are processed.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.next_scan_request().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &prev_snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
5250
    /// Applies an updated ignore stack to the direct children of
    /// `job.abs_path`, recording entries whose ignored flag flipped and
    /// enqueueing recursive jobs for child directories.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        log::trace!("update ignore status {:?}", job.abs_path);

        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own .gitignore, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job
            .abs_path
            .strip_prefix(snapshot.abs_path.as_path())
            .unwrap();

        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());

            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };

                // Scan any directories that were previously ignored and weren't previously scanned.
                if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
                    let state = self.state.lock();
                    if state.should_scan_directory(&entry) {
                        state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
                    }
                }

                // Recurse into the child directory via the ignore queue.
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                        scan_queue: job.scan_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only entries whose ignored flag actually changed produce edits.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Record the changed paths (kept sorted) and apply the batched edits
        // to the live snapshot.
        let state = &mut self.state.lock();
        for edit in &entries_by_path_edits {
            if let Edit::Insert(entry) = edit {
                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
                    state.changed_paths.insert(ix, entry.path.clone());
                }
            }
        }

        state
            .snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
5321
    /// Reloads the repositories whose `.git` directories changed, registers
    /// any new repositories, prunes repositories whose `.git` no longer
    /// exists, and returns a task that resolves once the resulting git
    /// status updates have finished.
    fn update_git_repositories(&self, dot_git_paths: Vec<PathBuf>) -> Task<()> {
        log::debug!("reloading repositories: {dot_git_paths:?}");

        let mut status_updates = Vec::new();
        {
            let mut state = self.state.lock();
            let scan_id = state.snapshot.scan_id;
            for dot_git_dir in dot_git_paths {
                // Match against either the repo's git dir or, for git
                // worktrees, its worktree-specific git dir.
                let existing_repository_entry =
                    state
                        .snapshot
                        .git_repositories
                        .iter()
                        .find_map(|(_, repo)| {
                            if repo.dot_git_dir_abs_path.as_ref() == &dot_git_dir
                                || repo.dot_git_worktree_abs_path.as_deref() == Some(&dot_git_dir)
                            {
                                Some(repo.clone())
                            } else {
                                None
                            }
                        });

                let local_repository = match existing_repository_entry {
                    None => {
                        // NOTE(review): on strip_prefix failure this returns
                        // early, abandoning the remaining dot-git paths and
                        // the stale-repository cleanup below, rather than
                        // `continue`-ing — confirm this is intentional.
                        let Ok(relative) = dot_git_dir.strip_prefix(state.snapshot.abs_path())
                        else {
                            return Task::ready(());
                        };
                        match state.insert_git_repository(
                            relative.into(),
                            self.fs.as_ref(),
                            self.watcher.as_ref(),
                        ) {
                            Some(output) => output,
                            None => continue,
                        }
                    }
                    Some(local_repository) => {
                        // Already reloaded during this scan; skip.
                        if local_repository.git_dir_scan_id == scan_id {
                            continue;
                        }
                        local_repository.repo_ptr.reload_index();

                        state.snapshot.git_repositories.update(
                            &local_repository.work_directory_id,
                            |entry| {
                                entry.git_dir_scan_id = scan_id;
                                entry.status_scan_id = scan_id;
                            },
                        );

                        local_repository
                    }
                };

                status_updates
                    .push(self.schedule_git_statuses_update(&mut state, local_repository));
            }

            // Remove any git repositories whose .git entry no longer exists.
            let snapshot = &mut state.snapshot;
            let mut ids_to_preserve = HashSet::default();
            for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
                let exists_in_snapshot = snapshot
                    .entry_for_id(work_directory_id)
                    .map_or(false, |entry| {
                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                    });

                // Repositories above the project root aren't in the snapshot,
                // so also check the filesystem directly.
                if exists_in_snapshot
                    || matches!(
                        smol::block_on(self.fs.metadata(&entry.dot_git_dir_abs_path)),
                        Ok(Some(_))
                    )
                {
                    ids_to_preserve.insert(work_directory_id);
                }
            }

            snapshot
                .git_repositories
                .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
            snapshot.repositories.retain(&(), |entry| {
                ids_to_preserve.contains(&entry.work_directory_id)
            });
        }

        // The returned task completes when all scheduled status updates do;
        // cancellation of an individual update is ignored.
        self.executor.spawn(async move {
            let _updates_finished: Vec<Result<(), oneshot::Canceled>> =
                join_all(status_updates).await;
        })
    }
5415
5416 /// Update the git statuses for a given batch of entries.
    /// Update the git statuses for a given batch of entries.
    ///
    /// Spawns a background task (stored in `repository_scans`, so a newer scan
    /// for the same repository replaces — and thereby cancels — an older one)
    /// that refreshes branch info, recomputes the full status list, and writes
    /// the result back into the shared snapshot. The returned receiver
    /// resolves when the task finishes, or yields `Canceled` if it is dropped.
    fn schedule_git_statuses_update(
        &self,
        state: &mut BackgroundScannerState,
        mut local_repository: LocalRepositoryEntry,
    ) -> oneshot::Receiver<()> {
        let repository_name = local_repository.work_directory.display_name();
        let path_key = local_repository.work_directory.path_key();

        let job_state = self.state.clone();
        let (tx, rx) = oneshot::channel();

        state.repository_scans.insert(
            path_key.clone(),
            self.executor.spawn(async move {
                // Branch refresh failures are logged and tolerated; statuses
                // are still computed below.
                update_branches(&job_state, &mut local_repository).log_err();
                log::trace!("updating git statuses for repo {repository_name}",);
                let t0 = Instant::now();

                // Query the status of the entire work directory.
                let Some(statuses) = local_repository
                    .repo()
                    .status(&[git::WORK_DIRECTORY_REPO_PATH.clone()])
                    .log_err()
                else {
                    return;
                };

                log::trace!(
                    "computed git statuses for repo {repository_name} in {:?}",
                    t0.elapsed()
                );

                let t0 = Instant::now();
                let mut changed_paths = Vec::new();
                // Clone the snapshot so the lock is not held while building
                // the new status tree.
                let snapshot = job_state.lock().snapshot.snapshot.clone();

                let Some(mut repository) = snapshot
                    .repository(path_key)
                    .context(
                        "Tried to update git statuses for a repository that isn't in the snapshot",
                    )
                    .log_err()
                else {
                    return;
                };

                // If the set of merge heads changed, the stale conflict list
                // is discarded.
                let merge_head_shas = local_repository.repo().merge_head_shas();
                if merge_head_shas != local_repository.current_merge_head_shas {
                    mem::take(&mut repository.current_merge_conflicts);
                }

                // Rebuild the per-path status tree from scratch, collecting
                // the project-relative paths that need change notifications.
                let mut new_entries_by_path = SumTree::new(&());
                for (repo_path, status) in statuses.entries.iter() {
                    let project_path = repository.work_directory.unrelativize(repo_path);

                    new_entries_by_path.insert_or_replace(
                        StatusEntry {
                            repo_path: repo_path.clone(),
                            status: *status,
                        },
                        &(),
                    );

                    if let Some(path) = project_path {
                        changed_paths.push(path);
                    }
                }

                // Publish the rebuilt statuses and bookkeeping under the lock.
                repository.statuses_by_path = new_entries_by_path;
                let mut state = job_state.lock();
                state
                    .snapshot
                    .repositories
                    .insert_or_replace(repository, &());
                state.snapshot.git_repositories.update(
                    &local_repository.work_directory_id,
                    |entry| {
                        entry.current_merge_head_shas = merge_head_shas;
                        entry.status_scan_id += 1;
                    },
                );

                util::extend_sorted(
                    &mut state.changed_paths,
                    changed_paths,
                    usize::MAX,
                    Ord::cmp,
                );

                log::trace!(
                    "applied git status updates for repo {repository_name} in {:?}",
                    t0.elapsed(),
                );
                tx.send(()).ok();
            }),
        );
        rx
    }
5514
    /// Sleeps for the FS-watch latency interval, or forever when `running` is
    /// false — so callers racing this against other futures only observe
    /// progress ticks while a scan is in flight.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        // Under a fake FS in tests, defer to the deterministic executor's
        // simulated delay instead of a real wall-clock timer.
        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(FS_WATCH_LATENCY).await;
    }
5527
5528 fn is_path_private(&self, path: &Path) -> bool {
5529 !self.share_private_files && self.settings.is_path_private(path)
5530 }
5531
5532 async fn next_scan_request(&self) -> Result<ScanRequest> {
5533 let mut request = self.scan_requests_rx.recv().await?;
5534 while let Ok(next_request) = self.scan_requests_rx.try_recv() {
5535 request.relative_paths.extend(next_request.relative_paths);
5536 request.done.extend(next_request.done);
5537 }
5538 Ok(request)
5539 }
5540}
5541
5542fn send_status_update_inner(
5543 phase: BackgroundScannerPhase,
5544 state: Arc<Mutex<BackgroundScannerState>>,
5545 status_updates_tx: UnboundedSender<ScanState>,
5546 scanning: bool,
5547 barrier: SmallVec<[barrier::Sender; 1]>,
5548) -> bool {
5549 let mut state = state.lock();
5550 if state.changed_paths.is_empty() && scanning {
5551 return true;
5552 }
5553
5554 let new_snapshot = state.snapshot.clone();
5555 let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
5556 let changes = build_diff(phase, &old_snapshot, &new_snapshot, &state.changed_paths);
5557 state.changed_paths.clear();
5558
5559 status_updates_tx
5560 .unbounded_send(ScanState::Updated {
5561 snapshot: new_snapshot,
5562 changes,
5563 scanning,
5564 barrier,
5565 })
5566 .is_ok()
5567}
5568
5569fn update_branches(
5570 state: &Mutex<BackgroundScannerState>,
5571 repository: &mut LocalRepositoryEntry,
5572) -> Result<()> {
5573 let branches = repository.repo().branches()?;
5574 let snapshot = state.lock().snapshot.snapshot.clone();
5575 let mut repository = snapshot
5576 .repository(repository.work_directory.path_key())
5577 .context("Missing repository")?;
5578 repository.branch = branches.into_iter().find(|branch| branch.is_head);
5579
5580 let mut state = state.lock();
5581 state
5582 .snapshot
5583 .repositories
5584 .insert_or_replace(repository, &());
5585
5586 Ok(())
5587}
5588
/// Computes the set of entry changes between two snapshots, restricted to the
/// given `event_paths` (and their descendants). Walks both snapshots' entry
/// trees with a pair of cursors advanced in lockstep, so each changed subtree
/// is visited once.
fn build_diff(
    phase: BackgroundScannerPhase,
    old_snapshot: &Snapshot,
    new_snapshot: &Snapshot,
    event_paths: &[Arc<Path>],
) -> UpdatedEntriesSet {
    use BackgroundScannerPhase::*;
    use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

    // Identify which paths have changed. Use the known set of changed
    // parent paths to optimize the search.
    let mut changes = Vec::new();
    let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>(&());
    let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>(&());
    // Tracks the most recent directory that transitioned from unloaded to
    // loaded, so its children can be reported as `Loaded` rather than `Added`.
    let mut last_newly_loaded_dir_path = None;
    old_paths.next(&());
    new_paths.next(&());
    for path in event_paths {
        let path = PathKey(path.clone());
        // Advance each cursor up to the event path (seek_forward only moves
        // forward, so already-passed cursors stay put).
        if old_paths.item().map_or(false, |e| e.path < path.0) {
            old_paths.seek_forward(&path, Bias::Left, &());
        }
        if new_paths.item().map_or(false, |e| e.path < path.0) {
            new_paths.seek_forward(&path, Bias::Left, &());
        }
        loop {
            match (old_paths.item(), new_paths.item()) {
                (Some(old_entry), Some(new_entry)) => {
                    // Both cursors have moved past this event path's subtree;
                    // proceed to the next event path.
                    if old_entry.path > path.0
                        && new_entry.path > path.0
                        && !old_entry.path.starts_with(&path.0)
                        && !new_entry.path.starts_with(&path.0)
                    {
                        break;
                    }

                    match Ord::cmp(&old_entry.path, &new_entry.path) {
                        Ordering::Less => {
                            // Present only in the old snapshot: removed.
                            changes.push((old_entry.path.clone(), old_entry.id, Removed));
                            old_paths.next(&());
                        }
                        Ordering::Equal => {
                            if phase == EventsReceivedDuringInitialScan {
                                if old_entry.id != new_entry.id {
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                }
                                // If the worktree was not fully initialized when this event was generated,
                                // we can't know whether this entry was added during the scan or whether
                                // it was merely updated.
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    AddedOrUpdated,
                                ));
                            } else if old_entry.id != new_entry.id {
                                // Same path, different entry id: replaced.
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                changes.push((new_entry.path.clone(), new_entry.id, Added));
                            } else if old_entry != new_entry {
                                if old_entry.kind.is_unloaded() {
                                    // Directory contents just became available.
                                    last_newly_loaded_dir_path = Some(&new_entry.path);
                                    changes.push((new_entry.path.clone(), new_entry.id, Loaded));
                                } else {
                                    changes.push((new_entry.path.clone(), new_entry.id, Updated));
                                }
                            }
                            old_paths.next(&());
                            new_paths.next(&());
                        }
                        Ordering::Greater => {
                            // Present only in the new snapshot. Children of a
                            // just-loaded directory count as `Loaded`, not `Added`.
                            let is_newly_loaded = phase == InitialScan
                                || last_newly_loaded_dir_path
                                    .as_ref()
                                    .map_or(false, |dir| new_entry.path.starts_with(dir));
                            changes.push((
                                new_entry.path.clone(),
                                new_entry.id,
                                if is_newly_loaded { Loaded } else { Added },
                            ));
                            new_paths.next(&());
                        }
                    }
                }
                (Some(old_entry), None) => {
                    // Old snapshot exhausted on the new side: removal.
                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                    old_paths.next(&());
                }
                (None, Some(new_entry)) => {
                    let is_newly_loaded = phase == InitialScan
                        || last_newly_loaded_dir_path
                            .as_ref()
                            .map_or(false, |dir| new_entry.path.starts_with(dir));
                    changes.push((
                        new_entry.path.clone(),
                        new_entry.id,
                        if is_newly_loaded { Loaded } else { Added },
                    ));
                    new_paths.next(&());
                }
                (None, None) => break,
            }
        }
    }

    changes.into()
}
5694
/// Moves the path whose file name equals `file` to the front of `child_paths`,
/// preserving the relative order of all other entries. Does nothing if no
/// path matches.
fn swap_to_front(child_paths: &mut Vec<PathBuf>, file: &OsStr) {
    // `file_name()` returns `None` for paths like `..` or `/`; compare against
    // `Some(file)` so such entries are treated as non-matches instead of
    // panicking (the previous code called `.unwrap()` here).
    let position = child_paths
        .iter()
        .position(|path| path.file_name() == Some(file));
    if let Some(position) = position {
        // Rotate the prefix in place instead of remove + insert: one pass,
        // no double shifting of the intervening elements.
        child_paths[..=position].rotate_right(1);
    }
}
5704
5705fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
5706 let mut result = root_char_bag;
5707 result.extend(
5708 path.to_string_lossy()
5709 .chars()
5710 .map(|c| c.to_ascii_lowercase()),
5711 );
5712 result
5713}
5714
/// A batch of paths belonging to a single git repository, paired with the
/// repository handle and its snapshot entry.
#[derive(Debug)]
struct RepoPaths {
    // Handle to the underlying git repository.
    repo: Arc<dyn GitRepository>,
    // The repository's entry in the worktree snapshot.
    entry: RepositoryEntry,
    // Repo-relative paths, kept sorted and deduplicated (see `add_path`).
    repo_paths: Vec<RepoPath>,
}
5722
5723impl RepoPaths {
5724 fn add_path(&mut self, repo_path: RepoPath) {
5725 match self.repo_paths.binary_search(&repo_path) {
5726 Ok(_) => {}
5727 Err(ix) => self.repo_paths.insert(ix, repo_path),
5728 }
5729 }
5730
5731 fn remove_repo_path(&mut self, repo_path: &RepoPath) {
5732 match self.repo_paths.binary_search(&repo_path) {
5733 Ok(ix) => {
5734 self.repo_paths.remove(ix);
5735 }
5736 Err(_) => {}
5737 }
5738 }
5739}
5740
/// A unit of work for the background scanner: scan one directory and enqueue
/// jobs for its subdirectories.
#[derive(Debug)]
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules accumulated from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Channel used to enqueue jobs for child directories.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories — presumably used to detect filesystem
    // cycles via symlinks; confirm against the scan implementation.
    ancestor_inodes: TreeSet<u64>,
    // Whether this directory lies outside the worktree root (e.g. behind a
    // symlink).
    is_external: bool,
}
5750
/// A unit of work for re-evaluating ignore status after a gitignore change.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose entries need re-evaluation.
    abs_path: Arc<Path>,
    // Gitignore rules in effect for this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Channel for enqueueing follow-up ignore-status jobs for subdirectories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
    // Channel for scheduling full rescans where required.
    scan_queue: Sender<ScanJob>,
}
5757
/// Test-support extension methods for worktree handles.
pub trait WorktreeModelHandle {
    /// Flushes any pending FS events for the worktree's root directory.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;

    /// Flushes any pending FS events for the root repository's `.git`
    /// directory, which may live outside the worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events_in_root_git_repository<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
5771
impl WorktreeModelHandle for Entity<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait until the worktree observes it...
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_some())
                .await;

            // ...then delete it and wait for that to be observed too.
            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();
            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_none())
                .await;

            // Finally wait for any in-flight scan to finish.
            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }

    // This function is similar to flush_fs_events, except that it waits for events to be flushed in
    // the .git folder of the root repository.
    // The reason for its existence is that a repository's .git folder might live *outside* of the
    // worktree and thus its FS events might go through a different path.
    // In order to flush those, we need to create artificial events in the .git folder and wait
    // for the repository to be reloaded.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events_in_root_git_repository<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path, mut git_dir_scan_id) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            let root_entry = tree.root_git_entry().unwrap();
            let local_repo_entry = tree.get_local_repo(&root_entry).unwrap();
            (
                tree.fs.clone(),
                local_repo_entry.dot_git_dir_abs_path.clone(),
                local_repo_entry.git_dir_scan_id,
            )
        });

        // Returns true once the repository's scan id has advanced past the
        // last value we saw, updating the remembered value as a side effect.
        let scan_id_increased = |tree: &mut Worktree, git_dir_scan_id: &mut usize| {
            let root_entry = tree.root_git_entry().unwrap();
            let local_repo_entry = tree
                .as_local()
                .unwrap()
                .get_local_repo(&root_entry)
                .unwrap();

            if local_repo_entry.git_dir_scan_id > *git_dir_scan_id {
                *git_dir_scan_id = local_repo_entry.git_dir_scan_id;
                true
            } else {
                false
            }
        };

        async move {
            // Create and then remove a sentinel inside `.git`, waiting for a
            // repository rescan after each mutation.
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| {
                scan_id_increased(tree, &mut git_dir_scan_id)
            })
            .await;

            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| {
                scan_id_increased(tree, &mut git_dir_scan_id)
            })
            .await;

            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
5878
/// Running totals accumulated while traversing the entry tree; used as a
/// sum-tree dimension to translate between paths and entry counts.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // The rightmost path seen so far.
    max_path: &'a Path,
    // Total entries (files and directories, including ignored ones).
    count: usize,
    // Entries that are not gitignored.
    non_ignored_count: usize,
    // File entries only.
    file_count: usize,
    // File entries that are not gitignored.
    non_ignored_file_count: usize,
}
5887
5888impl<'a> TraversalProgress<'a> {
5889 fn count(&self, include_files: bool, include_dirs: bool, include_ignored: bool) -> usize {
5890 match (include_files, include_dirs, include_ignored) {
5891 (true, true, true) => self.count,
5892 (true, true, false) => self.non_ignored_count,
5893 (true, false, true) => self.file_count,
5894 (true, false, false) => self.non_ignored_file_count,
5895 (false, true, true) => self.count - self.file_count,
5896 (false, true, false) => self.non_ignored_count - self.non_ignored_file_count,
5897 (false, false, _) => 0,
5898 }
5899 }
5900}
5901
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    // Accumulate a subtree's summary: advance the rightmost path and add the
    // subtree's entry counts to the running totals.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.non_ignored_count += summary.non_ignored_count;
        self.file_count += summary.file_count;
        self.non_ignored_file_count += summary.non_ignored_file_count;
    }
}
5915
impl<'a> Default for TraversalProgress<'a> {
    // The zero dimension: empty path, all counts at zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}
5927
/// A borrowed worktree entry paired with its aggregated git status summary.
#[derive(Debug, Clone, Copy)]
pub struct GitEntryRef<'a> {
    pub entry: &'a Entry,
    pub git_summary: GitSummary,
}
5933
5934impl<'a> GitEntryRef<'a> {
5935 pub fn to_owned(&self) -> GitEntry {
5936 GitEntry {
5937 entry: self.entry.clone(),
5938 git_summary: self.git_summary,
5939 }
5940 }
5941}
5942
5943impl<'a> Deref for GitEntryRef<'a> {
5944 type Target = Entry;
5945
5946 fn deref(&self) -> &Self::Target {
5947 &self.entry
5948 }
5949}
5950
impl<'a> AsRef<Entry> for GitEntryRef<'a> {
    // Expose the underlying entry for APIs that accept `impl AsRef<Entry>`.
    fn as_ref(&self) -> &Entry {
        self.entry
    }
}
5956
/// An owned worktree entry paired with its aggregated git status summary.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct GitEntry {
    pub entry: Entry,
    pub git_summary: GitSummary,
}
5962
impl GitEntry {
    /// Borrows this entry as a `GitEntryRef` carrying the same git summary.
    pub fn to_ref(&self) -> GitEntryRef {
        GitEntryRef {
            entry: &self.entry,
            git_summary: self.git_summary,
        }
    }
}
5971
impl Deref for GitEntry {
    type Target = Entry;

    // Dereference straight to the owned entry.
    fn deref(&self) -> &Self::Target {
        &self.entry
    }
}
5979
impl AsRef<Entry> for GitEntry {
    // Expose the underlying entry for APIs that accept `impl AsRef<Entry>`.
    fn as_ref(&self) -> &Entry {
        &self.entry
    }
}
5985
/// Walks the worktree entries and their associated git statuses.
pub struct GitTraversal<'a> {
    traversal: Traversal<'a>,
    // Cached git summary for the entry the traversal currently points at;
    // `None` when the entry has no applicable status.
    current_entry_summary: Option<GitSummary>,
    // The repository containing the current entry, along with a status cursor
    // parked near the current entry's path to make sequential lookups cheap.
    repo_location: Option<(
        &'a RepositoryEntry,
        Cursor<'a, StatusEntry, PathProgress<'a>>,
    )>,
}
5995
impl<'a> GitTraversal<'a> {
    /// Recomputes `current_entry_summary` (and, if necessary, `repo_location`)
    /// for the entry the underlying cursor currently points at. `reset`
    /// forces the status cursor to be rebuilt even when the repository is
    /// unchanged — used after the traversal moves backwards, since the status
    /// cursor only seeks forward.
    fn synchronize_statuses(&mut self, reset: bool) {
        self.current_entry_summary = None;

        let Some(entry) = self.traversal.cursor.item() else {
            return;
        };

        let Some(repo) = self.traversal.snapshot.repository_for_path(&entry.path) else {
            self.repo_location = None;
            return;
        };

        // Update our state if we changed repositories.
        if reset || self.repo_location.as_ref().map(|(prev_repo, _)| prev_repo) != Some(&repo) {
            self.repo_location = Some((repo, repo.statuses_by_path.cursor::<PathProgress>(&())));
        }

        let Some((repo, statuses)) = &mut self.repo_location else {
            return;
        };

        let repo_path = repo.relativize(&entry.path).unwrap();

        if entry.is_dir() {
            // Summarize every status under this directory. Seek on a clone so
            // the stored cursor's parked position isn't disturbed.
            let mut statuses = statuses.clone();
            statuses.seek_forward(&PathTarget::Path(repo_path.as_ref()), Bias::Left, &());
            let summary =
                statuses.summary(&PathTarget::Successor(repo_path.as_ref()), Bias::Left, &());

            self.current_entry_summary = Some(summary);
        } else if entry.is_file() {
            // For a file entry, park the cursor on the corresponding status
            if statuses.seek_forward(&PathTarget::Path(repo_path.as_ref()), Bias::Left, &()) {
                // TODO: Investigate statuses.item() being None here.
                self.current_entry_summary = statuses.item().map(|item| item.status.into());
            } else {
                self.current_entry_summary = Some(GitSummary::UNCHANGED);
            }
        }
    }

    /// Advances to the next entry, refreshing the cached git summary.
    pub fn advance(&mut self) -> bool {
        self.advance_by(1)
    }

    /// Advances by `count` entries, refreshing the cached git summary.
    pub fn advance_by(&mut self, count: usize) -> bool {
        let found = self.traversal.advance_by(count);
        self.synchronize_statuses(false);
        found
    }

    /// Skips the current entry's descendants and moves to its next sibling.
    pub fn advance_to_sibling(&mut self) -> bool {
        let found = self.traversal.advance_to_sibling();
        self.synchronize_statuses(false);
        found
    }

    /// Moves to the current entry's parent. This moves the cursor backwards,
    /// so the status cursor is reset.
    pub fn back_to_parent(&mut self) -> bool {
        let found = self.traversal.back_to_parent();
        self.synchronize_statuses(true);
        found
    }

    pub fn start_offset(&self) -> usize {
        self.traversal.start_offset()
    }

    pub fn end_offset(&self) -> usize {
        self.traversal.end_offset()
    }

    /// Returns the current entry with its git summary, defaulting to
    /// `UNCHANGED` when no summary was computed.
    pub fn entry(&self) -> Option<GitEntryRef<'a>> {
        let entry = self.traversal.cursor.item()?;
        let git_summary = self.current_entry_summary.unwrap_or(GitSummary::UNCHANGED);
        Some(GitEntryRef { entry, git_summary })
    }
}
6074
6075impl<'a> Iterator for GitTraversal<'a> {
6076 type Item = GitEntryRef<'a>;
6077 fn next(&mut self) -> Option<Self::Item> {
6078 if let Some(item) = self.entry() {
6079 self.advance();
6080 Some(item)
6081 } else {
6082 None
6083 }
6084 }
6085}
6086
/// An ordered walk over a snapshot's entries, optionally filtered by entry
/// kind and ignore status.
#[derive(Debug)]
pub struct Traversal<'a> {
    snapshot: &'a Snapshot,
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether gitignored entries are yielded.
    include_ignored: bool,
    // Whether file entries are yielded.
    include_files: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
6095
impl<'a> Traversal<'a> {
    /// Creates a traversal positioned at the first includable entry at or
    /// after `start_path`.
    fn new(
        snapshot: &'a Snapshot,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        start_path: &Path,
    ) -> Self {
        let mut cursor = snapshot.entries_by_path.cursor(&());
        cursor.seek(&TraversalTarget::path(start_path), Bias::Left, &());
        let mut traversal = Self {
            snapshot,
            cursor,
            include_files,
            include_dirs,
            include_ignored,
        };
        // Equal offsets mean the filtered count did not advance across the
        // current entry — presumably the seek landed on a filtered-out entry,
        // so step forward to the first includable one.
        if traversal.end_offset() == traversal.start_offset() {
            traversal.next();
        }
        traversal
    }

    /// Upgrades this traversal to one that also reports git statuses.
    pub fn with_git_statuses(self) -> GitTraversal<'a> {
        let mut this = GitTraversal {
            traversal: self,
            current_entry_summary: None,
            repo_location: None,
        };
        this.synchronize_statuses(true);
        this
    }

    pub fn advance(&mut self) -> bool {
        self.advance_by(1)
    }

    /// Advances by `count` includable entries, using the filtered entry counts
    /// as the seek target so excluded entries are skipped in one seek.
    pub fn advance_by(&mut self, count: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + count,
                include_dirs: self.include_dirs,
                include_files: self.include_files,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Seeks past the current entry and all of its descendants, repeating
    /// until an includable entry is found (or the tree is exhausted).
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor
                .seek_forward(&TraversalTarget::successor(&entry.path), Bias::Left, &());
            if let Some(entry) = self.cursor.item() {
                if (self.include_files || !entry.is_file())
                    && (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored || entry.is_always_included)
                {
                    return true;
                }
            }
        }
        false
    }

    /// Seeks back to the current entry's parent. Returns false at the root.
    pub fn back_to_parent(&mut self) -> bool {
        let Some(parent_path) = self.cursor.item().and_then(|entry| entry.path.parent()) else {
            return false;
        };
        self.cursor
            .seek(&TraversalTarget::path(parent_path), Bias::Left, &())
    }

    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of includable entries strictly before the current one.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_files, self.include_dirs, self.include_ignored)
    }

    /// Number of includable entries up to and including the current one.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_files, self.include_dirs, self.include_ignored)
    }
}
6186
6187impl<'a> Iterator for Traversal<'a> {
6188 type Item = &'a Entry;
6189
6190 fn next(&mut self) -> Option<Self::Item> {
6191 if let Some(item) = self.entry() {
6192 self.advance();
6193 Some(item)
6194 } else {
6195 None
6196 }
6197 }
6198}
6199
/// A seek target addressed by path: either the path itself, or the first
/// position after the path and all of its descendants.
#[derive(Debug, Clone, Copy)]
enum PathTarget<'a> {
    Path(&'a Path),
    Successor(&'a Path),
}
6205
6206impl<'a> PathTarget<'a> {
6207 fn cmp_path(&self, other: &Path) -> Ordering {
6208 match self {
6209 PathTarget::Path(path) => path.cmp(&other),
6210 PathTarget::Successor(path) => {
6211 if other.starts_with(path) {
6212 Ordering::Greater
6213 } else {
6214 Ordering::Equal
6215 }
6216 }
6217 }
6218 }
6219}
6220
// Allow seeking by path in trees dimensioned by `PathProgress`.
impl<'a, 'b, S: Summary> SeekTarget<'a, PathSummary<S>, PathProgress<'a>> for PathTarget<'b> {
    fn cmp(&self, cursor_location: &PathProgress<'a>, _: &S::Context) -> Ordering {
        self.cmp_path(&cursor_location.max_path)
    }
}
6226
// Allow seeking by path in trees dimensioned by `TraversalProgress`.
impl<'a, 'b, S: Summary> SeekTarget<'a, PathSummary<S>, TraversalProgress<'a>> for PathTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &S::Context) -> Ordering {
        self.cmp_path(&cursor_location.max_path)
    }
}
6232
// Allow seeking by path while also accumulating a git summary dimension;
// only the path component participates in the comparison.
impl<'a, 'b> SeekTarget<'a, PathSummary<GitSummary>, (TraversalProgress<'a>, GitSummary)>
    for PathTarget<'b>
{
    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitSummary), _: &()) -> Ordering {
        self.cmp_path(&cursor_location.0.max_path)
    }
}
6240
/// A seek target for entry traversals: either a path-based target, or a
/// target entry count under a particular combination of inclusion filters.
#[derive(Debug)]
enum TraversalTarget<'a> {
    Path(PathTarget<'a>),
    Count {
        count: usize,
        include_files: bool,
        include_ignored: bool,
        include_dirs: bool,
    },
}
6251
6252impl<'a> TraversalTarget<'a> {
6253 fn path(path: &'a Path) -> Self {
6254 Self::Path(PathTarget::Path(path))
6255 }
6256
6257 fn successor(path: &'a Path) -> Self {
6258 Self::Path(PathTarget::Successor(path))
6259 }
6260
6261 fn cmp_progress(&self, progress: &TraversalProgress) -> Ordering {
6262 match self {
6263 TraversalTarget::Path(path) => path.cmp_path(&progress.max_path),
6264 TraversalTarget::Count {
6265 count,
6266 include_files,
6267 include_dirs,
6268 include_ignored,
6269 } => Ord::cmp(
6270 count,
6271 &progress.count(*include_files, *include_dirs, *include_ignored),
6272 ),
6273 }
6274 }
6275}
6276
// Allow traversal targets to drive seeks through the entries-by-path tree.
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        self.cmp_progress(cursor_location)
    }
}
6282
// Same as above, for trees summarized only by path.
impl<'a, 'b> SeekTarget<'a, PathSummary<Unit>, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        self.cmp_progress(cursor_location)
    }
}
6288
/// Filter options controlling which child entries an iteration yields.
pub struct ChildEntriesOptions {
    pub include_files: bool,
    pub include_dirs: bool,
    pub include_ignored: bool,
}
6294
/// Iterates over the immediate children of one directory entry.
pub struct ChildEntriesIter<'a> {
    // The directory whose children are yielded; iteration stops at the first
    // entry outside this path.
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
6299
6300impl<'a> ChildEntriesIter<'a> {
6301 pub fn with_git_statuses(self) -> ChildEntriesGitIter<'a> {
6302 ChildEntriesGitIter {
6303 parent_path: self.parent_path,
6304 traversal: self.traversal.with_git_statuses(),
6305 }
6306 }
6307}
6308
/// Like `ChildEntriesIter`, but each yielded child carries its git summary.
pub struct ChildEntriesGitIter<'a> {
    // The directory whose children are yielded.
    parent_path: &'a Path,
    traversal: GitTraversal<'a>,
}
6313
6314impl<'a> Iterator for ChildEntriesIter<'a> {
6315 type Item = &'a Entry;
6316
6317 fn next(&mut self) -> Option<Self::Item> {
6318 if let Some(item) = self.traversal.entry() {
6319 if item.path.starts_with(self.parent_path) {
6320 self.traversal.advance_to_sibling();
6321 return Some(item);
6322 }
6323 }
6324 None
6325 }
6326}
6327
6328impl<'a> Iterator for ChildEntriesGitIter<'a> {
6329 type Item = GitEntryRef<'a>;
6330
6331 fn next(&mut self) -> Option<Self::Item> {
6332 if let Some(item) = self.traversal.entry() {
6333 if item.path.starts_with(self.parent_path) {
6334 self.traversal.advance_to_sibling();
6335 return Some(item);
6336 }
6337 }
6338 None
6339 }
6340}
6341
// Serializes a worktree entry for the wire. Note that `is_private` and the
// char bag are not transmitted; they are recomputed on the receiving side
// (see the `TryFrom` impl below).
impl<'a> From<&'a Entry> for proto::Entry {
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.as_ref().to_proto(),
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            is_ignored: entry.is_ignored,
            is_external: entry.is_external,
            is_fifo: entry.is_fifo,
            size: Some(entry.size),
            canonical_path: entry
                .canonical_path
                .as_ref()
                .map(|path| path.as_ref().to_proto()),
        }
    }
}
6361
// Deserializes a wire entry, recomputing the fields that are not transmitted:
// the fuzzy-matching char bag, the always-included flag, and `is_private`
// (which defaults to false — privacy appears to be a local-only concern;
// confirm against the sending side).
impl<'a> TryFrom<(&'a CharBag, &PathMatcher, proto::Entry)> for Entry {
    type Error = anyhow::Error;

    fn try_from(
        (root_char_bag, always_included, entry): (&'a CharBag, &PathMatcher, proto::Entry),
    ) -> Result<Self> {
        let kind = if entry.is_dir {
            EntryKind::Dir
        } else {
            EntryKind::File
        };

        let path = Arc::<Path>::from_proto(entry.path);
        let char_bag = char_bag_for_path(*root_char_bag, &path);
        let is_always_included = always_included.is_match(path.as_ref());
        Ok(Entry {
            id: ProjectEntryId::from_proto(entry.id),
            kind,
            path,
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            size: entry.size.unwrap_or(0),
            canonical_path: entry
                .canonical_path
                .map(|path_string| Box::from(PathBuf::from_proto(path_string))),
            is_ignored: entry.is_ignored,
            is_always_included,
            is_external: entry.is_external,
            is_private: false,
            char_bag,
            is_fifo: entry.is_fifo,
        })
    }
}
6396
/// Decodes a git file status from the wire. Newer peers send the structured
/// `status` variant; when it is absent, the legacy flat `simple_status` code
/// is interpreted instead.
fn status_from_proto(
    simple_status: i32,
    status: Option<proto::GitFileStatus>,
) -> anyhow::Result<FileStatus> {
    use proto::git_file_status::Variant;

    let Some(variant) = status.and_then(|status| status.variant) else {
        // Legacy path: map the flat status code onto the richer model,
        // assuming an unmodified index for Added/Modified/Deleted.
        let code = proto::GitStatus::from_i32(simple_status)
            .ok_or_else(|| anyhow!("Invalid git status code: {simple_status}"))?;
        let result = match code {
            proto::GitStatus::Added => TrackedStatus {
                worktree_status: StatusCode::Added,
                index_status: StatusCode::Unmodified,
            }
            .into(),
            proto::GitStatus::Modified => TrackedStatus {
                worktree_status: StatusCode::Modified,
                index_status: StatusCode::Unmodified,
            }
            .into(),
            proto::GitStatus::Conflict => UnmergedStatus {
                first_head: UnmergedStatusCode::Updated,
                second_head: UnmergedStatusCode::Updated,
            }
            .into(),
            proto::GitStatus::Deleted => TrackedStatus {
                worktree_status: StatusCode::Deleted,
                index_status: StatusCode::Unmodified,
            }
            .into(),
            _ => return Err(anyhow!("Invalid code for simple status: {simple_status}")),
        };
        return Ok(result);
    };

    let result = match variant {
        Variant::Untracked(_) => FileStatus::Untracked,
        Variant::Ignored(_) => FileStatus::Ignored,
        Variant::Unmerged(unmerged) => {
            // Decode both sides of the merge conflict; either may fail on an
            // unknown code.
            let [first_head, second_head] =
                [unmerged.first_head, unmerged.second_head].map(|head| {
                    let code = proto::GitStatus::from_i32(head)
                        .ok_or_else(|| anyhow!("Invalid git status code: {head}"))?;
                    let result = match code {
                        proto::GitStatus::Added => UnmergedStatusCode::Added,
                        proto::GitStatus::Updated => UnmergedStatusCode::Updated,
                        proto::GitStatus::Deleted => UnmergedStatusCode::Deleted,
                        _ => return Err(anyhow!("Invalid code for unmerged status: {code:?}")),
                    };
                    Ok(result)
                });
            let [first_head, second_head] = [first_head?, second_head?];
            UnmergedStatus {
                first_head,
                second_head,
            }
            .into()
        }
        Variant::Tracked(tracked) => {
            // Decode the index and worktree status codes independently.
            let [index_status, worktree_status] = [tracked.index_status, tracked.worktree_status]
                .map(|status| {
                    let code = proto::GitStatus::from_i32(status)
                        .ok_or_else(|| anyhow!("Invalid git status code: {status}"))?;
                    let result = match code {
                        proto::GitStatus::Modified => StatusCode::Modified,
                        proto::GitStatus::TypeChanged => StatusCode::TypeChanged,
                        proto::GitStatus::Added => StatusCode::Added,
                        proto::GitStatus::Deleted => StatusCode::Deleted,
                        proto::GitStatus::Renamed => StatusCode::Renamed,
                        proto::GitStatus::Copied => StatusCode::Copied,
                        proto::GitStatus::Unmodified => StatusCode::Unmodified,
                        _ => return Err(anyhow!("Invalid code for tracked status: {code:?}")),
                    };
                    Ok(result)
                });
            let [index_status, worktree_status] = [index_status?, worktree_status?];
            TrackedStatus {
                index_status,
                worktree_status,
            }
            .into()
        }
    };
    Ok(result)
}
6482
6483fn status_to_proto(status: FileStatus) -> proto::GitFileStatus {
6484 use proto::git_file_status::{Tracked, Unmerged, Variant};
6485
6486 let variant = match status {
6487 FileStatus::Untracked => Variant::Untracked(Default::default()),
6488 FileStatus::Ignored => Variant::Ignored(Default::default()),
6489 FileStatus::Unmerged(UnmergedStatus {
6490 first_head,
6491 second_head,
6492 }) => Variant::Unmerged(Unmerged {
6493 first_head: unmerged_status_to_proto(first_head),
6494 second_head: unmerged_status_to_proto(second_head),
6495 }),
6496 FileStatus::Tracked(TrackedStatus {
6497 index_status,
6498 worktree_status,
6499 }) => Variant::Tracked(Tracked {
6500 index_status: tracked_status_to_proto(index_status),
6501 worktree_status: tracked_status_to_proto(worktree_status),
6502 }),
6503 };
6504 proto::GitFileStatus {
6505 variant: Some(variant),
6506 }
6507}
6508
6509fn unmerged_status_to_proto(code: UnmergedStatusCode) -> i32 {
6510 match code {
6511 UnmergedStatusCode::Added => proto::GitStatus::Added as _,
6512 UnmergedStatusCode::Deleted => proto::GitStatus::Deleted as _,
6513 UnmergedStatusCode::Updated => proto::GitStatus::Updated as _,
6514 }
6515}
6516
6517fn tracked_status_to_proto(code: StatusCode) -> i32 {
6518 match code {
6519 StatusCode::Added => proto::GitStatus::Added as _,
6520 StatusCode::Deleted => proto::GitStatus::Deleted as _,
6521 StatusCode::Modified => proto::GitStatus::Modified as _,
6522 StatusCode::Renamed => proto::GitStatus::Renamed as _,
6523 StatusCode::TypeChanged => proto::GitStatus::TypeChanged as _,
6524 StatusCode::Copied => proto::GitStatus::Copied as _,
6525 StatusCode::Unmodified => proto::GitStatus::Unmodified as _,
6526 }
6527}
6528
/// A project-wide unique identifier for a worktree entry, allocated from a
/// shared monotonically increasing counter.
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProjectEntryId(usize);

impl ProjectEntryId {
    /// The largest representable entry id.
    pub const MAX: Self = ProjectEntryId(usize::MAX);
    /// The smallest representable entry id.
    pub const MIN: Self = ProjectEntryId(usize::MIN);

    /// Allocates the next id by atomically incrementing `counter`, returning
    /// the value it held before the increment.
    pub fn new(counter: &AtomicUsize) -> Self {
        ProjectEntryId(counter.fetch_add(1, SeqCst))
    }

    /// Reconstructs an id from its wire-format (`u64`) representation.
    pub fn from_proto(id: u64) -> Self {
        ProjectEntryId(id as usize)
    }

    /// Returns the wire-format (`u64`) representation of this id.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// Returns the raw `usize` value of this id.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
6552
#[cfg(any(test, feature = "test-support"))]
impl CreatedEntry {
    /// Returns the contained [`Entry`] when the creation produced an included
    /// entry, or `None` when the path was excluded.
    pub fn to_included(self) -> Option<Entry> {
        if let CreatedEntry::Included(entry) = self {
            Some(entry)
        } else {
            None
        }
    }
}