1mod ignore;
2mod worktree_settings;
3#[cfg(test)]
4mod worktree_tests;
5
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context as _, Result};
8use clock::ReplicaId;
9use collections::{HashMap, HashSet, VecDeque};
10use fs::{copy_recursive, Fs, MTime, PathEvent, RemoveOptions, Watcher};
11use futures::{
12 channel::{
13 mpsc::{self, UnboundedSender},
14 oneshot,
15 },
16 future::join_all,
17 select_biased,
18 task::Poll,
19 FutureExt as _, Stream, StreamExt,
20};
21use fuzzy::CharBag;
22use git::{
23 repository::{Branch, GitRepository, RepoPath, UpstreamTrackingStatus},
24 status::{
25 FileStatus, GitSummary, StatusCode, TrackedStatus, UnmergedStatus, UnmergedStatusCode,
26 },
27 GitHostingProviderRegistry, COMMIT_MESSAGE, DOT_GIT, FSMONITOR_DAEMON, GITIGNORE, INDEX_LOCK,
28};
29use gpui::{
30 App, AppContext as _, AsyncApp, BackgroundExecutor, Context, Entity, EventEmitter, Task,
31};
32use ignore::IgnoreStack;
33use language::DiskState;
34
35use parking_lot::Mutex;
36use paths::local_settings_folder_relative_path;
37use postage::{
38 barrier,
39 prelude::{Sink as _, Stream as _},
40 watch,
41};
42use rpc::{
43 proto::{self, split_worktree_update, FromProto, ToProto},
44 AnyProtoClient,
45};
46pub use settings::WorktreeId;
47use settings::{Settings, SettingsLocation, SettingsStore};
48use smallvec::{smallvec, SmallVec};
49use smol::channel::{self, Sender};
50use std::{
51 any::Any,
52 cmp::Ordering,
53 collections::hash_map,
54 convert::TryFrom,
55 ffi::OsStr,
56 fmt,
57 future::Future,
58 mem::{self},
59 ops::{Deref, DerefMut},
60 path::{Path, PathBuf},
61 pin::Pin,
62 sync::{
63 atomic::{self, AtomicU32, AtomicUsize, Ordering::SeqCst},
64 Arc,
65 },
66 time::{Duration, Instant},
67};
68use sum_tree::{
69 Bias, Cursor, Edit, KeyedItem, SeekTarget, SumTree, Summary, TreeMap, TreeSet, Unit,
70};
71use text::{LineEnding, Rope};
72use util::{
73 paths::{home_dir, PathMatcher, SanitizedPath},
74 ResultExt,
75};
76pub use worktree_settings::WorktreeSettings;
77
/// Debounce interval for coalescing filesystem events before the worktree
/// rescans the affected paths.
///
/// The previous definition gated this constant behind `#[cfg(feature =
/// "test-support")]` / `#[cfg(not(feature = "test-support"))]` with an
/// identical value in both branches, so the duplication was dead weight; a
/// single unconditional constant is equivalent for every build configuration.
pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
82
/// A set of local or remote files that are being opened as part of a project.
/// Responsible for tracking related FS (for local)/collab (for remote) events and corresponding updates.
/// Stores git repositories data and the diagnostics for the file(s).
///
/// Has an absolute path, and may be set to be visible in Zed UI or not.
/// May correspond to a directory or a single file.
/// Possible examples:
/// * a drag and dropped file — may be added as an invisible, "ephemeral" entry to the current worktree
/// * a directory opened in Zed — may be added as a visible entry to the current worktree
///
/// Uses [`Entry`] to track the state of each file/directory, can look up absolute paths for entries.
pub enum Worktree {
    /// A worktree backed by the local filesystem, kept up to date by a background scanner.
    Local(LocalWorktree),
    /// A worktree mirrored from a collaborator, updated via `proto::UpdateWorktree` messages.
    Remote(RemoteWorktree),
}
98
/// An entry, created in the worktree.
///
/// Distinguishes entries that became part of the worktree's index from those
/// that were written to disk but filtered out by exclusion settings.
#[derive(Debug)]
pub enum CreatedEntry {
    /// Got created and indexed by the worktree, receiving a corresponding entry.
    Included(Entry),
    /// Got created, but not indexed due to falling under exclusion filters.
    Excluded { abs_path: PathBuf },
}
107
/// A file loaded from a worktree, together with its text contents.
pub struct LoadedFile {
    pub file: Arc<File>,
    pub text: String,
}
112
/// A file loaded from a worktree, together with its raw byte contents.
pub struct LoadedBinaryFile {
    pub file: Arc<File>,
    pub content: Vec<u8>,
}
117
/// The on-disk implementation of a [`Worktree`], scanned and watched via the
/// local filesystem.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Requests to rescan specific relative paths; the receiving end is handed
    // to the background scanner in `start_background_scanner`.
    scan_requests_tx: channel::Sender<ScanRequest>,
    // Requests to scan everything under a path prefix.
    path_prefixes_to_scan_tx: channel::Sender<PathPrefixScanRequest>,
    // Watchable flag; initialized to `true` while the initial scan runs.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_tasks: Vec<Task<()>>,
    // When present, snapshot updates are forwarded to an external observer.
    update_observer: Option<UpdateObservationState>,
    fs: Arc<dyn Fs>,
    // Whether the underlying filesystem distinguishes path case.
    fs_case_sensitive: bool,
    // Whether this worktree is shown in the UI.
    visible: bool,
    next_entry_id: Arc<AtomicUsize>,
    settings: WorktreeSettings,
    // When true, entries matching the private-path settings are not marked private.
    share_private_files: bool,
}
132
/// A request to scan all entries under a given path prefix; `done` is
/// signaled when the scan completes.
pub struct PathPrefixScanRequest {
    path: Arc<Path>,
    done: SmallVec<[barrier::Sender; 1]>,
}
137
/// A request to rescan a specific set of worktree-relative paths; `done` is
/// signaled when the scan completes.
struct ScanRequest {
    relative_paths: Vec<Arc<Path>>,
    done: SmallVec<[barrier::Sender; 1]>,
}
142
/// A worktree mirrored from a remote peer. State arrives as
/// [`proto::UpdateWorktree`] messages, is applied to a background snapshot,
/// and then synced to the foreground `snapshot`.
pub struct RemoteWorktree {
    snapshot: Snapshot,
    // Snapshot maintained on a background task, paired with the updates that
    // have been applied to it but not yet forwarded to any observer.
    background_snapshot: Arc<Mutex<(Snapshot, Vec<proto::UpdateWorktree>)>>,
    project_id: u64,
    client: AnyProtoClient,
    file_scan_inclusions: PathMatcher,
    // Feeds incoming updates to the background task that applies them.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Observer that receives each applied update, if installed.
    update_observer: Option<mpsc::UnboundedSender<proto::UpdateWorktree>>,
    // Waiters notified once the snapshot has observed a given scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    visible: bool,
    disconnected: bool,
}
156
/// A cheaply clonable view of a worktree's entries and repositories at a
/// point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root.
    abs_path: SanitizedPath,
    // Derived from the final component of `abs_path` (see `Worktree::local`).
    root_name: String,
    root_char_bag: CharBag,
    // Entries keyed by worktree-relative path.
    entries_by_path: SumTree<Entry>,
    // Entries keyed by entry id, for id-based lookup.
    entries_by_id: SumTree<PathEntry>,
    always_included_entries: Vec<Arc<Path>>,
    repositories: SumTree<RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
180
/// A git repository known to a worktree snapshot, along with its branch and
/// per-path status information.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RepositoryEntry {
    /// The git status entries for this repository.
    /// Note that the paths on this repository are relative to the git work directory.
    /// If the .git folder is external to Zed, these paths will be relative to that folder,
    /// and this data structure might reference files external to this worktree.
    ///
    /// For example:
    ///
    /// my_root_folder/ <-- repository root
    ///   .git
    ///   my_sub_folder_1/
    ///     project_root/ <-- Project root, Zed opened here
    ///       changed_file_1 <-- File with changes, in worktree
    ///   my_sub_folder_2/
    ///     changed_file_2 <-- File with changes, out of worktree
    ///   ...
    ///
    /// With this setup, this field would contain 2 entries, like so:
    /// - my_sub_folder_1/project_root/changed_file_1
    /// - my_sub_folder_2/changed_file_2
    pub(crate) statuses_by_path: SumTree<StatusEntry>,
    // Project entry id of the repository's work directory.
    work_directory_id: ProjectEntryId,
    pub work_directory: WorkDirectory,
    // The currently checked-out branch, if known.
    pub(crate) current_branch: Option<Branch>,
    // Paths that currently have merge conflicts, relative to the repo root.
    pub current_merge_conflicts: TreeSet<RepoPath>,
}
208
impl RepositoryEntry {
    /// Converts a worktree-relative path into a path relative to this
    /// repository's work directory (see [`WorkDirectory::relativize`]).
    pub fn relativize(&self, path: &Path) -> Result<RepoPath> {
        self.work_directory.relativize(path)
    }

    /// Inverse of [`Self::relativize`]: maps a repo-relative path back into
    /// the worktree, returning `None` if it lies outside the worktree.
    pub fn unrelativize(&self, path: &RepoPath) -> Option<Arc<Path>> {
        self.work_directory.unrelativize(path)
    }

    /// Returns true if the given worktree-relative path falls under this
    /// repository's work directory.
    pub fn directory_contains(&self, path: impl AsRef<Path>) -> bool {
        self.work_directory.directory_contains(path)
    }

    /// The currently checked-out branch, if known.
    pub fn branch(&self) -> Option<&Branch> {
        self.current_branch.as_ref()
    }

    /// Project entry id of this repository's work directory.
    pub fn work_directory_id(&self) -> ProjectEntryId {
        self.work_directory_id
    }

    /// Iterates over all git status entries, cloned, in path order.
    pub fn status(&self) -> impl Iterator<Item = StatusEntry> + '_ {
        self.statuses_by_path.iter().cloned()
    }

    /// Number of status entries tracked for this repository.
    pub fn status_len(&self) -> usize {
        self.statuses_by_path.summary().item_summary.count
    }

    /// Aggregated status summary across all entries.
    pub fn status_summary(&self) -> GitSummary {
        self.statuses_by_path.summary().item_summary
    }

    /// Looks up the status entry for a single repo-relative path.
    pub fn status_for_path(&self, path: &RepoPath) -> Option<StatusEntry> {
        self.statuses_by_path
            .get(&PathKey(path.0.clone()), &())
            .cloned()
    }

    /// Builds the protobuf message describing this repository's complete
    /// current state, with every status entry listed as updated and no
    /// removals — suitable as the first message sent to a peer.
    pub fn initial_update(&self) -> proto::RepositoryEntry {
        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id.to_proto(),
            branch: self
                .current_branch
                .as_ref()
                .map(|branch| branch.name.to_string()),
            branch_summary: self.current_branch.as_ref().map(branch_to_proto),
            updated_statuses: self
                .statuses_by_path
                .iter()
                .map(|entry| entry.to_proto())
                .collect(),
            removed_statuses: Default::default(),
            current_merge_conflicts: self
                .current_merge_conflicts
                .iter()
                .map(|repo_path| repo_path.to_proto())
                .collect(),
        }
    }

    /// Builds a protobuf delta describing how this repository differs from
    /// `old`: entries added or changed in `self` go into `updated_statuses`,
    /// and entries present only in `old` go into `removed_statuses`.
    pub fn build_update(&self, old: &Self) -> proto::RepositoryEntry {
        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
        let mut removed_statuses: Vec<String> = Vec::new();

        // Both trees iterate in repo-path order, so the two status lists can
        // be merged in lockstep like a sorted-merge.
        let mut new_statuses = self.statuses_by_path.iter().peekable();
        let mut old_statuses = old.statuses_by_path.iter().peekable();

        let mut current_new_entry = new_statuses.next();
        let mut current_old_entry = old_statuses.next();
        loop {
            match (current_new_entry, current_old_entry) {
                (Some(new_entry), Some(old_entry)) => {
                    match new_entry.repo_path.cmp(&old_entry.repo_path) {
                        // Path exists only in the new snapshot: report as updated.
                        Ordering::Less => {
                            updated_statuses.push(new_entry.to_proto());
                            current_new_entry = new_statuses.next();
                        }
                        // Path exists in both: report only if the status changed.
                        Ordering::Equal => {
                            if new_entry.status != old_entry.status {
                                updated_statuses.push(new_entry.to_proto());
                            }
                            current_old_entry = old_statuses.next();
                            current_new_entry = new_statuses.next();
                        }
                        // Path exists only in the old snapshot: report as removed.
                        Ordering::Greater => {
                            removed_statuses.push(old_entry.repo_path.as_ref().to_proto());
                            current_old_entry = old_statuses.next();
                        }
                    }
                }
                // New side exhausted: every remaining old entry was removed.
                (None, Some(old_entry)) => {
                    removed_statuses.push(old_entry.repo_path.as_ref().to_proto());
                    current_old_entry = old_statuses.next();
                }
                // Old side exhausted: every remaining new entry is an addition.
                (Some(new_entry), None) => {
                    updated_statuses.push(new_entry.to_proto());
                    current_new_entry = new_statuses.next();
                }
                (None, None) => break,
            }
        }

        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id.to_proto(),
            branch: self
                .current_branch
                .as_ref()
                .map(|branch| branch.name.to_string()),
            branch_summary: self.current_branch.as_ref().map(branch_to_proto),
            updated_statuses,
            removed_statuses,
            current_merge_conflicts: self
                .current_merge_conflicts
                .iter()
                .map(|path| path.as_ref().to_proto())
                .collect(),
        }
    }
}
329
330pub fn branch_to_proto(branch: &git::repository::Branch) -> proto::Branch {
331 proto::Branch {
332 is_head: branch.is_head,
333 name: branch.name.to_string(),
334 unix_timestamp: branch
335 .most_recent_commit
336 .as_ref()
337 .map(|commit| commit.commit_timestamp as u64),
338 upstream: branch.upstream.as_ref().map(|upstream| proto::GitUpstream {
339 ref_name: upstream.ref_name.to_string(),
340 tracking: upstream
341 .tracking
342 .status()
343 .map(|upstream| proto::UpstreamTracking {
344 ahead: upstream.ahead as u64,
345 behind: upstream.behind as u64,
346 }),
347 }),
348 most_recent_commit: branch
349 .most_recent_commit
350 .as_ref()
351 .map(|commit| proto::CommitSummary {
352 sha: commit.sha.to_string(),
353 subject: commit.subject.to_string(),
354 commit_timestamp: commit.commit_timestamp,
355 }),
356 }
357}
358
359pub fn proto_to_branch(proto: &proto::Branch) -> git::repository::Branch {
360 git::repository::Branch {
361 is_head: proto.is_head,
362 name: proto.name.clone().into(),
363 upstream: proto
364 .upstream
365 .as_ref()
366 .map(|upstream| git::repository::Upstream {
367 ref_name: upstream.ref_name.to_string().into(),
368 tracking: upstream
369 .tracking
370 .as_ref()
371 .map(|tracking| {
372 git::repository::UpstreamTracking::Tracked(UpstreamTrackingStatus {
373 ahead: tracking.ahead as u32,
374 behind: tracking.behind as u32,
375 })
376 })
377 .unwrap_or(git::repository::UpstreamTracking::Gone),
378 }),
379 most_recent_commit: proto.most_recent_commit.as_ref().map(|commit| {
380 git::repository::CommitSummary {
381 sha: commit.sha.to_string().into(),
382 subject: commit.subject.to_string().into(),
383 commit_timestamp: commit.commit_timestamp,
384 }
385 }),
386 }
387}
388
/// This path corresponds to the 'content path' of a repository in relation
/// to Zed's project root.
/// In the majority of the cases, this is the folder that contains the .git folder.
/// But if a sub-folder of a git repository is opened, this corresponds to the
/// project root and the .git folder is located in a parent directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub enum WorkDirectory {
    /// The repository root lives inside the worktree, at `relative_path`.
    InProject {
        relative_path: Arc<Path>,
    },
    /// The repository root lives above the worktree root.
    AboveProject {
        // Absolute path of the repository root on disk.
        absolute_path: Arc<Path>,
        // Path of the project root, relative to the repository root.
        location_in_repo: Arc<Path>,
    },
}
404
impl WorkDirectory {
    /// Test helper: builds an in-project work directory from a relative path.
    #[cfg(test)]
    fn in_project(path: &str) -> Self {
        let path = Path::new(path);
        Self::InProject {
            relative_path: path.into(),
        }
    }

    /// Test helper: canonicalizes the absolute path of an above-project work
    /// directory; in-project variants are cloned unchanged.
    #[cfg(test)]
    fn canonicalize(&self) -> Self {
        match self {
            WorkDirectory::InProject { relative_path } => WorkDirectory::InProject {
                relative_path: relative_path.clone(),
            },
            WorkDirectory::AboveProject {
                absolute_path,
                location_in_repo,
            } => WorkDirectory::AboveProject {
                absolute_path: absolute_path.canonicalize().unwrap().into(),
                location_in_repo: location_in_repo.clone(),
            },
        }
    }

    /// Whether the repository root lies outside (above) the project root.
    pub fn is_above_project(&self) -> bool {
        match self {
            WorkDirectory::InProject { .. } => false,
            WorkDirectory::AboveProject { .. } => true,
        }
    }

    /// Sort key for this work directory within the worktree.
    /// Above-project repositories sort at the worktree root (empty path).
    fn path_key(&self) -> PathKey {
        match self {
            WorkDirectory::InProject { relative_path } => PathKey(relative_path.clone()),
            WorkDirectory::AboveProject { .. } => PathKey(Path::new("").into()),
        }
    }

    /// Returns true if the given path is a child of the work directory.
    ///
    /// Note that the path may not be a member of this repository, if there
    /// is a repository in a directory between these two paths
    /// external .git folder in a parent folder of the project root.
    #[track_caller]
    pub fn directory_contains(&self, path: impl AsRef<Path>) -> bool {
        let path = path.as_ref();
        // Callers must pass worktree-relative paths.
        debug_assert!(path.is_relative());
        match self {
            WorkDirectory::InProject { relative_path } => path.starts_with(relative_path),
            // An above-project work directory contains the whole worktree.
            WorkDirectory::AboveProject { .. } => true,
        }
    }

    /// relativize returns the given project path relative to the root folder of the
    /// repository.
    /// If the root of the repository (and its .git folder) are located in a parent folder
    /// of the project root folder, then the returned RepoPath is relative to the root
    /// of the repository and not a valid path inside the project.
    pub fn relativize(&self, path: &Path) -> Result<RepoPath> {
        // path is assumed to be relative to worktree root.
        debug_assert!(path.is_relative());
        match self {
            WorkDirectory::InProject { relative_path } => Ok(path
                .strip_prefix(relative_path)
                .map_err(|_| {
                    anyhow!(
                        "could not relativize {:?} against {:?}",
                        path,
                        relative_path
                    )
                })?
                .into()),
            WorkDirectory::AboveProject {
                location_in_repo, ..
            } => {
                // Avoid joining a `/` to location_in_repo in the case of a single-file worktree.
                if path == Path::new("") {
                    Ok(RepoPath(location_in_repo.clone()))
                } else {
                    Ok(location_in_repo.join(path).into())
                }
            }
        }
    }

    /// This is the opposite operation to `relativize` above
    pub fn unrelativize(&self, path: &RepoPath) -> Option<Arc<Path>> {
        match self {
            WorkDirectory::InProject { relative_path } => Some(relative_path.join(path).into()),
            WorkDirectory::AboveProject {
                location_in_repo, ..
            } => {
                // If we fail to strip the prefix, that means this status entry is
                // external to this worktree, and we definitely won't have an entry_id
                path.strip_prefix(location_in_repo).ok().map(Into::into)
            }
        }
    }

    /// Human-readable name for this work directory. For above-project
    /// repositories, renders one `../` per component of `location_in_repo`
    /// followed by the repository root's file name.
    pub fn display_name(&self) -> String {
        match self {
            WorkDirectory::InProject { relative_path } => relative_path.display().to_string(),
            WorkDirectory::AboveProject {
                absolute_path,
                location_in_repo,
            } => {
                let num_of_dots = location_in_repo.components().count();

                "../".repeat(num_of_dots)
                    + &absolute_path
                        .file_name()
                        .map(|s| s.to_string_lossy())
                        .unwrap_or_default()
                    + "/"
            }
        }
    }
}
524
525impl Default for WorkDirectory {
526 fn default() -> Self {
527 Self::InProject {
528 relative_path: Arc::from(Path::new("")),
529 }
530 }
531}
532
/// Newtype over [`ProjectEntryId`] identifying a repository's work directory entry.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
535
// Allow a `WorkDirectoryEntry` to be used wherever a `&ProjectEntryId` is expected.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
543
544impl From<ProjectEntryId> for WorkDirectoryEntry {
545 fn from(value: ProjectEntryId) -> Self {
546 WorkDirectoryEntry(value)
547 }
548}
549
/// A [`Snapshot`] augmented with local-only state: gitignore files, local git
/// repositories, and a handle to the root directory.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    /// The file handle of the root dir
    /// (so we can find it after it's been moved)
    root_file_handle: Option<Arc<dyn fs::FileHandle>>,
}
563
/// Mutable state owned by the background scanner while it walks the
/// filesystem and applies changes to the snapshot.
struct BackgroundScannerState {
    snapshot: LocalSnapshot,
    // Directories whose contents have already been scanned.
    scanned_dirs: HashSet<ProjectEntryId>,
    // Pending prefix-scan requests (everything under these paths).
    path_prefixes_to_scan: HashSet<Arc<Path>>,
    // Individual paths still waiting to be scanned.
    paths_to_scan: HashSet<Arc<Path>>,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entries: HashMap<u64, Entry>,
    changed_paths: Vec<Arc<Path>>,
    // Snapshot as of before the current batch of changes, for diffing.
    prev_snapshot: Snapshot,
    git_hosting_provider_registry: Option<Arc<GitHostingProviderRegistry>>,
    // In-flight git repository scan tasks, keyed by work-directory path.
    repository_scans: HashMap<PathKey, Task<()>>,
}
579
/// A git repository discovered on the local filesystem, with a live handle to
/// the underlying repository and scan bookkeeping.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    pub(crate) work_directory_id: ProjectEntryId,
    pub(crate) work_directory: WorkDirectory,
    // Scan id at which the .git directory contents were last scanned.
    pub(crate) git_dir_scan_id: usize,
    // Scan id at which git statuses were last refreshed.
    pub(crate) status_scan_id: usize,
    // Live handle to the repository, used to read index/committed text.
    pub(crate) repo_ptr: Arc<dyn GitRepository>,
    /// Absolute path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) dot_git_dir_abs_path: Arc<Path>,
    /// Absolute path to the .git file, if we're in a git worktree.
    pub(crate) dot_git_worktree_abs_path: Option<Arc<Path>>,
    pub current_merge_head_shas: Vec<String>,
    pub merge_message: Option<String>,
}
595
// Store local repositories in a `SumTree` ordered by work-directory path.
impl sum_tree::Item for LocalRepositoryEntry {
    type Summary = PathSummary<Unit>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.work_directory.path_key().0,
            item_summary: Unit,
        }
    }
}
606
// Keyed by work-directory path, matching the summary above.
impl KeyedItem for LocalRepositoryEntry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        self.work_directory.path_key()
    }
}
614
impl LocalRepositoryEntry {
    /// Handle to the underlying git repository.
    pub fn repo(&self) -> &Arc<dyn GitRepository> {
        &self.repo_ptr
    }
}
620
// Expose `WorkDirectory` methods (relativize, directory_contains, ...) directly.
impl Deref for LocalRepositoryEntry {
    type Target = WorkDirectory;

    fn deref(&self) -> &Self::Target {
        &self.work_directory
    }
}
628
// A `LocalSnapshot` can be used anywhere a plain `Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
636
// Mutable counterpart to the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
642
/// Progress messages emitted by the background scanner.
#[derive(Debug)]
enum ScanState {
    /// A scan has begun.
    Started,
    /// The snapshot changed; carries the new snapshot and the set of changes.
    Updated {
        snapshot: LocalSnapshot,
        changes: UpdatedEntriesSet,
        // Senders to signal once this update has been processed.
        barrier: SmallVec<[barrier::Sender; 1]>,
        // Whether more scanning is still in progress.
        scanning: bool,
    },
    /// The worktree root itself moved; `None` if its new path is unknown.
    RootUpdated {
        new_path: Option<SanitizedPath>,
    },
}
656
/// State kept while an external observer is subscribed to snapshot updates
/// (see `observe_updates`).
struct UpdateObservationState {
    // Channel through which new snapshots and their changes are delivered.
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    // Signaled to resume sending after a paused observer catches up.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
663
/// Events emitted by a [`Worktree`] entity.
#[derive(Clone)]
pub enum Event {
    /// Entries were added, removed, or modified.
    UpdatedEntries(UpdatedEntriesSet),
    /// Git repository state changed.
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
    /// A single entry was deleted.
    DeletedEntry(ProjectEntryId),
}
670
// Relative path denoting the worktree root, used for settings lookups.
const EMPTY_PATH: &str = "";

impl EventEmitter<Event> for Worktree {}
674
675impl Worktree {
    /// Creates a local worktree rooted at `path`: stats the root, builds the
    /// initial snapshot (seeding a root entry if the path exists), subscribes
    /// to settings changes, and starts the background scanner.
    pub async fn local(
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncApp,
    ) -> Result<Entity<Self>> {
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        // Fall back to case-sensitive behavior if detection fails.
        let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });

        // Keep a handle to the root dir so it can be found after being moved.
        let root_file_handle = fs.open_handle(&abs_path).await.log_err();

        cx.new(move |cx: &mut Context<Worktree>| {
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot::new(
                    cx.entity_id().as_u64(),
                    abs_path
                        .file_name()
                        .map_or(String::new(), |f| f.to_string_lossy().to_string()),
                    abs_path.clone(),
                ),
                root_file_handle,
            };

            let worktree_id = snapshot.id();
            let settings_location = Some(SettingsLocation {
                worktree_id,
                path: Path::new(EMPTY_PATH),
            });

            let settings = WorktreeSettings::get(settings_location, cx).clone();
            // Restart the background scanners whenever this worktree's
            // settings actually change.
            cx.observe_global::<SettingsStore>(move |this, cx| {
                if let Self::Local(this) = this {
                    let settings = WorktreeSettings::get(settings_location, cx).clone();
                    if this.settings != settings {
                        this.settings = settings;
                        this.restart_background_scanners(cx);
                    }
                }
            })
            .detach();

            let share_private_files = false;
            if let Some(metadata) = metadata {
                // Seed the snapshot with an entry for the worktree root itself.
                let mut entry = Entry::new(
                    Arc::from(Path::new("")),
                    &metadata,
                    &next_entry_id,
                    snapshot.root_char_bag,
                    None,
                );
                if !metadata.is_dir {
                    // Single-file worktree: mark the file private according to
                    // settings, unless private files are being shared.
                    entry.is_private = !share_private_files
                        && settings.is_path_private(abs_path.file_name().unwrap().as_ref());
                }
                snapshot.insert_entry(entry, fs.as_ref());
            }

            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
            let mut worktree = LocalWorktree {
                share_private_files,
                next_entry_id,
                snapshot,
                is_scanning: watch::channel_with(true),
                update_observer: None,
                scan_requests_tx,
                path_prefixes_to_scan_tx,
                _background_scanner_tasks: Vec::new(),
                fs,
                fs_case_sensitive,
                visible,
                settings,
            };
            worktree.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
            Worktree::Local(worktree)
        })
    }
766
    /// Creates a remote worktree that mirrors a collaborator's worktree.
    /// Incoming `proto::UpdateWorktree` messages are applied to a background
    /// snapshot, which is then copied to the foreground snapshot and forwarded
    /// to any installed update observer.
    pub fn remote(
        project_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: AnyProtoClient,
        cx: &mut App,
    ) -> Entity<Self> {
        cx.new(|cx: &mut Context<Self>| {
            let snapshot = Snapshot::new(
                worktree.id,
                worktree.root_name,
                Arc::<Path>::from_proto(worktree.abs_path),
            );

            let background_snapshot = Arc::new(Mutex::new((snapshot.clone(), Vec::new())));
            let (background_updates_tx, mut background_updates_rx) = mpsc::unbounded();
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            let worktree_id = snapshot.id();
            let settings_location = Some(SettingsLocation {
                worktree_id,
                path: Path::new(EMPTY_PATH),
            });

            let settings = WorktreeSettings::get(settings_location, cx).clone();
            let worktree = RemoteWorktree {
                client,
                project_id,
                replica_id,
                snapshot,
                file_scan_inclusions: settings.file_scan_inclusions.clone(),
                background_snapshot: background_snapshot.clone(),
                updates_tx: Some(background_updates_tx),
                update_observer: None,
                snapshot_subscriptions: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            };

            // Apply updates to a separate snapshot in a background task, then
            // send them to a foreground task which updates the model.
            cx.background_spawn(async move {
                while let Some(update) = background_updates_rx.next().await {
                    {
                        let mut lock = background_snapshot.lock();
                        if let Err(error) = lock
                            .0
                            .apply_remote_update(update.clone(), &settings.file_scan_inclusions)
                        {
                            log::error!("error applying worktree update: {}", error);
                        }
                        // Queue the raw update for forwarding to observers.
                        lock.1.push(update);
                    }
                    snapshot_updated_tx.send(()).await.ok();
                }
            })
            .detach();

            // On the foreground task, update to the latest snapshot and notify
            // any update observer of all updates that led to that snapshot.
            cx.spawn(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_remote_mut().unwrap();
                        {
                            let mut lock = this.background_snapshot.lock();
                            this.snapshot = lock.0.clone();
                            if let Some(tx) = &this.update_observer {
                                for update in lock.1.drain(..) {
                                    tx.unbounded_send(update).ok();
                                }
                            }
                        };
                        cx.emit(Event::UpdatedEntries(Arc::default()));
                        cx.notify();
                        // Wake any subscribers whose awaited scan id has now
                        // been observed by the snapshot.
                        while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                            if this.observed_snapshot(*scan_id) {
                                let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                let _ = tx.send(());
                            } else {
                                break;
                            }
                        }
                    })?;
                }
                anyhow::Ok(())
            })
            .detach();

            Worktree::Remote(worktree)
        })
    }
859
860 pub fn as_local(&self) -> Option<&LocalWorktree> {
861 if let Worktree::Local(worktree) = self {
862 Some(worktree)
863 } else {
864 None
865 }
866 }
867
868 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
869 if let Worktree::Remote(worktree) = self {
870 Some(worktree)
871 } else {
872 None
873 }
874 }
875
876 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
877 if let Worktree::Local(worktree) = self {
878 Some(worktree)
879 } else {
880 None
881 }
882 }
883
884 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
885 if let Worktree::Remote(worktree) = self {
886 Some(worktree)
887 } else {
888 None
889 }
890 }
891
    /// Whether this worktree is backed by the local filesystem.
    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }
895
896 pub fn is_remote(&self) -> bool {
897 !self.is_local()
898 }
899
    /// Settings lookup location for this worktree as a whole (its root path).
    pub fn settings_location(&self, _: &Context<Self>) -> SettingsLocation<'static> {
        SettingsLocation {
            worktree_id: self.id(),
            path: Path::new(EMPTY_PATH),
        }
    }
906
    /// Returns a clone of the current (non-local-specific) snapshot.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.snapshot.clone(),
            Worktree::Remote(worktree) => worktree.snapshot.clone(),
        }
    }
913
    /// The current scan id of the snapshot (see [`Snapshot::scan_id`]).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
920
    /// Protobuf description of this worktree (id, root name, visibility, path),
    /// as shared with remote peers.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.is_visible(),
            abs_path: self.abs_path().to_proto(),
        }
    }
929
    /// The latest fully completed scan id (see [`Snapshot::completed_scan_id`]).
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
936
    /// Whether this worktree is shown in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
943
    /// The replica id for collaboration; a local worktree is always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
950
    /// Absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone().into(),
            Worktree::Remote(worktree) => worktree.abs_path.clone().into(),
        }
    }
957
    /// A [`File`] for the worktree's root entry, or `None` if there is no
    /// root entry yet.
    pub fn root_file(&self, cx: &Context<Self>) -> Option<Arc<File>> {
        let entry = self.root_entry()?;
        Some(File::for_entry(entry.clone(), cx.entity()))
    }
962
    /// Installs `callback` to receive [`proto::UpdateWorktree`] messages for
    /// every subsequent change to this worktree; dispatches to the variant's
    /// implementation.
    pub fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        match self {
            Worktree::Local(this) => this.observe_updates(project_id, cx, callback),
            Worktree::Remote(this) => this.observe_updates(project_id, cx, callback),
        }
    }
973
974 pub fn stop_observing_updates(&mut self) {
975 match self {
976 Worktree::Local(this) => {
977 this.update_observer.take();
978 }
979 Worktree::Remote(this) => {
980 this.update_observer.take();
981 }
982 }
983 }
984
    /// Test helper: whether an update observer is currently installed.
    #[cfg(any(test, feature = "test-support"))]
    pub fn has_update_observer(&self) -> bool {
        match self {
            Worktree::Local(this) => this.update_observer.is_some(),
            Worktree::Remote(this) => this.update_observer.is_some(),
        }
    }
992
    /// Loads the text contents of `path`. Only supported for local worktrees;
    /// remote worktrees return an error.
    pub fn load_file(&self, path: &Path, cx: &Context<Worktree>) -> Task<Result<LoadedFile>> {
        match self {
            Worktree::Local(this) => this.load_file(path, cx),
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktrees can't yet load files")))
            }
        }
    }
1001
    /// Loads the text of `path` as recorded in the git index of the
    /// repository containing it. Errors if no repository contains the path;
    /// only supported for local worktrees.
    pub fn load_staged_file(&self, path: &Path, cx: &App) -> Task<Result<Option<String>>> {
        match self {
            Worktree::Local(this) => {
                let path = Arc::from(path);
                let snapshot = this.snapshot();
                cx.background_spawn(async move {
                    if let Some(repo) = snapshot.repository_for_path(&path) {
                        // Translate the worktree-relative path into a
                        // repo-relative one before querying the index.
                        if let Some(repo_path) = repo.relativize(&path).log_err() {
                            if let Some(git_repo) =
                                snapshot.git_repositories.get(&repo.work_directory_id)
                            {
                                return Ok(git_repo.repo_ptr.load_index_text(&repo_path));
                            }
                        }
                    }
                    Err(anyhow!("No repository found for {path:?}"))
                })
            }
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktrees can't yet load staged files")))
            }
        }
    }
1025
    /// Loads the last committed text of `path` from the repository containing
    /// it. Errors if no repository contains the path; only supported for
    /// local worktrees.
    pub fn load_committed_file(&self, path: &Path, cx: &App) -> Task<Result<Option<String>>> {
        match self {
            Worktree::Local(this) => {
                let path = Arc::from(path);
                let snapshot = this.snapshot();
                cx.background_spawn(async move {
                    if let Some(repo) = snapshot.repository_for_path(&path) {
                        // Translate the worktree-relative path into a
                        // repo-relative one before reading the committed text.
                        if let Some(repo_path) = repo.relativize(&path).log_err() {
                            if let Some(git_repo) =
                                snapshot.git_repositories.get(&repo.work_directory_id)
                            {
                                return Ok(git_repo.repo_ptr.load_committed_text(&repo_path));
                            }
                        }
                    }
                    Err(anyhow!("No repository found for {path:?}"))
                })
            }
            Worktree::Remote(_) => Task::ready(Err(anyhow!(
                "remote worktrees can't yet load committed files"
            ))),
        }
    }
1049
    /// Loads the raw bytes of `path`. Only supported for local worktrees;
    /// remote worktrees return an error.
    pub fn load_binary_file(
        &self,
        path: &Path,
        cx: &Context<Worktree>,
    ) -> Task<Result<LoadedBinaryFile>> {
        match self {
            Worktree::Local(this) => this.load_binary_file(path, cx),
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktrees can't yet load binary files")))
            }
        }
    }
1062
    /// Writes `text` to `path` with the given line ending, returning the
    /// resulting [`File`]. Only supported for local worktrees.
    pub fn write_file(
        &self,
        path: &Path,
        text: Rope,
        line_ending: LineEnding,
        cx: &Context<Worktree>,
    ) -> Task<Result<Arc<File>>> {
        match self {
            Worktree::Local(this) => this.write_file(path, text, line_ending, cx),
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktree can't yet write files")))
            }
        }
    }
1077
    /// Creates a file or directory at the worktree-relative `path`.
    ///
    /// Local worktrees perform the filesystem work directly; remote worktrees
    /// send a `CreateProjectEntry` RPC and then reconcile the response into
    /// the local replica of the snapshot before resolving.
    pub fn create_entry(
        &mut self,
        path: impl Into<Arc<Path>>,
        is_directory: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let path: Arc<Path> = path.into();
        let worktree_id = self.id();
        match self {
            Worktree::Local(this) => this.create_entry(path, is_directory, cx),
            Worktree::Remote(this) => {
                let project_id = this.project_id;
                let request = this.client.request(proto::CreateProjectEntry {
                    worktree_id: worktree_id.to_proto(),
                    project_id,
                    path: path.as_ref().to_proto(),
                    is_directory,
                });
                cx.spawn(move |this, mut cx| async move {
                    let response = request.await?;
                    match response.entry {
                        // The peer created a tracked entry: insert it into our
                        // replica at the scan id the peer reported.
                        Some(entry) => this
                            .update(&mut cx, |worktree, cx| {
                                worktree.as_remote_mut().unwrap().insert_entry(
                                    entry,
                                    response.worktree_scan_id as usize,
                                    cx,
                                )
                            })?
                            .await
                            .map(CreatedEntry::Included),
                        // No entry in the response: the path was excluded from
                        // scanning on the peer, so only its absolute path is
                        // reported back.
                        None => {
                            let abs_path = this.update(&mut cx, |worktree, _| {
                                worktree
                                    .absolutize(&path)
                                    .with_context(|| format!("absolutizing {path:?}"))
                            })??;
                            Ok(CreatedEntry::Excluded { abs_path })
                        }
                    }
                })
            }
        }
    }
1122
    /// Deletes the entry with `entry_id`, optionally moving it to the trash.
    ///
    /// Returns `None` when no such entry exists. Emits `Event::DeletedEntry`
    /// for the entry and all of its descendants immediately; the returned
    /// task resolves once the underlying deletion completes.
    pub fn delete_entry(
        &mut self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &mut Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let task = match self {
            Worktree::Local(this) => this.delete_entry(entry_id, trash, cx),
            Worktree::Remote(this) => this.delete_entry(entry_id, trash, cx),
        }?;

        // Look the entry up after creating the task: the deletion is
        // asynchronous, so the entry is still present in the snapshot here.
        let entry = match self {
            Worktree::Local(ref this) => this.entry_for_id(entry_id),
            Worktree::Remote(ref this) => this.entry_for_id(entry_id),
        }?;

        let mut ids = vec![entry_id];
        let path = &*entry.path;

        // Deleting a directory implicitly deletes everything beneath it.
        self.get_children_ids_recursive(path, &mut ids);

        for id in ids {
            cx.emit(Event::DeletedEntry(id));
        }
        Some(task)
    }
1149
1150 fn get_children_ids_recursive(&self, path: &Path, ids: &mut Vec<ProjectEntryId>) {
1151 let children_iter = self.child_entries(path);
1152 for child in children_iter {
1153 ids.push(child.id);
1154 self.get_children_ids_recursive(&child.path, ids);
1155 }
1156 }
1157
1158 pub fn rename_entry(
1159 &mut self,
1160 entry_id: ProjectEntryId,
1161 new_path: impl Into<Arc<Path>>,
1162 cx: &Context<Self>,
1163 ) -> Task<Result<CreatedEntry>> {
1164 let new_path = new_path.into();
1165 match self {
1166 Worktree::Local(this) => this.rename_entry(entry_id, new_path, cx),
1167 Worktree::Remote(this) => this.rename_entry(entry_id, new_path, cx),
1168 }
1169 }
1170
    /// Copies the entry with `entry_id` to `new_path`.
    ///
    /// `relative_worktree_source_path`, when provided, overrides the source
    /// path used for the copy (see the local implementation). Remote
    /// worktrees send a `CopyProjectEntry` RPC and reconcile the response
    /// into the local replica. Resolves to `None` when the copy produced no
    /// tracked entry.
    pub fn copy_entry(
        &mut self,
        entry_id: ProjectEntryId,
        relative_worktree_source_path: Option<PathBuf>,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Self>,
    ) -> Task<Result<Option<Entry>>> {
        let new_path: Arc<Path> = new_path.into();
        match self {
            Worktree::Local(this) => {
                this.copy_entry(entry_id, relative_worktree_source_path, new_path, cx)
            }
            Worktree::Remote(this) => {
                let relative_worktree_source_path = relative_worktree_source_path
                    .map(|relative_worktree_source_path| relative_worktree_source_path.to_proto());
                let response = this.client.request(proto::CopyProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                    relative_worktree_source_path,
                    new_path: new_path.to_proto(),
                });
                cx.spawn(move |this, mut cx| async move {
                    let response = response.await?;
                    match response.entry {
                        // Insert the copied entry into our replica at the
                        // scan id the peer reported.
                        Some(entry) => this
                            .update(&mut cx, |worktree, cx| {
                                worktree.as_remote_mut().unwrap().insert_entry(
                                    entry,
                                    response.worktree_scan_id as usize,
                                    cx,
                                )
                            })?
                            .await
                            .map(Some),
                        None => Ok(None),
                    }
                })
            }
        }
    }
1211
1212 pub fn copy_external_entries(
1213 &mut self,
1214 target_directory: PathBuf,
1215 paths: Vec<Arc<Path>>,
1216 overwrite_existing_files: bool,
1217 cx: &Context<Worktree>,
1218 ) -> Task<Result<Vec<ProjectEntryId>>> {
1219 match self {
1220 Worktree::Local(this) => {
1221 this.copy_external_entries(target_directory, paths, overwrite_existing_files, cx)
1222 }
1223 _ => Task::ready(Err(anyhow!(
1224 "Copying external entries is not supported for remote worktrees"
1225 ))),
1226 }
1227 }
1228
    /// Requests that the directory entry with `entry_id` be scanned.
    ///
    /// Returns `None` when the entry is unknown. Remote worktrees send an
    /// `ExpandProjectEntry` RPC and wait for the local replica's snapshot to
    /// reach the scan id reported by the peer.
    pub fn expand_entry(
        &mut self,
        entry_id: ProjectEntryId,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        match self {
            Worktree::Local(this) => this.expand_entry(entry_id, cx),
            Worktree::Remote(this) => {
                let response = this.client.request(proto::ExpandProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                });
                Some(cx.spawn(move |this, mut cx| async move {
                    let response = response.await?;
                    // Wait until our replica has observed the peer's scan.
                    this.update(&mut cx, |this, _| {
                        this.as_remote_mut()
                            .unwrap()
                            .wait_for_snapshot(response.worktree_scan_id as usize)
                    })?
                    .await?;
                    Ok(())
                }))
            }
        }
    }
1254
    /// Requests a recursive scan of everything beneath the entry with
    /// `entry_id`.
    ///
    /// Returns `None` when the entry is unknown (local case). Remote
    /// worktrees send an `ExpandAllForProjectEntry` RPC and wait for the
    /// local replica's snapshot to reach the scan id reported by the peer.
    pub fn expand_all_for_entry(
        &mut self,
        entry_id: ProjectEntryId,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        match self {
            Worktree::Local(this) => this.expand_all_for_entry(entry_id, cx),
            Worktree::Remote(this) => {
                let response = this.client.request(proto::ExpandAllForProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                });
                Some(cx.spawn(move |this, mut cx| async move {
                    let response = response.await?;
                    // Wait until our replica has observed the peer's scan.
                    this.update(&mut cx, |this, _| {
                        this.as_remote_mut()
                            .unwrap()
                            .wait_for_snapshot(response.worktree_scan_id as usize)
                    })?
                    .await?;
                    Ok(())
                }))
            }
        }
    }
1280
    /// Handles a `CreateProjectEntry` RPC from a collaborator.
    ///
    /// The scan id captured before the operation is echoed back so the remote
    /// replica can synchronize to it. Entries created at excluded paths are
    /// reported with `entry: None`.
    pub async fn handle_create_entry(
        this: Entity<Self>,
        request: proto::CreateProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, entry) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.create_entry(
                    Arc::<Path>::from_proto(request.path),
                    request.is_directory,
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            entry: match &entry.await? {
                CreatedEntry::Included(entry) => Some(entry.into()),
                CreatedEntry::Excluded { .. } => None,
            },
            worktree_scan_id: scan_id as u64,
        })
    }
1304
    /// Handles a `DeleteProjectEntry` RPC from a collaborator.
    ///
    /// Fails when the entry id is unknown; otherwise deletes (or trashes)
    /// the entry and echoes the scan id captured before the operation.
    pub async fn handle_delete_entry(
        this: Entity<Self>,
        request: proto::DeleteProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.delete_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    request.use_trash,
                    cx,
                ),
            )
        })?;
        task.ok_or_else(|| anyhow!("invalid entry"))?.await?;
        Ok(proto::ProjectEntryResponse {
            entry: None,
            worktree_scan_id: scan_id as u64,
        })
    }
1326
    /// Handles an `ExpandProjectEntry` RPC from a collaborator.
    ///
    /// Scans the requested entry, then reports the scan id observed after
    /// the scan completed so the remote replica can synchronize to it.
    pub async fn handle_expand_entry(
        this: Entity<Self>,
        request: proto::ExpandProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ExpandProjectEntryResponse> {
        let task = this.update(&mut cx, |this, cx| {
            this.expand_entry(ProjectEntryId::from_proto(request.entry_id), cx)
        })?;
        task.ok_or_else(|| anyhow!("no such entry"))?.await?;
        let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
        Ok(proto::ExpandProjectEntryResponse {
            worktree_scan_id: scan_id as u64,
        })
    }
1341
    /// Handles an `ExpandAllForProjectEntry` RPC from a collaborator.
    ///
    /// Recursively scans beneath the requested entry, then reports the scan
    /// id observed after the scan completed.
    pub async fn handle_expand_all_for_entry(
        this: Entity<Self>,
        request: proto::ExpandAllForProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ExpandAllForProjectEntryResponse> {
        let task = this.update(&mut cx, |this, cx| {
            this.expand_all_for_entry(ProjectEntryId::from_proto(request.entry_id), cx)
        })?;
        task.ok_or_else(|| anyhow!("no such entry"))?.await?;
        let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
        Ok(proto::ExpandAllForProjectEntryResponse {
            worktree_scan_id: scan_id as u64,
        })
    }
1356
    /// Handles a `RenameProjectEntry` RPC from a collaborator.
    ///
    /// Echoes the scan id captured before the rename; a rename landing at an
    /// excluded path is reported with `entry: None`.
    pub async fn handle_rename_entry(
        this: Entity<Self>,
        request: proto::RenameProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.rename_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    Arc::<Path>::from_proto(request.new_path),
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            entry: match &task.await? {
                CreatedEntry::Included(entry) => Some(entry.into()),
                CreatedEntry::Excluded { .. } => None,
            },
            worktree_scan_id: scan_id as u64,
        })
    }
1380
    /// Handles a `CopyProjectEntry` RPC from a collaborator.
    ///
    /// Echoes the scan id captured before the copy; `entry` is `None` when
    /// the copy produced no tracked entry.
    pub async fn handle_copy_entry(
        this: Entity<Self>,
        request: proto::CopyProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            let relative_worktree_source_path = request
                .relative_worktree_source_path
                .map(PathBuf::from_proto);
            (
                this.scan_id(),
                this.copy_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    relative_worktree_source_path,
                    PathBuf::from_proto(request.new_path),
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            entry: task.await?.as_ref().map(|e| e.into()),
            worktree_scan_id: scan_id as u64,
        })
    }
1405}
1406
1407impl LocalWorktree {
    /// The filesystem implementation backing this worktree.
    pub fn fs(&self) -> &Arc<dyn Fs> {
        &self.fs
    }
1411
1412 pub fn is_path_private(&self, path: &Path) -> bool {
1413 !self.share_private_files && self.settings.is_path_private(path)
1414 }
1415
    /// Tears down and restarts the background scanner, replacing the request
    /// channels so stale requests to the old scanner are dropped.
    fn restart_background_scanners(&mut self, cx: &Context<Worktree>) {
        let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
        let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
        self.scan_requests_tx = scan_requests_tx;
        self.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;

        self.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
        let always_included_entries = mem::take(&mut self.snapshot.always_included_entries);
        log::debug!(
            "refreshing entries for the following always included paths: {:?}",
            always_included_entries
        );

        // Cleans up old always included entries to ensure they get updated properly. Otherwise,
        // nested always included entries may not get updated and will result in out-of-date info.
        self.refresh_entries_for_paths(always_included_entries);
    }
1433
    /// Spawns the two tasks that keep this worktree up to date:
    ///
    /// 1. a background `BackgroundScanner` that watches the filesystem and
    ///    services scan requests received on the given channels, and
    /// 2. a foreground updater that applies the scanner's state messages to
    ///    this worktree's snapshot.
    ///
    /// Replaces any previously running scanner tasks and resets the
    /// `is_scanning` watch to `true`.
    fn start_background_scanner(
        &mut self,
        scan_requests_rx: channel::Receiver<ScanRequest>,
        path_prefixes_to_scan_rx: channel::Receiver<PathPrefixScanRequest>,
        cx: &Context<Worktree>,
    ) {
        let snapshot = self.snapshot();
        let share_private_files = self.share_private_files;
        let next_entry_id = self.next_entry_id.clone();
        let fs = self.fs.clone();
        let git_hosting_provider_registry = GitHostingProviderRegistry::try_global(cx);
        let settings = self.settings.clone();
        // Scanner -> foreground state updates flow over this channel.
        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
        let background_scanner = cx.background_spawn({
            let abs_path = snapshot.abs_path.as_path().to_path_buf();
            let background = cx.background_executor().clone();
            async move {
                let (events, watcher) = fs.watch(&abs_path, FS_WATCH_LATENCY).await;
                // Default to case-sensitive if detection fails; the choice
                // affects rename handling (see `rename_entry`).
                let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
                    log::error!("Failed to determine whether filesystem is case sensitive: {e:#}");
                    true
                });

                let mut scanner = BackgroundScanner {
                    fs,
                    fs_case_sensitive,
                    status_updates_tx: scan_states_tx,
                    executor: background,
                    scan_requests_rx,
                    path_prefixes_to_scan_rx,
                    next_entry_id,
                    state: Arc::new(Mutex::new(BackgroundScannerState {
                        prev_snapshot: snapshot.snapshot.clone(),
                        snapshot,
                        scanned_dirs: Default::default(),
                        path_prefixes_to_scan: Default::default(),
                        paths_to_scan: Default::default(),
                        removed_entries: Default::default(),
                        changed_paths: Default::default(),
                        repository_scans: HashMap::default(),
                        git_hosting_provider_registry,
                    })),
                    phase: BackgroundScannerPhase::InitialScan,
                    share_private_files,
                    settings,
                    watcher,
                };

                scanner
                    .run(Box::pin(
                        events.map(|events| events.into_iter().map(Into::into).collect()),
                    ))
                    .await;
            }
        });
        // Foreground task: apply scanner state messages to the worktree.
        let scan_state_updater = cx.spawn(|this, mut cx| async move {
            while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
                this.update(&mut cx, |this, cx| {
                    let this = this.as_local_mut().unwrap();
                    match state {
                        ScanState::Started => {
                            *this.is_scanning.0.borrow_mut() = true;
                        }
                        ScanState::Updated {
                            snapshot,
                            changes,
                            barrier,
                            scanning,
                        } => {
                            *this.is_scanning.0.borrow_mut() = scanning;
                            this.set_snapshot(snapshot, changes, cx);
                            // Dropping the barrier signals the scan requester.
                            drop(barrier);
                        }
                        ScanState::RootUpdated { new_path } => {
                            this.update_abs_path_and_refresh(new_path, cx);
                        }
                    }
                    cx.notify();
                })
                .ok();
            }
        });
        self._background_scanner_tasks = vec![background_scanner, scan_state_updater];
        self.is_scanning = watch::channel_with(true);
    }
1519
    /// Installs `new_snapshot` as the current snapshot, forwarding the
    /// changes to any update observer and emitting `UpdatedEntries` /
    /// `UpdatedGitRepositories` events when non-empty.
    fn set_snapshot(
        &mut self,
        new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut Context<Worktree>,
    ) {
        // Diff git repositories against the old snapshot before replacing it.
        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);
        self.snapshot = new_snapshot;

        if let Some(share) = self.update_observer.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((
                    self.snapshot.clone(),
                    entry_changes.clone(),
                    repo_changes.clone(),
                ))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }
1547
    /// Computes the set of git repositories that were added, removed, or
    /// rescanned between two snapshots.
    ///
    /// Performs a merge-join over the two `git_repositories` maps, which are
    /// iterated in a common (entry-id) order, pairing up matching entries and
    /// classifying the rest as additions or removals.
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();

        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        // Present only in the new snapshot: repository added.
                        Ordering::Less => {
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        // Present in both: report only if a scan id advanced.
                        Ordering::Equal => {
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id
                                || new_repo.status_scan_id != old_repo.status_scan_id
                            {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repositories
                                        .get(&PathKey(entry.path.clone()), &())
                                        .cloned();
                                    changes.push((
                                        entry.path.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        // Present only in the old snapshot: repository removed.
                        Ordering::Greater => {
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repositories
                                    .get(&PathKey(entry.path.clone()), &())
                                    .cloned();
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                // Old snapshot exhausted: the rest are additions.
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                // New snapshot exhausted: the rest are removals.
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repositories
                            .get(&PathKey(entry.path.clone()), &())
                            .cloned();
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Helper to turn a peeked `(&K, &V)` into an owned `(K, V)`.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
1645
1646 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1647 let mut is_scanning_rx = self.is_scanning.1.clone();
1648 async move {
1649 let mut is_scanning = *is_scanning_rx.borrow();
1650 while is_scanning {
1651 if let Some(value) = is_scanning_rx.recv().await {
1652 is_scanning = value;
1653 } else {
1654 break;
1655 }
1656 }
1657 }
1658 }
1659
    /// A clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
1663
    /// A clone of this worktree's settings.
    pub fn settings(&self) -> WorktreeSettings {
        self.settings.clone()
    }
1667
    /// Looks up the local state for `repo` by its work-directory id.
    pub fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory_id)
    }
1671
    /// Loads the raw bytes of `path` and refreshes its entry concurrently.
    ///
    /// If the path turns out to be excluded from scanning (no entry after the
    /// refresh), a detached `File` is synthesized from filesystem metadata.
    fn load_binary_file(
        &self,
        path: &Path,
        cx: &Context<Worktree>,
    ) -> Task<Result<LoadedBinaryFile>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Kick off the entry refresh before reading, so both run concurrently.
        let entry = self.refresh_entry(path.clone(), None, cx);
        let is_private = self.is_path_private(path.as_ref());

        let worktree = cx.weak_entity();
        cx.background_spawn(async move {
            let abs_path = abs_path?;
            let content = fs.load_bytes(&abs_path).await?;

            let worktree = worktree
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            let file = match entry.await? {
                Some(entry) => File::for_entry(entry, worktree),
                // Excluded path: build a File without an entry id.
                None => {
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    Arc::new(File {
                        entry_id: None,
                        worktree,
                        path,
                        disk_state: DiskState::Present {
                            mtime: metadata.mtime,
                        },
                        is_local: true,
                        is_private,
                    })
                }
            };

            Ok(LoadedBinaryFile { file, content })
        })
    }
1719
    /// Loads the text of `path` and refreshes its entry concurrently.
    ///
    /// Mirrors `load_binary_file`, but loads text instead of bytes. If the
    /// path turns out to be excluded from scanning (no entry after the
    /// refresh), a detached `File` is synthesized from filesystem metadata.
    fn load_file(&self, path: &Path, cx: &Context<Worktree>) -> Task<Result<LoadedFile>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Kick off the entry refresh before reading, so both run concurrently.
        let entry = self.refresh_entry(path.clone(), None, cx);
        let is_private = self.is_path_private(path.as_ref());

        cx.spawn(|this, _cx| async move {
            let abs_path = abs_path?;
            let text = fs.load(&abs_path).await?;

            let worktree = this
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            let file = match entry.await? {
                Some(entry) => File::for_entry(entry, worktree),
                // Excluded path: build a File without an entry id.
                None => {
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    Arc::new(File {
                        entry_id: None,
                        worktree,
                        path,
                        disk_state: DiskState::Present {
                            mtime: metadata.mtime,
                        },
                        is_local: true,
                        is_private,
                    })
                }
            };

            Ok(LoadedFile { file, text })
        })
    }
1762
1763 /// Find the lowest path in the worktree's datastructures that is an ancestor
1764 fn lowest_ancestor(&self, path: &Path) -> PathBuf {
1765 let mut lowest_ancestor = None;
1766 for path in path.ancestors() {
1767 if self.entry_for_path(path).is_some() {
1768 lowest_ancestor = Some(path.to_path_buf());
1769 break;
1770 }
1771 }
1772
1773 lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
1774 }
1775
    /// Creates a file (empty) or directory at the worktree-relative `path`,
    /// then refreshes the new entry and every intermediate directory between
    /// it and its lowest pre-existing ancestor.
    ///
    /// Paths excluded from scanning are created on disk but reported as
    /// `CreatedEntry::Excluded`.
    fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let path = path.into();
        let abs_path = match self.absolutize(&path) {
            Ok(path) => path,
            Err(e) => return Task::ready(Err(e.context(format!("absolutizing path {path:?}")))),
        };
        let path_excluded = self.settings.is_path_excluded(&abs_path);
        let fs = self.fs.clone();
        let task_abs_path = abs_path.clone();
        let write = cx.background_spawn(async move {
            if is_dir {
                fs.create_dir(&task_abs_path)
                    .await
                    .with_context(|| format!("creating directory {task_abs_path:?}"))
            } else {
                fs.save(&task_abs_path, &Rope::default(), LineEnding::default())
                    .await
                    .with_context(|| format!("creating file {task_abs_path:?}"))
            }
        });

        // Capture before the write so we know which ancestors were created
        // implicitly and need refreshing afterwards.
        let lowest_ancestor = self.lowest_ancestor(&path);
        cx.spawn(|this, mut cx| async move {
            write.await?;
            if path_excluded {
                return Ok(CreatedEntry::Excluded { abs_path });
            }

            let (result, refreshes) = this.update(&mut cx, |this, cx| {
                let mut refreshes = Vec::new();
                // Refresh each newly-created intermediate directory.
                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
                for refresh_path in refresh_paths.ancestors() {
                    if refresh_path == Path::new("") {
                        continue;
                    }
                    let refresh_full_path = lowest_ancestor.join(refresh_path);

                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
                        refresh_full_path.into(),
                        None,
                        cx,
                    ));
                }
                (
                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
                    refreshes,
                )
            })?;
            for refresh in refreshes {
                refresh.await.log_err();
            }

            Ok(result
                .await?
                .map(CreatedEntry::Included)
                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
        })
    }
1839
    /// Saves `text` to the worktree-relative `path` with the given line
    /// ending, then refreshes the entry and returns the resulting `File`.
    ///
    /// For excluded paths (no entry after the refresh), a detached `File` is
    /// synthesized from filesystem metadata.
    fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &Context<Worktree>,
    ) -> Task<Result<Arc<File>>> {
        let path = path.into();
        let fs = self.fs.clone();
        let is_private = self.is_path_private(&path);
        let Ok(abs_path) = self.absolutize(&path) else {
            return Task::ready(Err(anyhow!("invalid path {path:?}")));
        };

        let write = cx.background_spawn({
            let fs = fs.clone();
            let abs_path = abs_path.clone();
            async move { fs.save(&abs_path, &text, line_ending).await }
        });

        cx.spawn(move |this, mut cx| async move {
            write.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut()
                        .unwrap()
                        .refresh_entry(path.clone(), None, cx)
                })?
                .await?;
            let worktree = this.upgrade().ok_or_else(|| anyhow!("worktree dropped"))?;
            if let Some(entry) = entry {
                Ok(File::for_entry(entry, worktree))
            } else {
                // Excluded path: build a File without an entry id.
                let metadata = fs
                    .metadata(&abs_path)
                    .await
                    .with_context(|| {
                        format!("Fetching metadata after saving the excluded buffer {abs_path:?}")
                    })?
                    .with_context(|| {
                        format!("Excluded buffer {path:?} got removed during saving")
                    })?;
                Ok(Arc::new(File {
                    worktree,
                    path,
                    disk_state: DiskState::Present {
                        mtime: metadata.mtime,
                    },
                    entry_id: None,
                    is_local: true,
                    is_private,
                }))
            }
        })
    }
1895
    /// Deletes the entry with `entry_id` from disk (to the trash when `trash`
    /// is set), then rescans the deleted path so the snapshot catches up.
    ///
    /// Returns `None` when no such entry exists.
    fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let fs = self.fs.clone();

        let delete = cx.background_spawn(async move {
            // Directories are removed recursively; files directly.
            if entry.is_file() {
                if trash {
                    fs.trash_file(&abs_path?, Default::default()).await?;
                } else {
                    fs.remove_file(&abs_path?, Default::default()).await?;
                }
            } else if trash {
                fs.trash_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            } else {
                fs.remove_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(entry.path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let path = delete.await?;
            // Wait for the rescan of the deleted path to complete.
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entries_for_paths(vec![path])
            })?
            .recv()
            .await;
            Ok(())
        }))
    }
1947
    /// Rename an entry.
    ///
    /// `new_path` is the new relative path to the worktree root.
    /// If the root entry is renamed then `new_path` is the new root name instead.
    ///
    /// On case-insensitive filesystems, a case-only rename (e.g. `foo` ->
    /// `FOO`) is performed with overwrite enabled to sidestep the
    /// "file already exists" failure it would otherwise hit.
    fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Err(anyhow!("no entry to rename for id {entry_id:?}"))),
        };
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);

        // Renaming the root means renaming the worktree directory itself, so
        // the destination is resolved against the root's parent directory.
        let is_root_entry = self.root_entry().is_some_and(|e| e.id == entry_id);
        let abs_new_path = if is_root_entry {
            let Some(root_parent_path) = self.abs_path().parent() else {
                return Task::ready(Err(anyhow!("no parent for path {:?}", self.abs_path)));
            };
            root_parent_path.join(&new_path)
        } else {
            let Ok(absolutize_path) = self.absolutize(&new_path) else {
                return Task::ready(Err(anyhow!("absolutizing path {new_path:?}")));
            };
            absolutize_path
        };
        let abs_path = abs_new_path.clone();
        let fs = self.fs.clone();
        let case_sensitive = self.fs_case_sensitive;
        let rename = cx.background_spawn(async move {
            let abs_old_path = abs_old_path?;
            let abs_new_path = abs_new_path;

            let abs_old_path_lower = abs_old_path.to_str().map(|p| p.to_lowercase());
            let abs_new_path_lower = abs_new_path.to_str().map(|p| p.to_lowercase());

            // If we're on a case-insensitive FS and we're doing a case-only rename (i.e. `foobar` to `FOOBAR`)
            // we want to overwrite, because otherwise we run into a file-already-exists error.
            let overwrite = !case_sensitive
                && abs_old_path != abs_new_path
                && abs_old_path_lower == abs_new_path_lower;

            fs.rename(
                &abs_old_path,
                &abs_new_path,
                fs::RenameOptions {
                    overwrite,
                    ..Default::default()
                },
            )
            .await
            .with_context(|| format!("Renaming {abs_old_path:?} into {abs_new_path:?}"))
        });

        cx.spawn(|this, mut cx| async move {
            rename.await?;
            Ok(this
                .update(&mut cx, |this, cx| {
                    let local = this.as_local_mut().unwrap();
                    if is_root_entry {
                        // We eagerly update `abs_path` and refresh this worktree.
                        // Otherwise, the FS watcher would do it on the `RootUpdated` event,
                        // but with a noticeable delay, so we handle it proactively.
                        local.update_abs_path_and_refresh(
                            Some(SanitizedPath::from(abs_path.clone())),
                            cx,
                        );
                        Task::ready(Ok(this.root_entry().cloned()))
                    } else {
                        local.refresh_entry(new_path.clone(), Some(old_path), cx)
                    }
                })?
                .await?
                .map(CreatedEntry::Included)
                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
        })
    }
2028
    /// Recursively copies the entry with `entry_id` to `new_path`, then
    /// refreshes the destination entry.
    ///
    /// When `relative_worktree_source_path` is provided it is used as the
    /// copy source (resolved against the worktree root) instead of the
    /// entry's own path. Resolves to `Ok(None)` when the entry id is unknown.
    fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        relative_worktree_source_path: Option<PathBuf>,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Ok(None)),
        };
        let new_path = new_path.into();
        let abs_old_path =
            if let Some(relative_worktree_source_path) = relative_worktree_source_path {
                Ok(self.abs_path().join(relative_worktree_source_path))
            } else {
                self.absolutize(&old_path)
            };
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background_spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path?,
                &abs_new_path?,
                Default::default(),
            )
            .await
        });

        cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })?
            .await
        })
    }
2069
2070 pub fn copy_external_entries(
2071 &self,
2072 target_directory: PathBuf,
2073 paths: Vec<Arc<Path>>,
2074 overwrite_existing_files: bool,
2075 cx: &Context<Worktree>,
2076 ) -> Task<Result<Vec<ProjectEntryId>>> {
2077 let worktree_path = self.abs_path().clone();
2078 let fs = self.fs.clone();
2079 let paths = paths
2080 .into_iter()
2081 .filter_map(|source| {
2082 let file_name = source.file_name()?;
2083 let mut target = target_directory.clone();
2084 target.push(file_name);
2085
2086 // Do not allow copying the same file to itself.
2087 if source.as_ref() != target.as_path() {
2088 Some((source, target))
2089 } else {
2090 None
2091 }
2092 })
2093 .collect::<Vec<_>>();
2094
2095 let paths_to_refresh = paths
2096 .iter()
2097 .filter_map(|(_, target)| Some(target.strip_prefix(&worktree_path).ok()?.into()))
2098 .collect::<Vec<_>>();
2099
2100 cx.spawn(|this, cx| async move {
2101 cx.background_spawn(async move {
2102 for (source, target) in paths {
2103 copy_recursive(
2104 fs.as_ref(),
2105 &source,
2106 &target,
2107 fs::CopyOptions {
2108 overwrite: overwrite_existing_files,
2109 ..Default::default()
2110 },
2111 )
2112 .await
2113 .with_context(|| {
2114 anyhow!("Failed to copy file from {source:?} to {target:?}")
2115 })?;
2116 }
2117 Ok::<(), anyhow::Error>(())
2118 })
2119 .await
2120 .log_err();
2121 let mut refresh = cx.read_entity(
2122 &this.upgrade().with_context(|| "Dropped worktree")?,
2123 |this, _| {
2124 Ok::<postage::barrier::Receiver, anyhow::Error>(
2125 this.as_local()
2126 .with_context(|| "Worktree is not local")?
2127 .refresh_entries_for_paths(paths_to_refresh.clone()),
2128 )
2129 },
2130 )??;
2131
2132 cx.background_spawn(async move {
2133 refresh.next().await;
2134 Ok::<(), anyhow::Error>(())
2135 })
2136 .await
2137 .log_err();
2138
2139 let this = this.upgrade().with_context(|| "Dropped worktree")?;
2140 cx.read_entity(&this, |this, _| {
2141 paths_to_refresh
2142 .iter()
2143 .filter_map(|path| Some(this.entry_for_path(path)?.id))
2144 .collect()
2145 })
2146 })
2147 }
2148
2149 fn expand_entry(
2150 &self,
2151 entry_id: ProjectEntryId,
2152 cx: &Context<Worktree>,
2153 ) -> Option<Task<Result<()>>> {
2154 let path = self.entry_for_id(entry_id)?.path.clone();
2155 let mut refresh = self.refresh_entries_for_paths(vec![path]);
2156 Some(cx.background_spawn(async move {
2157 refresh.next().await;
2158 Ok(())
2159 }))
2160 }
2161
2162 fn expand_all_for_entry(
2163 &self,
2164 entry_id: ProjectEntryId,
2165 cx: &Context<Worktree>,
2166 ) -> Option<Task<Result<()>>> {
2167 let path = self.entry_for_id(entry_id).unwrap().path.clone();
2168 let mut rx = self.add_path_prefix_to_scan(path.clone());
2169 Some(cx.background_spawn(async move {
2170 rx.next().await;
2171 Ok(())
2172 }))
2173 }
2174
2175 fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
2176 let (tx, rx) = barrier::channel();
2177 self.scan_requests_tx
2178 .try_send(ScanRequest {
2179 relative_paths: paths,
2180 done: smallvec![tx],
2181 })
2182 .ok();
2183 rx
2184 }
2185
2186 pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) -> barrier::Receiver {
2187 let (tx, rx) = barrier::channel();
2188 self.path_prefixes_to_scan_tx
2189 .try_send(PathPrefixScanRequest {
2190 path: path_prefix,
2191 done: smallvec![tx],
2192 })
2193 .ok();
2194 rx
2195 }
2196
2197 fn refresh_entry(
2198 &self,
2199 path: Arc<Path>,
2200 old_path: Option<Arc<Path>>,
2201 cx: &Context<Worktree>,
2202 ) -> Task<Result<Option<Entry>>> {
2203 if self.settings.is_path_excluded(&path) {
2204 return Task::ready(Ok(None));
2205 }
2206 let paths = if let Some(old_path) = old_path.as_ref() {
2207 vec![old_path.clone(), path.clone()]
2208 } else {
2209 vec![path.clone()]
2210 };
2211 let t0 = Instant::now();
2212 let mut refresh = self.refresh_entries_for_paths(paths);
2213 cx.spawn(move |this, mut cx| async move {
2214 refresh.recv().await;
2215 log::trace!("refreshed entry {path:?} in {:?}", t0.elapsed());
2216 let new_entry = this.update(&mut cx, |this, _| {
2217 this.entry_for_path(path)
2218 .cloned()
2219 .ok_or_else(|| anyhow!("failed to read path after update"))
2220 })??;
2221 Ok(Some(new_entry))
2222 })
2223 }
2224
    /// Registers `callback` to receive this worktree's state as a stream of
    /// `UpdateWorktree` messages: one initial full snapshot, followed by
    /// incremental deltas as the background scanner reports changes.
    ///
    /// If an observer is already installed, this just signals it to resume
    /// sending (used after a reconnect) and returns.
    fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: Send + Future<Output = bool>,
    {
        if let Some(observer) = self.update_observer.as_mut() {
            *observer.resume_updates.borrow_mut() = ();
            return;
        }

        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
        let (snapshots_tx, mut snapshots_rx) =
            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
        // Seed the stream with the current snapshot so the first message sent
        // below is a full initial update (the empty change sets are unused
        // for the first iteration).
        snapshots_tx
            .unbounded_send((self.snapshot(), Arc::default(), Arc::default()))
            .ok();

        let worktree_id = cx.entity_id().as_u64();
        let _maintain_remote_snapshot = cx.background_spawn(async move {
            let mut is_first = true;
            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
                let update = if is_first {
                    is_first = false;
                    snapshot.build_initial_update(project_id, worktree_id)
                } else {
                    snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes)
                };

                // Large updates are chunked; each chunk must be acknowledged
                // by `callback` returning true before we proceed.
                for update in proto::split_worktree_update(update) {
                    // Discard any stale resume signal before (re)sending.
                    let _ = resume_updates_rx.try_recv();
                    loop {
                        let result = callback(update.clone());
                        if result.await {
                            break;
                        } else {
                            // The callback refused the update; block until
                            // someone signals `resume_updates`, or give up if
                            // the observer state was dropped.
                            log::info!("waiting to resume updates");
                            if resume_updates_rx.next().await.is_none() {
                                return Some(());
                            }
                        }
                    }
                }
            }
            Some(())
        });

        self.update_observer = Some(UpdateObservationState {
            snapshots_tx,
            resume_updates: resume_updates_tx,
            _maintain_remote_snapshot,
        });
    }
2277
    /// Starts sharing files that were previously withheld as private, and
    /// restarts the background scanners so they get picked up.
    pub fn share_private_files(&mut self, cx: &Context<Worktree>) {
        self.share_private_files = true;
        self.restart_background_scanners(cx);
    }
2282
2283 fn update_abs_path_and_refresh(
2284 &mut self,
2285 new_path: Option<SanitizedPath>,
2286 cx: &Context<Worktree>,
2287 ) {
2288 if let Some(new_path) = new_path {
2289 self.snapshot.git_repositories = Default::default();
2290 self.snapshot.ignores_by_parent_abs_path = Default::default();
2291 let root_name = new_path
2292 .as_path()
2293 .file_name()
2294 .map_or(String::new(), |f| f.to_string_lossy().to_string());
2295 self.snapshot.update_abs_path(new_path, root_name);
2296 }
2297 self.restart_background_scanners(cx);
2298 }
2299}
2300
2301impl RemoteWorktree {
    /// The id of the project this remote worktree belongs to.
    pub fn project_id(&self) -> u64 {
        self.project_id
    }
2305
    /// A handle to the RPC client used to reach the worktree's host.
    pub fn client(&self) -> AnyProtoClient {
        self.client.clone()
    }
2309
    /// Marks this worktree as disconnected: stops accepting remote updates
    /// and clears pending snapshot subscriptions (dropping their senders, so
    /// waiters in `wait_for_snapshot` fail).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
2315
2316 pub fn update_from_remote(&self, update: proto::UpdateWorktree) {
2317 if let Some(updates_tx) = &self.updates_tx {
2318 updates_tx
2319 .unbounded_send(update)
2320 .expect("consumer runs to completion");
2321 }
2322 }
2323
    /// Registers `callback` to receive this worktree's state as a stream of
    /// `UpdateWorktree` messages: an initial full snapshot, followed by any
    /// updates that arrive from the host while the observer is installed.
    fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        let (tx, mut rx) = mpsc::unbounded();
        let initial_update = self
            .snapshot
            .build_initial_update(project_id, self.id().to_proto());
        self.update_observer = Some(tx);
        cx.spawn(|this, mut cx| async move {
            let mut update = initial_update;
            'outer: loop {
                // SSH projects use a special project ID of 0, and we need to
                // remap it to the correct one here.
                update.project_id = project_id;

                for chunk in split_worktree_update(update) {
                    // A false return means the receiver is gone; stop observing.
                    if !callback(chunk).await {
                        break 'outer;
                    }
                }

                if let Some(next_update) = rx.next().await {
                    update = next_update;
                } else {
                    break;
                }
            }
            // Deregister the observer once the stream ends or is rejected.
            this.update(&mut cx, |this, _| {
                let this = this.as_remote_mut().unwrap();
                this.update_observer.take();
            })
        })
        .detach();
    }
2360
    /// Whether a snapshot with at least the given scan id has been fully applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }
2364
2365 pub fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
2366 let (tx, rx) = oneshot::channel();
2367 if self.observed_snapshot(scan_id) {
2368 let _ = tx.send(());
2369 } else if self.disconnected {
2370 drop(tx);
2371 } else {
2372 match self
2373 .snapshot_subscriptions
2374 .binary_search_by_key(&scan_id, |probe| probe.0)
2375 {
2376 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
2377 }
2378 }
2379
2380 async move {
2381 rx.await?;
2382 Ok(())
2383 }
2384 }
2385
    /// Inserts an entry received from the host, after waiting for this
    /// replica to catch up to the given scan id.
    fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &Context<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                // Mutate the shared background snapshot first, then publish a
                // clone of it as the worktree's foreground snapshot.
                let snapshot = &mut worktree.background_snapshot.lock().0;
                let entry = snapshot.insert_entry(entry, &worktree.file_scan_inclusions);
                worktree.snapshot = snapshot.clone();
                entry
            })?
        })
    }
2404
    /// Deletes an entry on the host (moving it to the trash if requested),
    /// then mirrors the deletion into this replica's snapshots.
    fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let response = self.client.request(proto::DeleteProjectEntry {
            project_id: self.project_id,
            entry_id: entry_id.to_proto(),
            use_trash: trash,
        });
        Some(cx.spawn(move |this, mut cx| async move {
            let response = response.await?;
            let scan_id = response.worktree_scan_id as usize;

            // Wait until we've observed the host's scan that contains the
            // deletion before touching local state.
            this.update(&mut cx, move |this, _| {
                this.as_remote_mut().unwrap().wait_for_snapshot(scan_id)
            })?
            .await?;

            this.update(&mut cx, |this, _| {
                let this = this.as_remote_mut().unwrap();
                // Mutate the shared background snapshot first, then publish a
                // clone of it as the foreground snapshot.
                let snapshot = &mut this.background_snapshot.lock().0;
                snapshot.delete_entry(entry_id);
                this.snapshot = snapshot.clone();
            })
        }))
    }
2433
    /// Renames an entry on the host. Resolves to `CreatedEntry::Included`
    /// with the new entry, or `CreatedEntry::Excluded` when the destination
    /// isn't tracked by the worktree (e.g. it's an excluded path).
    fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let new_path: Arc<Path> = new_path.into();
        let response = self.client.request(proto::RenameProjectEntry {
            project_id: self.project_id,
            entry_id: entry_id.to_proto(),
            new_path: new_path.as_ref().to_proto(),
        });
        cx.spawn(move |this, mut cx| async move {
            let response = response.await?;
            match response.entry {
                Some(entry) => this
                    .update(&mut cx, |this, cx| {
                        this.as_remote_mut().unwrap().insert_entry(
                            entry,
                            response.worktree_scan_id as usize,
                            cx,
                        )
                    })?
                    .await
                    .map(CreatedEntry::Included),
                // The host returned no entry: the destination is not tracked,
                // so report its absolute path instead.
                None => {
                    let abs_path = this.update(&mut cx, |worktree, _| {
                        worktree
                            .absolutize(&new_path)
                            .with_context(|| format!("absolutizing {new_path:?}"))
                    })??;
                    Ok(CreatedEntry::Excluded { abs_path })
                }
            }
        })
    }
2470}
2471
2472impl Snapshot {
2473 pub fn new(id: u64, root_name: String, abs_path: Arc<Path>) -> Self {
2474 Snapshot {
2475 id: WorktreeId::from_usize(id as usize),
2476 abs_path: abs_path.into(),
2477 root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
2478 root_name,
2479 always_included_entries: Default::default(),
2480 entries_by_path: Default::default(),
2481 entries_by_id: Default::default(),
2482 repositories: Default::default(),
2483 scan_id: 1,
2484 completed_scan_id: 0,
2485 }
2486 }
2487
    /// The stable identifier of this worktree.
    pub fn id(&self) -> WorktreeId {
        self.id
    }
2491
    // TODO:
    // Consider the following:
    //
    // ```rust
    // let abs_path: Arc<Path> = snapshot.abs_path(); // e.g. "C:\Users\user\Desktop\project"
    // let some_non_trimmed_path = Path::new("\\\\?\\C:\\Users\\user\\Desktop\\project\\main.rs");
    // // The caller perform some actions here:
    // some_non_trimmed_path.strip_prefix(abs_path); // This fails
    // some_non_trimmed_path.starts_with(abs_path); // This fails too
    // ```
    //
    // This is definitely a bug, but it's not clear if we should handle it here or not.
    /// The absolute path of the worktree root (see the caveat above about
    /// comparisons against un-sanitized Windows paths).
    pub fn abs_path(&self) -> &Arc<Path> {
        self.abs_path.as_path()
    }
2507
2508 fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
2509 let mut updated_entries = self
2510 .entries_by_path
2511 .iter()
2512 .map(proto::Entry::from)
2513 .collect::<Vec<_>>();
2514 updated_entries.sort_unstable_by_key(|e| e.id);
2515
2516 let mut updated_repositories = self
2517 .repositories
2518 .iter()
2519 .map(|repository| repository.initial_update())
2520 .collect::<Vec<_>>();
2521 updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
2522
2523 proto::UpdateWorktree {
2524 project_id,
2525 worktree_id,
2526 abs_path: self.abs_path().to_proto(),
2527 root_name: self.root_name().to_string(),
2528 updated_entries,
2529 removed_entries: Vec::new(),
2530 scan_id: self.scan_id as u64,
2531 is_last_update: self.completed_scan_id == self.scan_id,
2532 updated_repositories,
2533 removed_repositories: Vec::new(),
2534 }
2535 }
2536
2537 pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
2538 if path
2539 .components()
2540 .any(|component| !matches!(component, std::path::Component::Normal(_)))
2541 {
2542 return Err(anyhow!("invalid path"));
2543 }
2544 if path.file_name().is_some() {
2545 Ok(self.abs_path.as_path().join(path))
2546 } else {
2547 Ok(self.abs_path.as_path().to_path_buf())
2548 }
2549 }
2550
    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
2554
2555 fn insert_entry(
2556 &mut self,
2557 entry: proto::Entry,
2558 always_included_paths: &PathMatcher,
2559 ) -> Result<Entry> {
2560 let entry = Entry::try_from((&self.root_char_bag, always_included_paths, entry))?;
2561 let old_entry = self.entries_by_id.insert_or_replace(
2562 PathEntry {
2563 id: entry.id,
2564 path: entry.path.clone(),
2565 is_ignored: entry.is_ignored,
2566 scan_id: 0,
2567 },
2568 &(),
2569 );
2570 if let Some(old_entry) = old_entry {
2571 self.entries_by_path.remove(&PathKey(old_entry.path), &());
2572 }
2573 self.entries_by_path.insert_or_replace(entry.clone(), &());
2574 Ok(entry)
2575 }
2576
    /// Removes the entry with the given id, along with all of its
    /// descendants, from both indices. Returns the removed entry's path, or
    /// `None` if the id is unknown.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>(&());
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::path(&removed_entry.path), Bias::Left, &());
            // ...then skip the removed entry and everything underneath it,
            // dropping each skipped entry from the id index as we go.
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // Re-attach the remaining suffix of the tree.
            new_entries_by_path.append(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }
2597
2598 pub fn status_for_file(&self, path: impl AsRef<Path>) -> Option<FileStatus> {
2599 let path = path.as_ref();
2600 self.repository_for_path(path).and_then(|repo| {
2601 let repo_path = repo.relativize(path).unwrap();
2602 repo.statuses_by_path
2603 .get(&PathKey(repo_path.0), &())
2604 .map(|entry| entry.status)
2605 })
2606 }
2607
    /// Replaces the worktree's absolute root path and, when the root name
    /// changed, recomputes the name and its fuzzy-matching char bag.
    fn update_abs_path(&mut self, abs_path: SanitizedPath, root_name: String) {
        self.abs_path = abs_path;
        if root_name != self.root_name {
            self.root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
            self.root_name = root_name;
        }
    }
2615
    /// Applies an `UpdateWorktree` message received from the host to this
    /// replica's snapshot: root path/name, entry insertions and removals, and
    /// git repository state.
    pub(crate) fn apply_remote_update(
        &mut self,
        mut update: proto::UpdateWorktree,
        always_included_paths: &PathMatcher,
    ) -> Result<()> {
        log::debug!(
            "applying remote worktree update. {} entries updated, {} removed",
            update.updated_entries.len(),
            update.removed_entries.len()
        );
        self.update_abs_path(
            SanitizedPath::from(PathBuf::from_proto(update.abs_path)),
            update.root_name,
        );

        // Batch all tree mutations into edit lists and apply them at once.
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();

        for entry_id in update.removed_entries {
            let entry_id = ProjectEntryId::from_proto(entry_id);
            entries_by_id_edits.push(Edit::Remove(entry_id));
            if let Some(entry) = self.entry_for_id(entry_id) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, always_included_paths, entry))?;
            // If the entry moved, drop its record at the old path.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            // If a different entry previously occupied this path, drop its id.
            if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
                if old_entry.id != entry.id {
                    entries_by_id_edits.push(Edit::Remove(old_entry.id));
                }
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Drop repositories the host no longer reports.
        update.removed_repositories.sort_unstable();
        self.repositories.retain(&(), |entry: &RepositoryEntry| {
            update
                .removed_repositories
                .binary_search(&entry.work_directory_id.to_proto())
                .is_err()
        });

        for repository in update.updated_repositories {
            let work_directory_id = ProjectEntryId::from_proto(repository.work_directory_id);
            if let Some(work_dir_entry) = self.entry_for_id(work_directory_id) {
                let conflicted_paths = TreeSet::from_ordered_entries(
                    repository
                        .current_merge_conflicts
                        .into_iter()
                        .map(|path| RepoPath(Path::new(&path).into())),
                );

                if self
                    .repositories
                    .contains(&PathKey(work_dir_entry.path.clone()), &())
                {
                    // Known repository: apply incremental status edits.
                    let edits = repository
                        .removed_statuses
                        .into_iter()
                        .map(|path| Edit::Remove(PathKey(FromProto::from_proto(path))))
                        .chain(repository.updated_statuses.into_iter().filter_map(
                            |updated_status| {
                                Some(Edit::Insert(updated_status.try_into().log_err()?))
                            },
                        ))
                        .collect::<Vec<_>>();

                    self.repositories
                        .update(&PathKey(work_dir_entry.path.clone()), &(), |repo| {
                            repo.current_branch =
                                repository.branch_summary.as_ref().map(proto_to_branch);
                            repo.statuses_by_path.edit(edits, &());
                            repo.current_merge_conflicts = conflicted_paths
                        });
                } else {
                    // New repository: build its status tree from scratch.
                    let statuses = SumTree::from_iter(
                        repository
                            .updated_statuses
                            .into_iter()
                            .filter_map(|updated_status| updated_status.try_into().log_err()),
                        &(),
                    );

                    self.repositories.insert_or_replace(
                        RepositoryEntry {
                            work_directory_id,
                            // When syncing repository entries from a peer, we don't need
                            // the location_in_repo field, since git operations don't happen locally
                            // anyway.
                            work_directory: WorkDirectory::InProject {
                                relative_path: work_dir_entry.path.clone(),
                            },
                            current_branch: repository.branch_summary.as_ref().map(proto_to_branch),
                            statuses_by_path: statuses,
                            current_merge_conflicts: conflicted_paths,
                        },
                        &(),
                    );
                }
            } else {
                log::error!(
                    "no work directory entry for repository {:?}",
                    repository.work_directory_id
                )
            }
        }

        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }
2744
    /// Total number of entries, including ignored ones.
    pub fn entry_count(&self) -> usize {
        self.entries_by_path.summary().count
    }
2748
    /// Number of entries that are not ignored.
    pub fn visible_entry_count(&self) -> usize {
        self.entries_by_path.summary().non_ignored_count
    }
2752
    /// Total number of non-file entries (directories), including ignored ones.
    pub fn dir_count(&self) -> usize {
        let summary = self.entries_by_path.summary();
        summary.count - summary.file_count
    }
2757
    /// Number of non-ignored, non-file entries (directories).
    pub fn visible_dir_count(&self) -> usize {
        let summary = self.entries_by_path.summary();
        summary.non_ignored_count - summary.non_ignored_file_count
    }
2762
    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }
2766
    /// Number of file entries that are not ignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().non_ignored_file_count
    }
2770
2771 fn traverse_from_offset(
2772 &self,
2773 include_files: bool,
2774 include_dirs: bool,
2775 include_ignored: bool,
2776 start_offset: usize,
2777 ) -> Traversal {
2778 let mut cursor = self.entries_by_path.cursor(&());
2779 cursor.seek(
2780 &TraversalTarget::Count {
2781 count: start_offset,
2782 include_files,
2783 include_dirs,
2784 include_ignored,
2785 },
2786 Bias::Right,
2787 &(),
2788 );
2789 Traversal {
2790 snapshot: self,
2791 cursor,
2792 include_files,
2793 include_dirs,
2794 include_ignored,
2795 }
2796 }
2797
    /// Returns a traversal over entries of the requested kinds, positioned at
    /// the given worktree-relative path.
    pub fn traverse_from_path(
        &self,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        Traversal::new(self, include_files, include_dirs, include_ignored, path)
    }
2807
    /// Iterates over file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(true, false, include_ignored, start)
    }
2811
    /// Iterates over directory entries, starting at the `start`-th directory.
    pub fn directories(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, true, include_ignored, start)
    }
2815
    /// Iterates over all entries (files and directories), starting at the
    /// `start`-th entry.
    pub fn entries(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(true, true, include_ignored, start)
    }
2819
    /// Test-only: returns the status entries of the repository rooted at the
    /// given working directory, if one exists.
    #[cfg(any(feature = "test-support", test))]
    pub fn git_status(&self, work_dir: &Path) -> Option<Vec<StatusEntry>> {
        self.repositories
            .get(&PathKey(work_dir.into()), &())
            .map(|repo| repo.status().collect())
    }
2826
    /// The set of git repository entries in this snapshot.
    pub fn repositories(&self) -> &SumTree<RepositoryEntry> {
        &self.repositories
    }
2830
    /// Get the repository whose work directory corresponds to the given path.
    /// Returns a clone of the entry, if one exists.
    pub(crate) fn repository(&self, work_directory: PathKey) -> Option<RepositoryEntry> {
        self.repositories.get(&work_directory, &()).cloned()
    }
2835
2836 /// Get the repository whose work directory contains the given path.
2837 #[track_caller]
2838 pub fn repository_for_path(&self, path: &Path) -> Option<&RepositoryEntry> {
2839 self.repositories
2840 .iter()
2841 .filter(|repo| repo.directory_contains(path))
2842 .last()
2843 }
2844
    /// Given an ordered iterator of entries, returns an iterator of those entries,
    /// along with their containing git repository.
    ///
    /// Advances through `self.repositories()` in lockstep with the entries,
    /// maintaining a stack of repositories that contain the current entry.
    #[track_caller]
    pub fn entries_with_repositories<'a>(
        &'a self,
        entries: impl 'a + Iterator<Item = &'a Entry>,
    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
        let mut containing_repos = Vec::<&RepositoryEntry>::new();
        let mut repositories = self.repositories().iter().peekable();
        entries.map(move |entry| {
            // Pop repositories that no longer contain the current entry.
            while let Some(repository) = containing_repos.last() {
                if repository.directory_contains(&entry.path) {
                    break;
                } else {
                    containing_repos.pop();
                }
            }
            // Push upcoming repositories that do contain it.
            while let Some(repository) = repositories.peek() {
                if repository.directory_contains(&entry.path) {
                    containing_repos.push(repositories.next().unwrap());
                } else {
                    break;
                }
            }
            // The most recently pushed containing repository wins.
            let repo = containing_repos.last().copied();
            (entry, repo)
        })
    }
2873
2874 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
2875 let empty_path = Path::new("");
2876 self.entries_by_path
2877 .cursor::<()>(&())
2878 .filter(move |entry| entry.path.as_ref() != empty_path)
2879 .map(|entry| &entry.path)
2880 }
2881
2882 pub fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
2883 let options = ChildEntriesOptions {
2884 include_files: true,
2885 include_dirs: true,
2886 include_ignored: true,
2887 };
2888 self.child_entries_with_options(parent_path, options)
2889 }
2890
2891 pub fn child_entries_with_options<'a>(
2892 &'a self,
2893 parent_path: &'a Path,
2894 options: ChildEntriesOptions,
2895 ) -> ChildEntriesIter<'a> {
2896 let mut cursor = self.entries_by_path.cursor(&());
2897 cursor.seek(&TraversalTarget::path(parent_path), Bias::Right, &());
2898 let traversal = Traversal {
2899 snapshot: self,
2900 cursor,
2901 include_files: options.include_files,
2902 include_dirs: options.include_dirs,
2903 include_ignored: options.include_ignored,
2904 };
2905 ChildEntriesIter {
2906 traversal,
2907 parent_path,
2908 }
2909 }
2910
    /// The entry for the worktree root itself (the empty relative path).
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }
2914
    /// TODO: what's the difference between `root_dir` and `abs_path`?
    /// is there any? if so, document it.
    ///
    /// Returns the worktree's absolute path, but only when the root entry is
    /// a directory; a single-file worktree yields `None`.
    pub fn root_dir(&self) -> Option<Arc<Path>> {
        self.root_entry()
            .filter(|entry| entry.is_dir())
            .map(|_| self.abs_path().clone())
    }
2922
    /// The name of the worktree root (its file name, as a string).
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
2926
2927 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
2928 self.repositories
2929 .get(&PathKey(Path::new("").into()), &())
2930 .map(|entry| entry.to_owned())
2931 }
2932
2933 pub fn git_entry(&self, work_directory_path: Arc<Path>) -> Option<RepositoryEntry> {
2934 self.repositories
2935 .get(&PathKey(work_directory_path), &())
2936 .map(|entry| entry.to_owned())
2937 }
2938
    /// Iterates over all repository entries in this snapshot.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repositories.iter()
    }
2942
    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }
2946
2947 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
2948 let path = path.as_ref();
2949 debug_assert!(path.is_relative());
2950 self.traverse_from_path(true, true, true, path)
2951 .entry()
2952 .and_then(|entry| {
2953 if entry.path.as_ref() == path {
2954 Some(entry)
2955 } else {
2956 None
2957 }
2958 })
2959 }
2960
    /// Returns the entry with the given id, if it exists in this snapshot.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }
2965
    /// Returns the filesystem inode of the entry at the given path, if any.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
2969}
2970
2971impl LocalSnapshot {
2972 pub fn local_repo_for_path(&self, path: &Path) -> Option<&LocalRepositoryEntry> {
2973 let repository_entry = self.repository_for_path(path)?;
2974 let work_directory_id = repository_entry.work_directory_id();
2975 self.git_repositories.get(&work_directory_id)
2976 }
2977
    /// Builds an incremental `UpdateWorktree` message describing the given
    /// entry and repository changes relative to a peer's previous state.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updated_repositories = Vec::new();
        let mut removed_repositories = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        for (work_dir_path, change) in repo_changes.iter() {
            let new_repo = self.repositories.get(&PathKey(work_dir_path.clone()), &());
            // Compare the repo's previous state against its current one to
            // decide between an incremental update, a full (initial) update,
            // or a removal.
            match (&change.old_repository, new_repo) {
                (Some(old_repo), Some(new_repo)) => {
                    updated_repositories.push(new_repo.build_update(old_repo));
                }
                (None, Some(new_repo)) => {
                    updated_repositories.push(new_repo.initial_update());
                }
                (Some(old_repo), None) => {
                    removed_repositories.push(old_repo.work_directory_id.to_proto());
                }
                _ => {}
            }
        }

        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);
        removed_repositories.sort_unstable();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        // TODO - optimize, knowing that removed_entries are sorted.
        // An id present in both lists was removed and re-created within this
        // batch; report it as updated, not removed.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_proto(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }
3035
    /// Inserts an entry into the snapshot's indices, additionally (re)loading
    /// the corresponding gitignore when the entry is a `.gitignore` file.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.as_path().join(&entry.path);
            // NOTE(review): the loaded ignore is stored with a `true` flag —
            // presumably marking it as freshly (re)loaded; confirm against the
            // consumers of `ignores_by_parent_abs_path`.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Don't demote a directory that was already scanned back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            if removed.id != entry.id {
                // A different entry used to occupy this path; drop its record
                // from the id index to keep the two trees consistent.
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
3081
3082 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
3083 let mut inodes = TreeSet::default();
3084 for ancestor in path.ancestors().skip(1) {
3085 if let Some(entry) = self.entry_for_path(ancestor) {
3086 inodes.insert(entry.inode);
3087 }
3088 }
3089 inodes
3090 }
3091
    /// Builds the stack of gitignore rules that apply to `abs_path`, walking
    /// up its ancestors until the first one containing a `.git` directory.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        // Gather (ancestor dir, optional ignore) pairs from innermost to
        // outermost, skipping index 0 (the path itself).
        let mut new_ignores = Vec::new();
        for (index, ancestor) in abs_path.ancestors().enumerate() {
            if index > 0 {
                if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                    new_ignores.push((ancestor, Some(ignore.clone())));
                } else {
                    new_ignores.push((ancestor, None));
                }
            }
            // Stop at the repository boundary; ignores above it don't apply.
            if ancestor.join(*DOT_GIT).exists() {
                break;
            }
        }

        // Layer the ignores from the outermost ancestor down. If an ancestor
        // directory is itself ignored, everything beneath it is ignored.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
3123
    /// Test-only: iterates over directory entries that are fully scanned
    /// (`EntryKind::Dir`) despite being external or ignored.
    #[cfg(test)]
    pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
        self.entries_by_path
            .cursor::<()>(&())
            .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
    }
3130
    /// Test-only consistency check: verifies that the path and id indices
    /// agree, that file traversals visit exactly the file entries, and that
    /// traversal order matches the tree's intrinsic order.
    #[cfg(test)]
    pub fn check_invariants(&self, git_state: bool) {
        use pretty_assertions::assert_eq;

        assert_eq!(
            self.entries_by_path
                .cursor::<()>(&())
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>(&())
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        // Every file entry must be visited by the file traversal, and every
        // visible file by the non-ignored traversal, in tree order.
        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>(&()) {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if (!entry.is_ignored && !entry.is_external) || entry.is_always_included {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // A manual walk via `child_entries` must produce the same order as
        // iterating the tree directly.
        let mut bfs_paths = Vec::new();
        let mut stack = self
            .root_entry()
            .map(|e| e.path.as_ref())
            .into_iter()
            .collect::<Vec<_>>();
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>(&())
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        let dfs_paths_via_traversal = self
            .entries(true, 0)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        if git_state {
            // Each tracked .gitignore must have both its parent directory and
            // the .gitignore file itself present in the snapshot.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path = ignore_parent_abs_path
                    .strip_prefix(self.abs_path.as_path())
                    .unwrap();
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(*GITIGNORE))
                    .is_some());
            }
        }
    }
3202
    /// Test-only consistency check for git state: `repositories` and
    /// `git_repositories` must describe the same set of repositories.
    #[cfg(test)]
    fn check_git_invariants(&self) {
        let dotgit_paths = self
            .git_repositories
            .iter()
            .map(|repo| repo.1.dot_git_dir_abs_path.clone())
            .collect::<HashSet<_>>();
        let work_dir_paths = self
            .repositories
            .iter()
            .map(|repo| repo.work_directory.path_key())
            .collect::<HashSet<_>>();
        assert_eq!(dotgit_paths.len(), work_dir_paths.len());
        assert_eq!(self.repositories.iter().count(), work_dir_paths.len());
        assert_eq!(self.git_repositories.iter().count(), work_dir_paths.len());
        // Every repository entry must have matching local repository state.
        for entry in self.repositories.iter() {
            self.git_repositories.get(&entry.work_directory_id).unwrap();
        }
    }
3222
3223 #[cfg(test)]
3224 pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3225 let mut paths = Vec::new();
3226 for entry in self.entries_by_path.cursor::<()>(&()) {
3227 if include_ignored || !entry.is_ignored {
3228 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3229 }
3230 }
3231 paths.sort_by(|a, b| a.0.cmp(b.0));
3232 paths
3233 }
3234}
3235
impl BackgroundScannerState {
    /// Returns whether the scanner should descend into the directory `entry`.
    ///
    /// Ignored and external directories are normally skipped, but `.git`,
    /// the local settings folder, already-scanned directories, and any
    /// directory needed by a pending scan request are always descended into.
    fn should_scan_directory(&self, entry: &Entry) -> bool {
        (!entry.is_external && (!entry.is_ignored || entry.is_always_included))
            || entry.path.file_name() == Some(*DOT_GIT)
            || entry.path.file_name() == Some(local_settings_folder_relative_path().as_os_str())
            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
            || self
                .paths_to_scan
                .iter()
                .any(|p| p.starts_with(&entry.path))
            || self
                .path_prefixes_to_scan
                .iter()
                .any(|p| entry.path.starts_with(p))
    }

    /// Queues a scan job for the directory `entry`, unless its inode already
    /// appears among its ancestors' inodes (which would indicate a cycle,
    /// e.g. via a symlink, and would make the scan loop forever).
    fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
        let path = entry.path.clone();
        let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
        let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);

        if !ancestor_inodes.contains(&entry.inode) {
            ancestor_inodes.insert(entry.inode);
            scan_job_tx
                .try_send(ScanJob {
                    abs_path,
                    path,
                    ignore_stack,
                    scan_queue: scan_job_tx.clone(),
                    ancestor_inodes,
                    is_external: entry.is_external,
                })
                .unwrap();
        }
    }

    /// Tries to carry a previously-assigned entry id over to `entry`, so that
    /// renames and in-place updates don't appear as a removal plus addition.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(mtime) = entry.mtime {
            // If an entry with the same inode was removed from the worktree during this scan,
            // then it *might* represent the same file or directory. But the OS might also have
            // re-used the inode for a completely different file or directory.
            //
            // Conditionally reuse the old entry's id:
            // * if the mtime is the same, the file was probably renamed.
            // * if the path is the same, the file may just have been updated
            if let Some(removed_entry) = self.removed_entries.remove(&entry.inode) {
                if removed_entry.mtime == Some(mtime) || removed_entry.path == entry.path {
                    entry.id = removed_entry.id;
                }
            } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
                entry.id = existing_entry.id;
            }
        }
    }

    /// Inserts `entry` into the snapshot (reusing an existing id when
    /// possible) and, if the entry is a `.git`, registers its repository.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs, watcher: &dyn Watcher) -> Entry {
        self.reuse_entry_id(&mut entry);
        let entry = self.snapshot.insert_entry(entry, fs);
        if entry.path.file_name() == Some(&DOT_GIT) {
            self.insert_git_repository(entry.path.clone(), fs, watcher);
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);

        entry
    }

    /// Records the children discovered by scanning `parent_path`, marking the
    /// parent directory as loaded and registering its `.gitignore`, if any.
    fn populate_dir(
        &mut self,
        parent_path: &Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = if let Some(parent_entry) = self
            .snapshot
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        // Only directories can be populated; pending/unloaded ones become
        // fully loaded now.
        match parent_entry.kind {
            EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
            EntryKind::Dir => {}
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.snapshot.abs_path.as_path().join(parent_path).into();
            self.snapshot
                .ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        let parent_entry_id = parent_entry.id;
        self.scanned_dirs.insert(parent_entry_id);
        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for entry in entries {
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.snapshot.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // Record the parent as changed, keeping `changed_paths` sorted.
        if let Err(ix) = self.changed_paths.binary_search(parent_path) {
            self.changed_paths.insert(ix, parent_path.clone());
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }

    /// Removes `path` and all of its descendants from the snapshot, recording
    /// removed entries by inode so their ids can be reused if the same file
    /// reappears later in this scan (see `reuse_entry_id`).
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree around the subtree rooted at `path`:
            // everything before it, the subtree itself, and everything after.
            let mut cursor = self
                .snapshot
                .entries_by_path
                .cursor::<TraversalProgress>(&());
            new_entries = cursor.slice(&TraversalTarget::path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::successor(path), Bias::Left, &());
            new_entries.append(cursor.suffix(&()), &());
        }
        self.snapshot.entries_by_path = new_entries;

        let mut removed_ids = Vec::with_capacity(removed_entries.summary().count);
        for entry in removed_entries.cursor::<()>(&()) {
            // Keep at most one removed entry per inode — the most recently
            // assigned one (highest id).
            match self.removed_entries.entry(entry.inode) {
                hash_map::Entry::Occupied(mut e) => {
                    let prev_removed_entry = e.get_mut();
                    if entry.id > prev_removed_entry.id {
                        *prev_removed_entry = entry.clone();
                    }
                }
                hash_map::Entry::Vacant(e) => {
                    e.insert(entry.clone());
                }
            }

            // Removing a `.gitignore` invalidates the cached ignore data for
            // its parent directory.
            if entry.path.file_name() == Some(&GITIGNORE) {
                let abs_parent_path = self
                    .snapshot
                    .abs_path
                    .as_path()
                    .join(entry.path.parent().unwrap());
                if let Some((_, needs_update)) = self
                    .snapshot
                    .ignores_by_parent_abs_path
                    .get_mut(abs_parent_path.as_path())
                {
                    *needs_update = true;
                }
            }

            // Maintain `removed_ids` as a sorted, deduplicated list.
            if let Err(ix) = removed_ids.binary_search(&entry.id) {
                removed_ids.insert(ix, entry.id);
            }
        }

        self.snapshot.entries_by_id.edit(
            removed_ids.iter().map(|&id| Edit::Remove(id)).collect(),
            &(),
        );
        self.snapshot
            .git_repositories
            .retain(|id, _| removed_ids.binary_search(id).is_err());
        self.snapshot.repositories.retain(&(), |repository| {
            !repository.work_directory.path_key().0.starts_with(path)
        });

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }

    /// Registers a git repository whose `.git` lives at `dot_git_path`
    /// (relative to the worktree root). Returns `None` when `.git` is nested
    /// inside another repository's metadata, or is the worktree root itself.
    fn insert_git_repository(
        &mut self,
        dot_git_path: Arc<Path>,
        fs: &dyn Fs,
        watcher: &dyn Watcher,
    ) -> Option<LocalRepositoryEntry> {
        let work_dir_path: Arc<Path> = match dot_git_path.parent() {
            Some(parent_dir) => {
                // Guard against repositories inside the repository metadata
                if parent_dir.iter().any(|component| component == *DOT_GIT) {
                    log::info!(
                        "not building git repository for nested `.git` directory, `.git` path in the worktree: {dot_git_path:?}"
                    );
                    return None;
                };
                log::info!(
                    "building git repository, `.git` path in the worktree: {dot_git_path:?}"
                );

                parent_dir.into()
            }
            None => {
                // `dot_git_path.parent().is_none()` means `.git` directory is the opened worktree itself,
                // no files inside that directory are tracked by git, so no need to build the repo around it
                log::info!(
                    "not building git repository for the worktree itself, `.git` path in the worktree: {dot_git_path:?}"
                );
                return None;
            }
        };

        self.insert_git_repository_for_path(
            WorkDirectory::InProject {
                relative_path: work_dir_path,
            },
            dot_git_path,
            fs,
            watcher,
        )
    }

    /// Opens the repository at `dot_git_path` and registers it under the given
    /// work directory, wiring up filesystem watches for its `.git` dir(s).
    /// Returns `None` if the work directory has no entry, the repository is
    /// already registered, or it cannot be opened/watched.
    fn insert_git_repository_for_path(
        &mut self,
        work_directory: WorkDirectory,
        dot_git_path: Arc<Path>,
        fs: &dyn Fs,
        watcher: &dyn Watcher,
    ) -> Option<LocalRepositoryEntry> {
        let work_dir_id = self
            .snapshot
            .entry_for_path(work_directory.path_key().0)
            .map(|entry| entry.id)?;

        // Already registered — nothing to do.
        if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
            return None;
        }

        let dot_git_abs_path = self.snapshot.abs_path.as_path().join(&dot_git_path);

        let t0 = Instant::now();
        let repository = fs.open_repo(&dot_git_abs_path)?;

        let repository_path = repository.path();
        watcher.add(&repository_path).log_err()?;

        let actual_dot_git_dir_abs_path = repository.main_repository_path();
        let dot_git_worktree_abs_path = if actual_dot_git_dir_abs_path == dot_git_abs_path {
            None
        } else {
            // The two paths could be different because we opened a git worktree.
            // When that happens:
            //
            // * `dot_git_abs_path` is a file that points to the worktree-subdirectory in the actual
            // .git directory.
            //
            // * `repository_path` is the worktree-subdirectory.
            //
            // * `actual_dot_git_dir_abs_path` is the path to the actual .git directory. In git
            // documentation this is called the "commondir".
            watcher.add(&dot_git_abs_path).log_err()?;
            Some(Arc::from(dot_git_abs_path))
        };

        log::trace!("constructed libgit2 repo in {:?}", t0.elapsed());

        if let Some(git_hosting_provider_registry) = self.git_hosting_provider_registry.clone() {
            git_hosting_providers::register_additional_providers(
                git_hosting_provider_registry,
                repository.clone(),
            );
        }

        self.snapshot.repositories.insert_or_replace(
            RepositoryEntry {
                work_directory_id: work_dir_id,
                work_directory: work_directory.clone(),
                current_branch: None,
                statuses_by_path: Default::default(),
                current_merge_conflicts: Default::default(),
            },
            &(),
        );

        let local_repository = LocalRepositoryEntry {
            work_directory_id: work_dir_id,
            work_directory: work_directory.clone(),
            git_dir_scan_id: 0,
            status_scan_id: 0,
            repo_ptr: repository.clone(),
            dot_git_dir_abs_path: actual_dot_git_dir_abs_path.into(),
            dot_git_worktree_abs_path,
            current_merge_head_shas: Default::default(),
            merge_message: None,
        };

        self.snapshot
            .git_repositories
            .insert(work_dir_id, local_repository.clone());

        Some(local_repository)
    }
}
3550
3551async fn is_git_dir(path: &Path, fs: &dyn Fs) -> bool {
3552 if path.file_name() == Some(&*DOT_GIT) {
3553 return true;
3554 }
3555
3556 // If we're in a bare repository, we are not inside a `.git` folder. In a
3557 // bare repository, the root folder contains what would normally be in the
3558 // `.git` folder.
3559 let head_metadata = fs.metadata(&path.join("HEAD")).await;
3560 if !matches!(head_metadata, Ok(Some(_))) {
3561 return false;
3562 }
3563 let config_metadata = fs.metadata(&path.join("config")).await;
3564 matches!(config_metadata, Ok(Some(_)))
3565}
3566
3567async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
3568 let contents = fs.load(abs_path).await?;
3569 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
3570 let mut builder = GitignoreBuilder::new(parent);
3571 for line in contents.lines() {
3572 builder.add_line(Some(abs_path.into()), line)?;
3573 }
3574 Ok(builder.build()?)
3575}
3576
3577impl Deref for Worktree {
3578 type Target = Snapshot;
3579
3580 fn deref(&self) -> &Self::Target {
3581 match self {
3582 Worktree::Local(worktree) => &worktree.snapshot,
3583 Worktree::Remote(worktree) => &worktree.snapshot,
3584 }
3585 }
3586}
3587
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    /// A local worktree dereferences to its current local snapshot.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
3595
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    /// A remote worktree dereferences to its current snapshot.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
3603
impl fmt::Debug for LocalWorktree {
    /// Debug output delegates to the underlying snapshot.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
3609
3610impl fmt::Debug for Snapshot {
3611 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3612 struct EntriesById<'a>(&'a SumTree<PathEntry>);
3613 struct EntriesByPath<'a>(&'a SumTree<Entry>);
3614
3615 impl<'a> fmt::Debug for EntriesByPath<'a> {
3616 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3617 f.debug_map()
3618 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
3619 .finish()
3620 }
3621 }
3622
3623 impl<'a> fmt::Debug for EntriesById<'a> {
3624 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3625 f.debug_list().entries(self.0.iter()).finish()
3626 }
3627 }
3628
3629 f.debug_struct("Snapshot")
3630 .field("id", &self.id)
3631 .field("root_name", &self.root_name)
3632 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
3633 .field("entries_by_id", &EntriesById(&self.entries_by_id))
3634 .finish()
3635 }
3636}
3637
/// A handle to a file within a worktree, implementing `language::File`.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree that contains (or contained) this file.
    pub worktree: Entity<Worktree>,
    /// Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    /// Whether the file exists on disk (and its mtime when it does).
    pub disk_state: DiskState,
    /// The worktree entry id, when the file currently has an entry.
    pub entry_id: Option<ProjectEntryId>,
    /// Whether this file belongs to a local (as opposed to remote) worktree.
    pub is_local: bool,
    /// Whether the file is considered private (see `Entry::is_private`).
    pub is_private: bool,
}
3647
impl language::File for File {
    /// Returns `self` as a local file only when it belongs to a local worktree.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    /// The file's on-disk state (present/new/deleted).
    fn disk_state(&self) -> DiskState {
        self.disk_state
    }

    /// Path relative to the worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Path for display: the worktree's root name (or, for non-visible
    /// worktrees, its absolute path with the home directory shown as `~`)
    /// joined with the relative path.
    fn full_path(&self, cx: &App) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            // Abbreviate paths under the user's home directory as `~/...`.
            if worktree.is_local() && path.starts_with(home_dir().as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(home_dir().as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means the file is the worktree root itself.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a App) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    /// Id of the worktree containing this file.
    fn worktree_id(&self, cx: &App) -> WorktreeId {
        self.worktree.read(cx).id()
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission over RPC.
    fn to_proto(&self, cx: &App) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.read(cx).id().to_proto(),
            entry_id: self.entry_id.map(|id| id.to_proto()),
            path: self.path.as_ref().to_proto(),
            mtime: self.disk_state.mtime().map(|time| time.into()),
            is_deleted: self.disk_state == DiskState::Deleted,
        }
    }

    /// Whether this file is considered private.
    fn is_private(&self) -> bool {
        self.is_private
    }
}
3719
3720impl language::LocalFile for File {
3721 fn abs_path(&self, cx: &App) -> PathBuf {
3722 let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
3723 if self.path.as_ref() == Path::new("") {
3724 worktree_path.as_path().to_path_buf()
3725 } else {
3726 worktree_path.as_path().join(&self.path)
3727 }
3728 }
3729
3730 fn load(&self, cx: &App) -> Task<Result<String>> {
3731 let worktree = self.worktree.read(cx).as_local().unwrap();
3732 let abs_path = worktree.absolutize(&self.path);
3733 let fs = worktree.fs.clone();
3734 cx.background_spawn(async move { fs.load(&abs_path?).await })
3735 }
3736
3737 fn load_bytes(&self, cx: &App) -> Task<Result<Vec<u8>>> {
3738 let worktree = self.worktree.read(cx).as_local().unwrap();
3739 let abs_path = worktree.absolutize(&self.path);
3740 let fs = worktree.fs.clone();
3741 cx.background_spawn(async move { fs.load_bytes(&abs_path?).await })
3742 }
3743}
3744
3745impl File {
3746 pub fn for_entry(entry: Entry, worktree: Entity<Worktree>) -> Arc<Self> {
3747 Arc::new(Self {
3748 worktree,
3749 path: entry.path.clone(),
3750 disk_state: if let Some(mtime) = entry.mtime {
3751 DiskState::Present { mtime }
3752 } else {
3753 DiskState::New
3754 },
3755 entry_id: Some(entry.id),
3756 is_local: true,
3757 is_private: entry.is_private,
3758 })
3759 }
3760
3761 pub fn from_proto(
3762 proto: rpc::proto::File,
3763 worktree: Entity<Worktree>,
3764 cx: &App,
3765 ) -> Result<Self> {
3766 let worktree_id = worktree
3767 .read(cx)
3768 .as_remote()
3769 .ok_or_else(|| anyhow!("not remote"))?
3770 .id();
3771
3772 if worktree_id.to_proto() != proto.worktree_id {
3773 return Err(anyhow!("worktree id does not match file"));
3774 }
3775
3776 let disk_state = if proto.is_deleted {
3777 DiskState::Deleted
3778 } else {
3779 if let Some(mtime) = proto.mtime.map(&Into::into) {
3780 DiskState::Present { mtime }
3781 } else {
3782 DiskState::New
3783 }
3784 };
3785
3786 Ok(Self {
3787 worktree,
3788 path: Arc::<Path>::from_proto(proto.path),
3789 disk_state,
3790 entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
3791 is_local: false,
3792 is_private: false,
3793 })
3794 }
3795
3796 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
3797 file.and_then(|f| f.as_any().downcast_ref())
3798 }
3799
3800 pub fn worktree_id(&self, cx: &App) -> WorktreeId {
3801 self.worktree.read(cx).id()
3802 }
3803
3804 pub fn project_entry_id(&self, _: &App) -> Option<ProjectEntryId> {
3805 match self.disk_state {
3806 DiskState::Deleted => None,
3807 _ => self.entry_id,
3808 }
3809 }
3810}
3811
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable identifier, preserved across renames and updates when possible.
    pub id: ProjectEntryId,
    /// File vs. directory, including directory load state.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode number.
    pub inode: u64,
    /// Modification time; `None` for entries not yet created on disk.
    pub mtime: Option<MTime>,

    // Presumably the symlink-resolved path when it differs from the entry's
    // worktree path — TODO confirm against where `Entry::new` is called.
    pub canonical_path: Option<Box<Path>>,
    /// Whether this entry is ignored by Git.
    ///
    /// We only scan ignored entries once the directory is expanded and
    /// exclude them from searches.
    pub is_ignored: bool,

    /// Whether this entry is always included in searches.
    ///
    /// This is used for entries that are always included in searches, even
    /// if they are ignored by git. Overridden by file_scan_exclusions.
    pub is_always_included: bool,

    /// Whether this entry's canonical path is outside of the worktree.
    /// This means the entry is only accessible from the worktree root via a
    /// symlink.
    ///
    /// We only scan entries outside of the worktree once the symlinked
    /// directory is expanded. External entries are treated like gitignored
    /// entries in that they are not included in searches.
    pub is_external: bool,

    /// Whether this entry is considered to be a `.env` file.
    pub is_private: bool,
    /// The entry's size on disk, in bytes.
    pub size: u64,
    /// Bag of characters appearing in the path, used for fuzzy matching.
    pub char_bag: CharBag,
    /// Whether the file is a FIFO (named pipe).
    pub is_fifo: bool,
}
3849
/// The kind of a worktree entry, including a directory's load state.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has not been loaded.
    UnloadedDir,
    /// A directory whose scan is queued but not yet complete.
    PendingDir,
    /// A fully loaded directory.
    Dir,
    /// A regular file.
    File,
}
3857
/// The kind of change observed for a path during scanning.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
3873
/// Describes a change to a git repository observed during scanning.
#[derive(Debug)]
pub struct GitRepositoryChange {
    /// The previous state of the repository, if it already existed.
    pub old_repository: Option<RepositoryEntry>,
}
3879
/// Batch of entry changes reported after a scan: (path, entry id, change kind).
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// Batch of repository changes, each paired with its path.
pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
3882
/// The git status of a single path within a repository.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct StatusEntry {
    /// Path relative to the repository's work directory.
    pub repo_path: RepoPath,
    /// The path's git status.
    pub status: FileStatus,
}
3888
3889impl StatusEntry {
3890 pub fn is_staged(&self) -> Option<bool> {
3891 self.status.is_staged()
3892 }
3893
3894 fn to_proto(&self) -> proto::StatusEntry {
3895 let simple_status = match self.status {
3896 FileStatus::Ignored | FileStatus::Untracked => proto::GitStatus::Added as i32,
3897 FileStatus::Unmerged { .. } => proto::GitStatus::Conflict as i32,
3898 FileStatus::Tracked(TrackedStatus {
3899 index_status,
3900 worktree_status,
3901 }) => tracked_status_to_proto(if worktree_status != StatusCode::Unmodified {
3902 worktree_status
3903 } else {
3904 index_status
3905 }),
3906 };
3907
3908 proto::StatusEntry {
3909 repo_path: self.repo_path.as_ref().to_proto(),
3910 simple_status,
3911 status: Some(status_to_proto(self.status)),
3912 }
3913 }
3914}
3915
impl TryFrom<proto::StatusEntry> for StatusEntry {
    type Error = anyhow::Error;

    /// Decodes a status entry from protobuf, validating the status codes.
    fn try_from(value: proto::StatusEntry) -> Result<Self, Self::Error> {
        let repo_path = RepoPath(Arc::<Path>::from_proto(value.repo_path));
        let status = status_from_proto(value.simple_status, value.status)?;
        Ok(Self { repo_path, status })
    }
}
3925
/// Cursor dimension tracking the greatest path seen so far, by reference.
#[derive(Clone, Debug)]
struct PathProgress<'a> {
    // Borrowed from the summary that produced it, so advancing is allocation-free.
    max_path: &'a Path,
}
3930
/// Sum-tree summary pairing the greatest path in a subtree with an
/// item-specific summary `S`.
#[derive(Clone, Debug)]
pub struct PathSummary<S> {
    // Greatest path covered by the summarized subtree.
    max_path: Arc<Path>,
    // Aggregated item summary for the subtree.
    item_summary: S,
}
3936
impl<S: Summary> Summary for PathSummary<S> {
    type Context = S::Context;

    /// Identity summary: empty path plus the inner summary's zero.
    fn zero(cx: &Self::Context) -> Self {
        Self {
            max_path: Path::new("").into(),
            item_summary: S::zero(cx),
        }
    }

    /// Accumulates `rhs`, which covers items to the right, so its `max_path`
    /// supersedes ours.
    fn add_summary(&mut self, rhs: &Self, cx: &Self::Context) {
        self.max_path = rhs.max_path.clone();
        self.item_summary.add_summary(&rhs.item_summary, cx);
    }
}
3952
/// Lets a cursor track the greatest path seen so far without cloning paths.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for PathProgress<'a> {
    fn zero(_: &<PathSummary<S> as Summary>::Context) -> Self {
        Self {
            max_path: Path::new(""),
        }
    }

    fn add_summary(
        &mut self,
        summary: &'a PathSummary<S>,
        _: &<PathSummary<S> as Summary>::Context,
    ) {
        self.max_path = summary.max_path.as_ref()
    }
}
3968
/// Repository entries are ordered by their work-directory path; no extra
/// per-item data is aggregated (`Unit`).
impl sum_tree::Item for RepositoryEntry {
    type Summary = PathSummary<Unit>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.work_directory.path_key().0,
            item_summary: Unit,
        }
    }
}
3979
impl sum_tree::KeyedItem for RepositoryEntry {
    type Key = PathKey;

    /// Repositories are keyed by their work-directory path.
    fn key(&self) -> Self::Key {
        self.work_directory.path_key()
    }
}
3987
/// Status entries are ordered by repo path, aggregating per-status counts
/// (`GitSummary`) over subtrees.
impl sum_tree::Item for StatusEntry {
    type Summary = PathSummary<GitSummary>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.repo_path.0.clone(),
            item_summary: self.status.summary(),
        }
    }
}
3998
impl sum_tree::KeyedItem for StatusEntry {
    type Key = PathKey;

    /// Status entries are keyed by their repo-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.repo_path.0.clone())
    }
}
4006
/// Lets `GitSummary` itself be accumulated as a cursor dimension over
/// status entries.
impl<'a> sum_tree::Dimension<'a, PathSummary<GitSummary>> for GitSummary {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<GitSummary>, _: &()) {
        *self += summary.item_summary
    }
}
4016
/// Lets a cursor seek by path over any `PathSummary`-summarized tree.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for PathKey {
    fn zero(_: &S::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<S>, _: &S::Context) {
        self.0 = summary.max_path.clone();
    }
}
4026
/// Lets traversal cursors advance over `PathSummary`-summarized trees.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for TraversalProgress<'a> {
    fn zero(_cx: &S::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<S>, _: &S::Context) {
        self.max_path = summary.max_path.as_ref();
    }
}
4036
4037impl Entry {
4038 fn new(
4039 path: Arc<Path>,
4040 metadata: &fs::Metadata,
4041 next_entry_id: &AtomicUsize,
4042 root_char_bag: CharBag,
4043 canonical_path: Option<Box<Path>>,
4044 ) -> Self {
4045 let char_bag = char_bag_for_path(root_char_bag, &path);
4046 Self {
4047 id: ProjectEntryId::new(next_entry_id),
4048 kind: if metadata.is_dir {
4049 EntryKind::PendingDir
4050 } else {
4051 EntryKind::File
4052 },
4053 path,
4054 inode: metadata.inode,
4055 mtime: Some(metadata.mtime),
4056 size: metadata.len,
4057 canonical_path,
4058 is_ignored: false,
4059 is_always_included: false,
4060 is_external: false,
4061 is_private: false,
4062 char_bag,
4063 is_fifo: metadata.is_fifo,
4064 }
4065 }
4066
4067 pub fn is_created(&self) -> bool {
4068 self.mtime.is_some()
4069 }
4070
4071 pub fn is_dir(&self) -> bool {
4072 self.kind.is_dir()
4073 }
4074
4075 pub fn is_file(&self) -> bool {
4076 self.kind.is_file()
4077 }
4078}
4079
impl EntryKind {
    /// True for any directory variant, regardless of load state.
    pub fn is_dir(&self) -> bool {
        matches!(
            self,
            EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
        )
    }

    /// True only for `UnloadedDir`.
    pub fn is_unloaded(&self) -> bool {
        matches!(self, EntryKind::UnloadedDir)
    }

    /// True for regular files.
    pub fn is_file(&self) -> bool {
        matches!(self, EntryKind::File)
    }
}
4096
4097impl sum_tree::Item for Entry {
4098 type Summary = EntrySummary;
4099
4100 fn summary(&self, _cx: &()) -> Self::Summary {
4101 let non_ignored_count = if (self.is_ignored || self.is_external) && !self.is_always_included
4102 {
4103 0
4104 } else {
4105 1
4106 };
4107 let file_count;
4108 let non_ignored_file_count;
4109 if self.is_file() {
4110 file_count = 1;
4111 non_ignored_file_count = non_ignored_count;
4112 } else {
4113 file_count = 0;
4114 non_ignored_file_count = 0;
4115 }
4116
4117 EntrySummary {
4118 max_path: self.path.clone(),
4119 count: 1,
4120 non_ignored_count,
4121 file_count,
4122 non_ignored_file_count,
4123 }
4124 }
4125}
4126
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
4134
/// Aggregated statistics for a subtree of `Entry` items.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest path in the subtree.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that are neither ignored nor external (or are always-included).
    non_ignored_count: usize,
    // Number of file entries.
    file_count: usize,
    // Number of non-ignored file entries.
    non_ignored_file_count: usize,
}
4143
impl Default for EntrySummary {
    // Manual impl because `Arc<Path>` does not implement `Default`.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}
4155
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    /// Accumulates `rhs`: counts add up, and `rhs`'s `max_path` (covering
    /// items to the right) supersedes ours.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.non_ignored_count += rhs.non_ignored_count;
        self.file_count += rhs.file_count;
        self.non_ignored_file_count += rhs.non_ignored_file_count;
    }
}
4171
/// A lightweight record of an entry, stored in the id-keyed tree so paths
/// can be looked up by entry id.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan during which this record was last written.
    scan_id: usize,
}
4179
/// Result handle of a filesystem scan.
#[derive(Debug, Default)]
struct FsScanned {
    // Number of git status scans still in flight; polled (see
    // `BackgroundScanner::run`) to report whether scanning is ongoing.
    status_scans: Arc<AtomicU32>,
}
4184
/// Path entries are summarized by their maximum id, enabling id-ordered seeks.
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    fn summary(&self, _cx: &()) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
4192
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Path entries are keyed by their project entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
4200
/// Summary for the id-keyed tree: the greatest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
4205
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn zero(_cx: &Self::Context) -> Self {
        Default::default()
    }

    /// `summary` covers items to the right, so its `max_id` supersedes ours.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
4217
/// Lets a cursor seek the id-keyed tree directly by `ProjectEntryId`.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
4227
/// A path used as a sum-tree key; compares by path ordering.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct PathKey(Arc<Path>);
4230
impl Default for PathKey {
    /// The empty path, which sorts before every other path.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
4236
/// Lets a cursor seek the entry tree by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
4246
/// Scans a worktree's directory tree in the background, keeping the shared
/// `BackgroundScannerState` in sync with the filesystem.
struct BackgroundScanner {
    /// Shared mutable scan state (snapshot plus bookkeeping).
    state: Arc<Mutex<BackgroundScannerState>>,
    fs: Arc<dyn Fs>,
    /// Whether the underlying filesystem is case-sensitive.
    fs_case_sensitive: bool,
    /// Channel for reporting scan progress to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: BackgroundExecutor,
    /// Incoming requests to (re)scan specific paths.
    scan_requests_rx: channel::Receiver<ScanRequest>,
    /// Incoming requests to expand and keep scanning path prefixes.
    path_prefixes_to_scan_rx: channel::Receiver<PathPrefixScanRequest>,
    /// Source of fresh `ProjectEntryId`s.
    next_entry_id: Arc<AtomicUsize>,
    /// Current lifecycle phase (see `BackgroundScannerPhase`).
    phase: BackgroundScannerPhase,
    watcher: Arc<dyn Watcher>,
    settings: WorktreeSettings,
    // Presumably controls whether private files are shared with
    // collaborators — TODO confirm where this flag is consulted.
    share_private_files: bool,
}
4261
/// Lifecycle phase of the background scanner; affects how precisely
/// filesystem events can be interpreted.
#[derive(Copy, Clone, PartialEq)]
enum BackgroundScannerPhase {
    /// Performing the initial full scan of the worktree.
    InitialScan,
    /// Processing FS events that arrived while the initial scan ran.
    EventsReceivedDuringInitialScan,
    /// Steady state: processing FS events as they arrive.
    Events,
}
4268
4269impl BackgroundScanner {
4270 async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>) {
4271 // If the worktree root does not contain a git repository, then find
4272 // the git repository in an ancestor directory. Find any gitignore files
4273 // in ancestor directories.
4274 let root_abs_path = self.state.lock().snapshot.abs_path.clone();
4275 let mut containing_git_repository = None;
4276 for (index, ancestor) in root_abs_path.as_path().ancestors().enumerate() {
4277 if index != 0 {
4278 if let Ok(ignore) =
4279 build_gitignore(&ancestor.join(*GITIGNORE), self.fs.as_ref()).await
4280 {
4281 self.state
4282 .lock()
4283 .snapshot
4284 .ignores_by_parent_abs_path
4285 .insert(ancestor.into(), (ignore.into(), false));
4286 }
4287 }
4288
4289 let ancestor_dot_git = ancestor.join(*DOT_GIT);
4290 // Check whether the directory or file called `.git` exists (in the
4291 // case of worktrees it's a file.)
4292 if self
4293 .fs
4294 .metadata(&ancestor_dot_git)
4295 .await
4296 .is_ok_and(|metadata| metadata.is_some())
4297 {
4298 if index != 0 {
4299 // We canonicalize, since the FS events use the canonicalized path.
4300 if let Some(ancestor_dot_git) =
4301 self.fs.canonicalize(&ancestor_dot_git).await.log_err()
4302 {
4303 // We associate the external git repo with our root folder and
4304 // also mark where in the git repo the root folder is located.
4305 let local_repository = self.state.lock().insert_git_repository_for_path(
4306 WorkDirectory::AboveProject {
4307 absolute_path: ancestor.into(),
4308 location_in_repo: root_abs_path
4309 .as_path()
4310 .strip_prefix(ancestor)
4311 .unwrap()
4312 .into(),
4313 },
4314 ancestor_dot_git.clone().into(),
4315 self.fs.as_ref(),
4316 self.watcher.as_ref(),
4317 );
4318
4319 if local_repository.is_some() {
4320 containing_git_repository = Some(ancestor_dot_git)
4321 }
4322 };
4323 }
4324
4325 // Reached root of git repository.
4326 break;
4327 }
4328 }
4329
4330 let (scan_job_tx, scan_job_rx) = channel::unbounded();
4331 {
4332 let mut state = self.state.lock();
4333 state.snapshot.scan_id += 1;
4334 if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
4335 let ignore_stack = state
4336 .snapshot
4337 .ignore_stack_for_abs_path(root_abs_path.as_path(), true);
4338 if ignore_stack.is_abs_path_ignored(root_abs_path.as_path(), true) {
4339 root_entry.is_ignored = true;
4340 state.insert_entry(root_entry.clone(), self.fs.as_ref(), self.watcher.as_ref());
4341 }
4342 state.enqueue_scan_dir(root_abs_path.into(), &root_entry, &scan_job_tx);
4343 }
4344 };
4345
4346 // Perform an initial scan of the directory.
4347 drop(scan_job_tx);
4348 let scans_running = self.scan_dirs(true, scan_job_rx).await;
4349 {
4350 let mut state = self.state.lock();
4351 state.snapshot.completed_scan_id = state.snapshot.scan_id;
4352 }
4353
4354 let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
4355 self.send_status_update(scanning, SmallVec::new());
4356
        // Process any FS events that occurred while performing the initial scan.
4358 // For these events, update events cannot be as precise, because we didn't
4359 // have the previous state loaded yet.
4360 self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
4361 if let Poll::Ready(Some(mut paths)) = futures::poll!(fs_events_rx.next()) {
4362 while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
4363 paths.extend(more_paths);
4364 }
4365 self.process_events(paths.into_iter().map(Into::into).collect())
4366 .await;
4367 }
4368 if let Some(abs_path) = containing_git_repository {
4369 self.process_events(vec![abs_path]).await;
4370 }
4371
4372 // Continue processing events until the worktree is dropped.
4373 self.phase = BackgroundScannerPhase::Events;
4374
4375 loop {
4376 select_biased! {
4377 // Process any path refresh requests from the worktree. Prioritize
4378 // these before handling changes reported by the filesystem.
4379 request = self.next_scan_request().fuse() => {
4380 let Ok(request) = request else { break };
4381 let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
4382 if !self.process_scan_request(request, scanning).await {
4383 return;
4384 }
4385 }
4386
4387 path_prefix_request = self.path_prefixes_to_scan_rx.recv().fuse() => {
4388 let Ok(request) = path_prefix_request else { break };
4389 log::trace!("adding path prefix {:?}", request.path);
4390
4391 let did_scan = self.forcibly_load_paths(&[request.path.clone()]).await;
4392 if did_scan {
4393 let abs_path =
4394 {
4395 let mut state = self.state.lock();
4396 state.path_prefixes_to_scan.insert(request.path.clone());
4397 state.snapshot.abs_path.as_path().join(&request.path)
4398 };
4399
4400 if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
4401 self.process_events(vec![abs_path]).await;
4402 }
4403 }
4404 let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
4405 self.send_status_update(scanning, request.done);
4406 }
4407
4408 paths = fs_events_rx.next().fuse() => {
4409 let Some(mut paths) = paths else { break };
4410 while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
4411 paths.extend(more_paths);
4412 }
4413 self.process_events(paths.into_iter().map(Into::into).collect()).await;
4414 }
4415 }
4416 }
4417 }
4418
4419 async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
4420 log::debug!("rescanning paths {:?}", request.relative_paths);
4421
4422 request.relative_paths.sort_unstable();
4423 self.forcibly_load_paths(&request.relative_paths).await;
4424
4425 let root_path = self.state.lock().snapshot.abs_path.clone();
4426 let root_canonical_path = match self.fs.canonicalize(root_path.as_path()).await {
4427 Ok(path) => SanitizedPath::from(path),
4428 Err(err) => {
4429 log::error!("failed to canonicalize root path: {}", err);
4430 return true;
4431 }
4432 };
4433 let abs_paths = request
4434 .relative_paths
4435 .iter()
4436 .map(|path| {
4437 if path.file_name().is_some() {
4438 root_canonical_path.as_path().join(path).to_path_buf()
4439 } else {
4440 root_canonical_path.as_path().to_path_buf()
4441 }
4442 })
4443 .collect::<Vec<_>>();
4444
4445 {
4446 let mut state = self.state.lock();
4447 let is_idle = state.snapshot.completed_scan_id == state.snapshot.scan_id;
4448 state.snapshot.scan_id += 1;
4449 if is_idle {
4450 state.snapshot.completed_scan_id = state.snapshot.scan_id;
4451 }
4452 }
4453
4454 self.reload_entries_for_paths(
4455 root_path,
4456 root_canonical_path,
4457 &request.relative_paths,
4458 abs_paths,
4459 None,
4460 )
4461 .await;
4462
4463 self.send_status_update(scanning, request.done)
4464 }
4465
    /// Handles a batch of filesystem events: maps absolute event paths to
    /// worktree-relative paths, reloads the affected entries, updates ignore
    /// statuses, and refreshes git state for any repositories whose `.git`
    /// directories were touched.
    async fn process_events(&self, mut abs_paths: Vec<PathBuf>) {
        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(root_path.as_path()).await {
            Ok(path) => SanitizedPath::from(path),
            Err(err) => {
                // The root no longer resolves. Ask the root file handle where
                // it currently lives; a differing answer indicates the root
                // directory was renamed out from under us.
                let new_path = self
                    .state
                    .lock()
                    .snapshot
                    .root_file_handle
                    .clone()
                    .and_then(|handle| handle.current_path(&self.fs).log_err())
                    .map(SanitizedPath::from)
                    .filter(|new_path| *new_path != root_path);

                if let Some(new_path) = new_path.as_ref() {
                    log::info!(
                        "root renamed from {} to {}",
                        root_path.as_path().display(),
                        new_path.as_path().display()
                    )
                } else {
                    log::warn!("root path could not be canonicalized: {}", err);
                }
                // Notify the worktree of the (possibly unknown) new root and
                // bail out; no further event processing is meaningful.
                self.status_updates_tx
                    .unbounded_send(ScanState::RootUpdated { new_path })
                    .ok();
                return;
            }
        };

        // Certain directories may have FS changes, but do not lead to git data changes that Zed cares about.
        // Ignore these, to avoid Zed unnecessarily rescanning git metadata.
        let skipped_files_in_dot_git = HashSet::from_iter([*COMMIT_MESSAGE, *INDEX_LOCK]);
        let skipped_dirs_in_dot_git = [*FSMONITOR_DAEMON];

        let mut relative_paths = Vec::with_capacity(abs_paths.len());
        let mut dot_git_abs_paths = Vec::new();
        // Sort so that a directory precedes its contents, then drop any event
        // whose path is inside a preceding event's path (dedup_by removes the
        // later element when the closure returns true).
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(b));
        // Filter the events down to those that are relevant, collecting
        // worktree-relative paths and touched `.git` directories as we go.
        abs_paths.retain(|abs_path| {
            let abs_path = SanitizedPath::from(abs_path);

            let snapshot = &self.state.lock().snapshot;
            {
                let mut is_git_related = false;

                // Find the containing `.git` directory, if any, and the
                // event's path relative to it.
                let dot_git_paths = abs_path.as_path().ancestors().find_map(|ancestor| {
                    if smol::block_on(is_git_dir(ancestor, self.fs.as_ref())) {
                        let path_in_git_dir = abs_path.as_path().strip_prefix(ancestor).expect("stripping off the ancestor");
                        Some((ancestor.to_owned(), path_in_git_dir.to_owned()))
                    } else {
                        None
                    }
                });

                if let Some((dot_git_abs_path, path_in_git_dir)) = dot_git_paths {
                    if skipped_files_in_dot_git.contains(path_in_git_dir.as_os_str()) || skipped_dirs_in_dot_git.iter().any(|skipped_git_subdir| path_in_git_dir.starts_with(skipped_git_subdir)) {
                        log::debug!("ignoring event {abs_path:?} as it's in the .git directory among skipped files or directories");
                        return false;
                    }

                    is_git_related = true;
                    if !dot_git_abs_paths.contains(&dot_git_abs_path) {
                        dot_git_abs_paths.push(dot_git_abs_path);
                    }
                }

                // Events outside the worktree root are only interesting when
                // they belong to a repository we track (handled above).
                let relative_path: Arc<Path> =
                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                        path.into()
                    } else {
                        if is_git_related {
                            log::debug!(
                                "ignoring event {abs_path:?}, since it's in git dir outside of root path {root_canonical_path:?}",
                            );
                        } else {
                            log::error!(
                                "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
                            );
                        }
                        return false;
                    };

                // A changed .gitignore can affect the status of any repository
                // whose work directory contains it; schedule those for reload.
                if abs_path.0.file_name() == Some(*GITIGNORE) {
                    for (_, repo) in snapshot.git_repositories.iter().filter(|(_, repo)| repo.directory_contains(&relative_path)) {
                        if !dot_git_abs_paths.iter().any(|dot_git_abs_path| dot_git_abs_path == repo.dot_git_dir_abs_path.as_ref()) {
                            dot_git_abs_paths.push(repo.dot_git_dir_abs_path.to_path_buf());
                        }
                    }
                }

                // Skip events under directories we never loaded; they'll be
                // picked up if/when that directory is scanned.
                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
                    snapshot
                        .entry_for_path(parent)
                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
                });
                if !parent_dir_is_loaded {
                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
                    return false;
                }

                if self.settings.is_path_excluded(&relative_path) {
                    if !is_git_related {
                        log::debug!("ignoring FS event for excluded path {relative_path:?}");
                    }
                    return false;
                }

                relative_paths.push(relative_path);
                true
            }
        });

        if relative_paths.is_empty() && dot_git_abs_paths.is_empty() {
            return;
        }

        self.state.lock().snapshot.scan_id += 1;

        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        log::debug!("received fs events {:?}", relative_paths);
        self.reload_entries_for_paths(
            root_path,
            root_canonical_path,
            &relative_paths,
            abs_paths,
            Some(scan_job_tx.clone()),
        )
        .await;

        self.update_ignore_statuses(scan_job_tx).await;
        let scans_running = self.scan_dirs(false, scan_job_rx).await;

        let status_update = if !dot_git_abs_paths.is_empty() {
            Some(self.update_git_repositories(dot_git_abs_paths))
        } else {
            None
        };

        // Finish up asynchronously: wait for any git status refresh, then
        // mark the scan as complete and notify the worktree.
        let phase = self.phase;
        let status_update_tx = self.status_updates_tx.clone();
        let state = self.state.clone();
        self.executor
            .spawn(async move {
                if let Some(status_update) = status_update {
                    status_update.await;
                }

                {
                    let mut state = state.lock();
                    state.snapshot.completed_scan_id = state.snapshot.scan_id;
                    for (_, entry) in mem::take(&mut state.removed_entries) {
                        state.scanned_dirs.remove(&entry.id);
                    }
                    #[cfg(test)]
                    state.snapshot.check_git_invariants();
                }
                let scanning = scans_running.status_scans.load(atomic::Ordering::Acquire) > 0;
                send_status_update_inner(phase, state, status_update_tx, scanning, SmallVec::new());
            })
            .detach();
    }
4629
4630 async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
4631 let (scan_job_tx, scan_job_rx) = channel::unbounded();
4632 {
4633 let mut state = self.state.lock();
4634 let root_path = state.snapshot.abs_path.clone();
4635 for path in paths {
4636 for ancestor in path.ancestors() {
4637 if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
4638 if entry.kind == EntryKind::UnloadedDir {
4639 let abs_path = root_path.as_path().join(ancestor);
4640 state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
4641 state.paths_to_scan.insert(path.clone());
4642 break;
4643 }
4644 }
4645 }
4646 }
4647 drop(scan_job_tx);
4648 }
4649 let scans_running = Arc::new(AtomicU32::new(0));
4650 while let Ok(job) = scan_job_rx.recv().await {
4651 self.scan_dir(&scans_running, &job).await.log_err();
4652 }
4653
4654 !mem::take(&mut self.state.lock().paths_to_scan).is_empty()
4655 }
4656
    /// Spawns one worker per CPU to drain `scan_jobs_rx`, recursively scanning
    /// directories while interleaving user scan requests and periodic progress
    /// updates. Returns a handle to the counter of git status scans that may
    /// still be running in the background.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) -> FsScanned {
        // If the worktree is gone there is nobody to report to; do no work.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return FsScanned::default();
        }

        // Starts at 1 to represent this directory scan itself; decremented
        // after all workers finish. `scan_dir` bumps it for each scheduled
        // git status update.
        let scans_running = Arc::new(AtomicU32::new(1));
        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.next_scan_request().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, SmallVec::new());
                                        }
                                        Err(count) => {
                                            // Another worker won the race; adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&scans_running, &job).await {
                                        // Errors on the root itself are reported elsewhere.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    });
                }
            })
            .await;

        // The directory scan proper is finished; any remaining count reflects
        // git status scans still running in the background.
        scans_running.fetch_sub(1, atomic::Ordering::Release);
        FsScanned {
            status_scans: scans_running,
        }
    }
4733
4734 fn send_status_update(&self, scanning: bool, barrier: SmallVec<[barrier::Sender; 1]>) -> bool {
4735 send_status_update_inner(
4736 self.phase,
4737 self.state.clone(),
4738 self.status_updates_tx.clone(),
4739 scanning,
4740 barrier,
4741 )
4742 }
4743
    /// Scans a single directory: reads its children, builds entries for them,
    /// registers any `.git` repository or `.gitignore` file found, and
    /// enqueues scan jobs for scannable subdirectories.
    ///
    /// `scans_running` is incremented for each git status update scheduled
    /// here and decremented by the spawned task once those updates complete.
    async fn scan_dir(&self, scans_running: &Arc<AtomicU32>, job: &ScanJob) -> Result<()> {
        let root_abs_path;
        let root_char_bag;
        {
            let snapshot = &self.state.lock().snapshot;
            if self.settings.is_path_excluded(&job.path) {
                log::error!("skipping excluded directory {:?}", job.path);
                return Ok(());
            }
            log::debug!("scanning directory {:?}", job.path);
            root_abs_path = snapshot.abs_path().clone();
            root_char_bag = snapshot.root_char_bag;
        }

        let next_entry_id = self.next_entry_id.clone();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        // Lazily-computed canonical root, used only when symlinks are present.
        let mut root_canonical_path = None;
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory entry; `None` marks a directory that must not
        // be recursed into (recursive symlink).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut child_paths = self
            .fs
            .read_dir(&job.abs_path)
            .await?
            .filter_map(|entry| async {
                match entry {
                    Ok(entry) => Some(entry),
                    Err(error) => {
                        log::error!("error processing entry {:?}", error);
                        None
                    }
                }
            })
            .collect::<Vec<_>>()
            .await;

        // Ensure that .git and .gitignore are processed first.
        swap_to_front(&mut child_paths, *GITIGNORE);
        swap_to_front(&mut child_paths, *DOT_GIT);

        let mut git_status_update_jobs = Vec::new();
        for child_abs_path in child_paths {
            let child_abs_path: Arc<Path> = child_abs_path.into();
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();

            if child_name == *DOT_GIT {
                {
                    let mut state = self.state.lock();
                    let repo = state.insert_git_repository(
                        child_path.clone(),
                        self.fs.as_ref(),
                        self.watcher.as_ref(),
                    );
                    if let Some(local_repo) = repo {
                        // Balanced by the fetch_sub in the task spawned below.
                        scans_running.fetch_add(1, atomic::Ordering::Release);
                        git_status_update_jobs
                            .push(self.schedule_git_statuses_update(&mut state, local_repo));
                    }
                }
            } else if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        // Children scanned after this point (including via the
                        // jobs pushed below) see the extended ignore stack.
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }
            }

            if self.settings.is_path_excluded(&child_path) {
                log::debug!("skipping excluded child entry {child_path:?}");
                self.state.lock().remove_path(&child_path);
                continue;
            }

            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {child_abs_path:?}: {err:?}");
                    continue;
                }
            };

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
                None,
            );

            if job.is_external {
                // Everything under an external directory is external.
                child_entry.is_external = true;
            } else if child_metadata.is_symlink {
                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
                    Ok(path) => path,
                    Err(err) => {
                        log::error!(
                            "error reading target of symlink {:?}: {:?}",
                            child_abs_path,
                            err
                        );
                        continue;
                    }
                };

                // lazily canonicalize the root path in order to determine if
                // symlinks point outside of the worktree.
                let root_canonical_path = match &root_canonical_path {
                    Some(path) => path,
                    None => match self.fs.canonicalize(&root_abs_path).await {
                        Ok(path) => root_canonical_path.insert(path),
                        Err(err) => {
                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
                            continue;
                        }
                    },
                };

                if !canonical_path.starts_with(root_canonical_path) {
                    child_entry.is_external = true;
                }

                child_entry.canonical_path = Some(canonical_path.into());
            }

            if child_entry.is_dir() {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_always_included = self.settings.is_path_always_included(&child_path);

                // Avoid recursing until crash in the case of a recursive symlink
                if job.ancestor_inodes.contains(&child_entry.inode) {
                    new_jobs.push(None);
                } else {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path.clone(),
                        path: child_path,
                        is_external: child_entry.is_external,
                        ignore_stack: if child_entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
                child_entry.is_always_included = self.settings.is_path_always_included(&child_path);
            }

            {
                let relative_path = job.path.join(child_name);
                if self.is_path_private(&relative_path) {
                    log::debug!("detected private file: {relative_path:?}");
                    child_entry.is_private = true;
                }
            }

            new_entries.push(child_entry);
        }

        // Await the scheduled git status updates off-thread, then notify the
        // worktree once any of them succeeded.
        let task_state = self.state.clone();
        let phase = self.phase;
        let status_updates_tx = self.status_updates_tx.clone();
        let scans_running = scans_running.clone();
        self.executor
            .spawn(async move {
                if !git_status_update_jobs.is_empty() {
                    let status_updates = join_all(git_status_update_jobs).await;
                    let status_updated = status_updates
                        .iter()
                        .any(|update_result| update_result.is_ok());
                    scans_running.fetch_sub(status_updates.len() as u32, atomic::Ordering::Release);
                    if status_updated {
                        let scanning = scans_running.load(atomic::Ordering::Acquire) > 0;
                        send_status_update_inner(
                            phase,
                            task_state,
                            status_updates_tx,
                            scanning,
                            SmallVec::new(),
                        );
                    }
                }
            })
            .detach();

        let mut state = self.state.lock();

        // Identify any subdirectories that should not be scanned.
        // `job_ix` tracks the position of the current directory's job in
        // `new_jobs`; removing a job keeps the two lists in sync.
        let mut job_ix = 0;
        for entry in &mut new_entries {
            state.reuse_entry_id(entry);
            if entry.is_dir() {
                if state.should_scan_directory(entry) {
                    job_ix += 1;
                } else {
                    log::debug!("defer scanning directory {:?}", entry.path);
                    entry.kind = EntryKind::UnloadedDir;
                    new_jobs.remove(job_ix);
                }
            }
            if entry.is_always_included {
                state
                    .snapshot
                    .always_included_entries
                    .push(entry.path.clone());
            }
        }

        state.populate_dir(&job.path, new_entries, new_ignore);
        self.watcher.add(job.abs_path.as_ref()).log_err();

        // `None` slots (recursive symlinks) are skipped by `flatten`.
        for new_job in new_jobs.into_iter().flatten() {
            job.scan_queue
                .try_send(new_job)
                .expect("channel is unbounded");
        }

        Ok(())
    }
4979
    /// Reloads worktree entries for the given paths from the filesystem,
    /// refreshing their metadata, git statuses, and — when `scan_queue_tx` is
    /// provided — recursively rescanning affected directories.
    ///
    /// All list arguments should be sorted before calling this function
    async fn reload_entries_for_paths(
        &self,
        root_abs_path: SanitizedPath,
        root_canonical_path: SanitizedPath,
        relative_paths: &[Arc<Path>],
        abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) {
        // grab metadata for all requested paths
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| async move {
                    let metadata = self.fs.metadata(abs_path).await?;
                    if let Some(metadata) = metadata {
                        let canonical_path = self.fs.canonicalize(abs_path).await?;

                        // If we're on a case-insensitive filesystem (default on macOS), we want
                        // to only ignore metadata for non-symlink files if their absolute-path matches
                        // the canonical-path.
                        // Because if not, this might be a case-only-renaming (`mv test.txt TEST.TXT`)
                        // and we want to ignore the metadata for the old path (`test.txt`) so it's
                        // treated as removed.
                        if !self.fs_case_sensitive && !metadata.is_symlink {
                            let canonical_file_name = canonical_path.file_name();
                            let file_name = abs_path.file_name();
                            if canonical_file_name != file_name {
                                return Ok(None);
                            }
                        }

                        anyhow::Ok(Some((metadata, SanitizedPath::from(canonical_path))))
                    } else {
                        Ok(None)
                    }
                })
                .collect::<Vec<_>>(),
        )
        .await;

        let mut state = self.state.lock();
        let doing_recursive_update = scan_queue_tx.is_some();

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            if matches!(metadata, Ok(None)) || doing_recursive_update {
                log::trace!("remove path {:?}", path);
                state.remove_path(path);
            }
        }

        // Group all relative paths by their git repository.
        let mut paths_by_git_repo = HashMap::default();
        for relative_path in relative_paths.iter() {
            let repository_data = state
                .snapshot
                .local_repo_for_path(relative_path)
                .zip(state.snapshot.repository_for_path(relative_path));
            if let Some((local_repo, entry)) = repository_data {
                if let Ok(repo_path) = local_repo.relativize(relative_path) {
                    paths_by_git_repo
                        .entry(local_repo.work_directory.clone())
                        .or_insert_with(|| RepoPaths {
                            entry: entry.clone(),
                            repo: local_repo.repo_ptr.clone(),
                            repo_paths: Default::default(),
                        })
                        .add_path(repo_path);
                }
            }
        }

        // For each repository, query the statuses of the touched paths and
        // diff them against the statuses recorded in the snapshot.
        for (work_directory, mut paths) in paths_by_git_repo {
            if let Ok(status) = paths.repo.status(&paths.repo_paths) {
                let mut changed_path_statuses = Vec::new();
                let statuses = paths.entry.statuses_by_path.clone();
                let mut cursor = statuses.cursor::<PathProgress>(&());

                for (repo_path, status) in &*status.entries {
                    // Paths remaining in `paths.repo_paths` afterwards had no
                    // reported status and are treated as removals below.
                    paths.remove_repo_path(repo_path);
                    if cursor.seek_forward(&PathTarget::Path(repo_path), Bias::Left, &()) {
                        if &cursor.item().unwrap().status == status {
                            continue;
                        }
                    }

                    changed_path_statuses.push(Edit::Insert(StatusEntry {
                        repo_path: repo_path.clone(),
                        status: *status,
                    }));
                }

                let mut cursor = statuses.cursor::<PathProgress>(&());
                for path in paths.repo_paths {
                    if cursor.seek_forward(&PathTarget::Path(&path), Bias::Left, &()) {
                        changed_path_statuses.push(Edit::Remove(PathKey(path.0)));
                    }
                }

                if !changed_path_statuses.is_empty() {
                    let work_directory_id = state.snapshot.repositories.update(
                        &work_directory.path_key(),
                        &(),
                        move |repository_entry| {
                            repository_entry
                                .statuses_by_path
                                .edit(changed_path_statuses, &());

                            repository_entry.work_directory_id
                        },
                    );

                    if let Some(work_directory_id) = work_directory_id {
                        let scan_id = state.snapshot.scan_id;
                        state.snapshot.git_repositories.update(
                            &work_directory_id,
                            |local_repository_entry| {
                                local_repository_entry.status_scan_id = scan_id;
                            },
                        );
                    }
                }
            }
        }

        // Rebuild the worktree entries from the freshly-read metadata.
        for (path, metadata) in relative_paths.iter().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.as_path().join(path).into();
            match metadata {
                Ok(Some((metadata, canonical_path))) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let is_external = !canonical_path.starts_with(&root_canonical_path);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                        if metadata.is_symlink {
                            Some(canonical_path.as_path().to_path_buf().into())
                        } else {
                            None
                        },
                    );

                    let is_dir = fs_entry.is_dir();
                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
                    fs_entry.is_external = is_external;
                    fs_entry.is_private = self.is_path_private(path);
                    fs_entry.is_always_included = self.settings.is_path_always_included(path);

                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, is_dir) {
                        if state.should_scan_directory(&fs_entry)
                            || (fs_entry.path.as_os_str().is_empty()
                                && abs_path.file_name() == Some(*DOT_GIT))
                        {
                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
                        } else {
                            fs_entry.kind = EntryKind::UnloadedDir;
                        }
                    }

                    state.insert_entry(fs_entry.clone(), self.fs.as_ref(), self.watcher.as_ref());
                }
                Ok(None) => {
                    // The path is gone; drop any repository keyed on it.
                    self.remove_repo_path(path, &mut state.snapshot);
                }
                Err(err) => {
                    log::error!("error reading file {abs_path:?} on event: {err:#}");
                }
            }
        }

        // Record the refreshed paths so the next status update reports them.
        util::extend_sorted(
            &mut state.changed_paths,
            relative_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );
    }
5163
5164 fn remove_repo_path(&self, path: &Arc<Path>, snapshot: &mut LocalSnapshot) -> Option<()> {
5165 if !path
5166 .components()
5167 .any(|component| component.as_os_str() == *DOT_GIT)
5168 {
5169 if let Some(repository) = snapshot.repository(PathKey(path.clone())) {
5170 snapshot
5171 .git_repositories
5172 .remove(&repository.work_directory_id);
5173 snapshot
5174 .snapshot
5175 .repositories
5176 .remove(&repository.work_directory.path_key(), &());
5177 return Some(());
5178 }
5179 }
5180
5181 Some(())
5182 }
5183
    /// Re-applies gitignore rules after `.gitignore` files changed: collects
    /// the directories whose ignore state needs recomputing, then fans the
    /// work out across one worker per CPU.
    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
        let mut ignores_to_update = Vec::new();
        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        let prev_snapshot;
        {
            let snapshot = &mut self.state.lock().snapshot;
            let abs_path = snapshot.abs_path.clone();
            // Collect parents flagged as needing an update, and drop cached
            // gitignores whose .gitignore file no longer exists in the tree.
            snapshot
                .ignores_by_parent_abs_path
                .retain(|parent_abs_path, (_, needs_update)| {
                    if let Ok(parent_path) = parent_abs_path.strip_prefix(abs_path.as_path()) {
                        if *needs_update {
                            *needs_update = false;
                            if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                                ignores_to_update.push(parent_abs_path.clone());
                            }
                        }

                        let ignore_path = parent_path.join(*GITIGNORE);
                        if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                            return false;
                        }
                    }
                    true
                });

            // Sort so that descendants directly follow their ancestors, then
            // skip any path covered by a preceding ancestor — updating the
            // ancestor recursively covers it.
            ignores_to_update.sort_unstable();
            let mut ignores_to_update = ignores_to_update.into_iter().peekable();
            while let Some(parent_abs_path) = ignores_to_update.next() {
                while ignores_to_update
                    .peek()
                    .map_or(false, |p| p.starts_with(&parent_abs_path))
                {
                    ignores_to_update.next().unwrap();
                }

                let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
                ignore_queue_tx
                    .send_blocking(UpdateIgnoreStatusJob {
                        abs_path: parent_abs_path,
                        ignore_stack,
                        ignore_queue: ignore_queue_tx.clone(),
                        scan_queue: scan_job_tx.clone(),
                    })
                    .unwrap();
            }

            prev_snapshot = snapshot.clone();
        }
        // Drop the last root sender; workers exit once every job (including
        // jobs that jobs themselves enqueue) has been processed.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.next_scan_request().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &prev_snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
5262
    /// Recomputes the ignored flag for the direct children of one directory,
    /// enqueues follow-up jobs for child directories, and applies the
    /// resulting entry edits to the live snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        log::trace!("update ignore status {:?}", job.abs_path);

        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own .gitignore, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job
            .abs_path
            .strip_prefix(snapshot.abs_path.as_path())
            .unwrap();

        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());

            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };

                // Scan any directories that were previously ignored and weren't previously scanned.
                if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
                    let state = self.state.lock();
                    if state.should_scan_directory(&entry) {
                        state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
                    }
                }

                // Always recurse into child directories so their subtrees
                // pick up the new ignore stack as well.
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                        scan_queue: job.scan_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only entries whose ignored flag actually flipped produce edits.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let state = &mut self.state.lock();
        // Record the changed paths (kept sorted) for the next status update.
        for edit in &entries_by_path_edits {
            if let Edit::Insert(entry) = edit {
                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
                    state.changed_paths.insert(ix, entry.path.clone());
                }
            }
        }

        state
            .snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
5333
    /// Reloads git repository state for the given `.git` directories:
    /// registers newly-discovered repositories, reloads the index of known
    /// ones, prunes repositories whose `.git` is gone, and schedules status
    /// updates. Returns a task that resolves when those updates finish.
    fn update_git_repositories(&self, dot_git_paths: Vec<PathBuf>) -> Task<()> {
        log::debug!("reloading repositories: {dot_git_paths:?}");

        let mut status_updates = Vec::new();
        {
            let mut state = self.state.lock();
            let scan_id = state.snapshot.scan_id;
            for dot_git_dir in dot_git_paths {
                // Match either the repo's .git dir or, for git worktrees, the
                // worktree-specific gitdir path.
                let existing_repository_entry =
                    state
                        .snapshot
                        .git_repositories
                        .iter()
                        .find_map(|(_, repo)| {
                            if repo.dot_git_dir_abs_path.as_ref() == &dot_git_dir
                                || repo.dot_git_worktree_abs_path.as_deref() == Some(&dot_git_dir)
                            {
                                Some(repo.clone())
                            } else {
                                None
                            }
                        });

                let local_repository = match existing_repository_entry {
                    None => {
                        // NOTE(review): this early return abandons the
                        // remaining dot_git paths and skips the pruning pass
                        // below — confirm `return` (vs `continue`) is intended
                        // for a .git dir outside the worktree root.
                        let Ok(relative) = dot_git_dir.strip_prefix(state.snapshot.abs_path())
                        else {
                            return Task::ready(());
                        };
                        match state.insert_git_repository(
                            relative.into(),
                            self.fs.as_ref(),
                            self.watcher.as_ref(),
                        ) {
                            Some(output) => output,
                            None => continue,
                        }
                    }
                    Some(local_repository) => {
                        // Already refreshed during this scan; nothing to do.
                        if local_repository.git_dir_scan_id == scan_id {
                            continue;
                        }
                        local_repository.repo_ptr.reload_index();

                        state.snapshot.git_repositories.update(
                            &local_repository.work_directory_id,
                            |entry| {
                                entry.git_dir_scan_id = scan_id;
                                entry.status_scan_id = scan_id;
                            },
                        );

                        local_repository
                    }
                };

                status_updates
                    .push(self.schedule_git_statuses_update(&mut state, local_repository));
            }

            // Remove any git repositories whose .git entry no longer exists.
            let snapshot = &mut state.snapshot;
            let mut ids_to_preserve = HashSet::default();
            for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
                let exists_in_snapshot = snapshot
                    .entry_for_id(work_directory_id)
                    .map_or(false, |entry| {
                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                    });

                if exists_in_snapshot
                    || matches!(
                        smol::block_on(self.fs.metadata(&entry.dot_git_dir_abs_path)),
                        Ok(Some(_))
                    )
                {
                    ids_to_preserve.insert(work_directory_id);
                }
            }

            snapshot
                .git_repositories
                .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
            snapshot.repositories.retain(&(), |entry| {
                ids_to_preserve.contains(&entry.work_directory_id)
            });
        }

        // The returned task merely awaits completion of all status updates.
        self.executor.spawn(async move {
            let _updates_finished: Vec<Result<(), oneshot::Canceled>> =
                join_all(status_updates).await;
        })
    }
5427
    /// Spawns a background job that recomputes the full set of git statuses
    /// for `local_repository`, replacing any in-flight scan for the same
    /// work directory (the previous task is dropped on insert).
    ///
    /// Returns a receiver that resolves when the job completes; it is
    /// canceled if the job is replaced or fails early.
    fn schedule_git_statuses_update(
        &self,
        state: &mut BackgroundScannerState,
        mut local_repository: LocalRepositoryEntry,
    ) -> oneshot::Receiver<()> {
        let repository_name = local_repository.work_directory.display_name();
        let path_key = local_repository.work_directory.path_key();

        let job_state = self.state.clone();
        let (tx, rx) = oneshot::channel();

        // Keyed by path: inserting replaces (and thereby cancels) any
        // previous scan task for this repository.
        state.repository_scans.insert(
            path_key.clone(),
            self.executor.spawn(async move {
                update_branches(&job_state, &mut local_repository).log_err();
                log::trace!("updating git statuses for repo {repository_name}",);
                let t0 = Instant::now();

                // Query statuses for the entire work directory.
                let Some(statuses) = local_repository
                    .repo()
                    .status(&[git::WORK_DIRECTORY_REPO_PATH.clone()])
                    .log_err()
                else {
                    return;
                };

                log::trace!(
                    "computed git statuses for repo {repository_name} in {:?}",
                    t0.elapsed()
                );

                let t0 = Instant::now();
                let mut changed_paths = Vec::new();
                // Work on a clone of the snapshot so the lock is only held
                // briefly here and again when writing results back below.
                let snapshot = job_state.lock().snapshot.snapshot.clone();

                let Some(mut repository) = snapshot
                    .repository(path_key)
                    .context(
                        "Tried to update git statuses for a repository that isn't in the snapshot",
                    )
                    .log_err()
                else {
                    return;
                };

                // If the merge heads changed, the previous conflict set is stale.
                let merge_head_shas = local_repository.repo().merge_head_shas();
                if merge_head_shas != local_repository.current_merge_head_shas {
                    mem::take(&mut repository.current_merge_conflicts);
                }

                // Rebuild the status tree from scratch and collect the
                // project-relative paths whose status may have changed.
                let mut new_entries_by_path = SumTree::new(&());
                for (repo_path, status) in statuses.entries.iter() {
                    let project_path = repository.work_directory.unrelativize(repo_path);

                    new_entries_by_path.insert_or_replace(
                        StatusEntry {
                            repo_path: repo_path.clone(),
                            status: *status,
                        },
                        &(),
                    );
                    if status.is_conflicted() {
                        repository.current_merge_conflicts.insert(repo_path.clone());
                    }

                    if let Some(path) = project_path {
                        changed_paths.push(path);
                    }
                }

                repository.statuses_by_path = new_entries_by_path;
                // Publish the results back into the shared state.
                let mut state = job_state.lock();
                state
                    .snapshot
                    .repositories
                    .insert_or_replace(repository, &());
                state.snapshot.git_repositories.update(
                    &local_repository.work_directory_id,
                    |entry| {
                        entry.current_merge_head_shas = merge_head_shas;
                        // First line of MERGE_MSG, if present, summarizes the merge.
                        entry.merge_message = std::fs::read_to_string(
                            local_repository.dot_git_dir_abs_path.join("MERGE_MSG"),
                        )
                        .ok()
                        .and_then(|merge_msg| Some(merge_msg.lines().next()?.to_owned()));
                        entry.status_scan_id += 1;
                    },
                );

                util::extend_sorted(
                    &mut state.changed_paths,
                    changed_paths,
                    usize::MAX,
                    Ord::cmp,
                );

                log::trace!(
                    "applied git status updates for repo {repository_name} in {:?}",
                    t0.elapsed(),
                );
                tx.send(()).ok();
            }),
        );
        rx
    }
5534
5535 async fn progress_timer(&self, running: bool) {
5536 if !running {
5537 return futures::future::pending().await;
5538 }
5539
5540 #[cfg(any(test, feature = "test-support"))]
5541 if self.fs.is_fake() {
5542 return self.executor.simulate_random_delay().await;
5543 }
5544
5545 smol::Timer::after(FS_WATCH_LATENCY).await;
5546 }
5547
5548 fn is_path_private(&self, path: &Path) -> bool {
5549 !self.share_private_files && self.settings.is_path_private(path)
5550 }
5551
5552 async fn next_scan_request(&self) -> Result<ScanRequest> {
5553 let mut request = self.scan_requests_rx.recv().await?;
5554 while let Ok(next_request) = self.scan_requests_rx.try_recv() {
5555 request.relative_paths.extend(next_request.relative_paths);
5556 request.done.extend(next_request.done);
5557 }
5558 Ok(request)
5559 }
5560}
5561
5562fn send_status_update_inner(
5563 phase: BackgroundScannerPhase,
5564 state: Arc<Mutex<BackgroundScannerState>>,
5565 status_updates_tx: UnboundedSender<ScanState>,
5566 scanning: bool,
5567 barrier: SmallVec<[barrier::Sender; 1]>,
5568) -> bool {
5569 let mut state = state.lock();
5570 if state.changed_paths.is_empty() && scanning {
5571 return true;
5572 }
5573
5574 let new_snapshot = state.snapshot.clone();
5575 let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
5576 let changes = build_diff(phase, &old_snapshot, &new_snapshot, &state.changed_paths);
5577 state.changed_paths.clear();
5578
5579 status_updates_tx
5580 .unbounded_send(ScanState::Updated {
5581 snapshot: new_snapshot,
5582 changes,
5583 scanning,
5584 barrier,
5585 })
5586 .is_ok()
5587}
5588
5589fn update_branches(
5590 state: &Mutex<BackgroundScannerState>,
5591 repository: &mut LocalRepositoryEntry,
5592) -> Result<()> {
5593 let branches = repository.repo().branches()?;
5594 let snapshot = state.lock().snapshot.snapshot.clone();
5595 let mut repository = snapshot
5596 .repository(repository.work_directory.path_key())
5597 .context("Missing repository")?;
5598 repository.current_branch = branches.into_iter().find(|branch| branch.is_head);
5599
5600 let mut state = state.lock();
5601 state
5602 .snapshot
5603 .repositories
5604 .insert_or_replace(repository, &());
5605
5606 Ok(())
5607}
5608
/// Computes the set of entry changes between `old_snapshot` and
/// `new_snapshot`, restricted to the subtrees rooted at `event_paths`.
///
/// Walks both snapshots' entry trees with a pair of cursors, advancing them
/// in lockstep and classifying each divergence as Added / Removed / Updated /
/// Loaded (or AddedOrUpdated when `phase` makes the distinction ambiguous).
/// `event_paths` must be sorted; both cursors only ever seek forward.
fn build_diff(
    phase: BackgroundScannerPhase,
    old_snapshot: &Snapshot,
    new_snapshot: &Snapshot,
    event_paths: &[Arc<Path>],
) -> UpdatedEntriesSet {
    use BackgroundScannerPhase::*;
    use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

    // Identify which paths have changed. Use the known set of changed
    // parent paths to optimize the search.
    let mut changes = Vec::new();
    let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>(&());
    let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>(&());
    let mut last_newly_loaded_dir_path = None;
    old_paths.next(&());
    new_paths.next(&());
    for path in event_paths {
        let path = PathKey(path.clone());
        // Catch each cursor up to the current event path if it is behind.
        if old_paths.item().map_or(false, |e| e.path < path.0) {
            old_paths.seek_forward(&path, Bias::Left, &());
        }
        if new_paths.item().map_or(false, |e| e.path < path.0) {
            new_paths.seek_forward(&path, Bias::Left, &());
        }
        loop {
            match (old_paths.item(), new_paths.item()) {
                (Some(old_entry), Some(new_entry)) => {
                    // Both cursors have moved past this event path's subtree;
                    // proceed to the next event path.
                    if old_entry.path > path.0
                        && new_entry.path > path.0
                        && !old_entry.path.starts_with(&path.0)
                        && !new_entry.path.starts_with(&path.0)
                    {
                        break;
                    }

                    match Ord::cmp(&old_entry.path, &new_entry.path) {
                        // Present only in the old snapshot: removed.
                        Ordering::Less => {
                            changes.push((old_entry.path.clone(), old_entry.id, Removed));
                            old_paths.next(&());
                        }
                        // Present in both: compare ids and contents.
                        Ordering::Equal => {
                            if phase == EventsReceivedDuringInitialScan {
                                if old_entry.id != new_entry.id {
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                }
                                // If the worktree was not fully initialized when this event was generated,
                                // we can't know whether this entry was added during the scan or whether
                                // it was merely updated.
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    AddedOrUpdated,
                                ));
                            } else if old_entry.id != new_entry.id {
                                // Same path, different entry id: treat as a
                                // remove followed by an add.
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                changes.push((new_entry.path.clone(), new_entry.id, Added));
                            } else if old_entry != new_entry {
                                if old_entry.kind.is_unloaded() {
                                    // Remember this dir so descendants found
                                    // below also count as newly loaded.
                                    last_newly_loaded_dir_path = Some(&new_entry.path);
                                    changes.push((new_entry.path.clone(), new_entry.id, Loaded));
                                } else {
                                    changes.push((new_entry.path.clone(), new_entry.id, Updated));
                                }
                            }
                            old_paths.next(&());
                            new_paths.next(&());
                        }
                        // Present only in the new snapshot: added or loaded.
                        Ordering::Greater => {
                            let is_newly_loaded = phase == InitialScan
                                || last_newly_loaded_dir_path
                                    .as_ref()
                                    .map_or(false, |dir| new_entry.path.starts_with(dir));
                            changes.push((
                                new_entry.path.clone(),
                                new_entry.id,
                                if is_newly_loaded { Loaded } else { Added },
                            ));
                            new_paths.next(&());
                        }
                    }
                }
                // Old snapshot has trailing entries: all removed.
                (Some(old_entry), None) => {
                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                    old_paths.next(&());
                }
                // New snapshot has trailing entries: added or newly loaded.
                (None, Some(new_entry)) => {
                    let is_newly_loaded = phase == InitialScan
                        || last_newly_loaded_dir_path
                            .as_ref()
                            .map_or(false, |dir| new_entry.path.starts_with(dir));
                    changes.push((
                        new_entry.path.clone(),
                        new_entry.id,
                        if is_newly_loaded { Loaded } else { Added },
                    ));
                    new_paths.next(&());
                }
                (None, None) => break,
            }
        }
    }

    changes.into()
}
5714
/// Moves the first path in `child_paths` whose file name equals `file` to the
/// front, preserving the relative order of all other entries. Does nothing if
/// no path matches.
///
/// Paths without a final component (e.g. `/` or paths ending in `..`) are
/// skipped instead of panicking, and the move is done with a single in-place
/// rotation rather than a remove-then-insert pair of shifts.
fn swap_to_front(child_paths: &mut Vec<PathBuf>, file: &OsStr) {
    let position = child_paths
        .iter()
        // `file_name()` is None for paths like `/` or `a/..`; treat those as
        // non-matching rather than unwrapping (the original panicked here).
        .position(|path| path.file_name() == Some(file));
    if let Some(position) = position {
        // Rotate the prefix so the match lands at index 0: one pass, no
        // reallocation, order of the remaining entries preserved.
        child_paths[..=position].rotate_right(1);
    }
}
5724
5725fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
5726 let mut result = root_char_bag;
5727 result.extend(
5728 path.to_string_lossy()
5729 .chars()
5730 .map(|c| c.to_ascii_lowercase()),
5731 );
5732 result
5733}
5734
/// A git repository paired with a sorted set of paths (relative to the
/// repository root) that are pending some operation against it.
#[derive(Debug)]
struct RepoPaths {
    // Handle to the underlying git repository.
    repo: Arc<dyn GitRepository>,
    // Snapshot-level entry describing this repository.
    entry: RepositoryEntry,
    // sorted
    repo_paths: Vec<RepoPath>,
}
5742
5743impl RepoPaths {
5744 fn add_path(&mut self, repo_path: RepoPath) {
5745 match self.repo_paths.binary_search(&repo_path) {
5746 Ok(_) => {}
5747 Err(ix) => self.repo_paths.insert(ix, repo_path),
5748 }
5749 }
5750
5751 fn remove_repo_path(&mut self, repo_path: &RepoPath) {
5752 match self.repo_paths.binary_search(&repo_path) {
5753 Ok(ix) => {
5754 self.repo_paths.remove(ix);
5755 }
5756 Err(_) => {}
5757 }
5758 }
5759}
5760
/// A unit of work for the background scanner: scan the directory at
/// `abs_path` and enqueue child directories onto `scan_queue`.
#[derive(Debug)]
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules accumulated from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Queue onto which discovered subdirectories are pushed.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect filesystem cycles
    // (e.g. symlink loops).
    ancestor_inodes: TreeSet<u64>,
    // Whether this directory lies outside the worktree root.
    is_external: bool,
}
5770
/// A unit of work for re-evaluating ignore status after a gitignore change:
/// recompute ignore state under `abs_path` using `ignore_stack`, enqueueing
/// follow-up ignore jobs and directory scans as needed.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
    scan_queue: Sender<ScanJob>,
}
5777
/// Test-support extensions for worktree handles, used to deterministically
/// flush pending filesystem events before making assertions.
pub trait WorktreeModelHandle {
    /// Flushes pending FS events for the worktree's own directory.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;

    /// Flushes pending FS events for the root repository's `.git` directory,
    /// which may live outside the worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events_in_root_git_repository<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
5791
impl WorktreeModelHandle for Entity<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it...
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_some())
                .await;

            // ...then delete it and wait for the removal to be observed too.
            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();
            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_none())
                .await;

            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }

    // This function is similar to flush_fs_events, except that it waits for events to be flushed in
    // the .git folder of the root repository.
    // The reason for its existence is that a repository's .git folder might live *outside* of the
    // worktree and thus its FS events might go through a different path.
    // In order to flush those, we need to create artificial events in the .git folder and wait
    // for the repository to be reloaded.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events_in_root_git_repository<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path, mut git_dir_scan_id) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            let root_entry = tree.root_git_entry().unwrap();
            let local_repo_entry = tree.get_local_repo(&root_entry).unwrap();
            (
                tree.fs.clone(),
                local_repo_entry.dot_git_dir_abs_path.clone(),
                local_repo_entry.git_dir_scan_id,
            )
        });

        // True (and advances the baseline) each time the repository has been
        // rescanned since the last check.
        let scan_id_increased = |tree: &mut Worktree, git_dir_scan_id: &mut usize| {
            let root_entry = tree.root_git_entry().unwrap();
            let local_repo_entry = tree
                .as_local()
                .unwrap()
                .get_local_repo(&root_entry)
                .unwrap();

            if local_repo_entry.git_dir_scan_id > *git_dir_scan_id {
                *git_dir_scan_id = local_repo_entry.git_dir_scan_id;
                true
            } else {
                false
            }
        };

        async move {
            // Touch a sentinel inside `.git` and wait for a rescan...
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| {
                scan_id_increased(tree, &mut git_dir_scan_id)
            })
            .await;

            // ...then remove it and wait for a second rescan.
            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| {
                scan_id_increased(tree, &mut git_dir_scan_id)
            })
            .await;

            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
5898
/// Running totals accumulated while traversing the entry tree: how many
/// entries (and specifically files) have been passed, with and without
/// ignored entries, plus the maximum path reached so far.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    non_ignored_count: usize,
    file_count: usize,
    non_ignored_file_count: usize,
}

impl<'a> TraversalProgress<'a> {
    /// Returns the number of entries passed so far, filtered by whether
    /// files, directories, and ignored entries should be counted.
    fn count(&self, include_files: bool, include_dirs: bool, include_ignored: bool) -> usize {
        // Nothing is countable if both files and directories are excluded.
        if !include_files && !include_dirs {
            return 0;
        }
        // Pick the (all, non-ignored) pair for the requested entry kinds;
        // directory-only counts are derived by subtracting file counts.
        let (all, non_ignored) = if include_files && include_dirs {
            (self.count, self.non_ignored_count)
        } else if include_files {
            (self.file_count, self.non_ignored_file_count)
        } else {
            (
                self.count - self.file_count,
                self.non_ignored_count - self.non_ignored_file_count,
            )
        };
        if include_ignored {
            all
        } else {
            non_ignored
        }
    }
}
5921
// Lets `TraversalProgress` be accumulated as a cursor dimension over the
// entries tree by summing each node's `EntrySummary`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // `max_path` tracks the rightmost path seen; the counters accumulate.
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.non_ignored_count += summary.non_ignored_count;
        self.file_count += summary.file_count;
        self.non_ignored_file_count += summary.non_ignored_file_count;
    }
}
5935
// The identity element for accumulation: empty path, all counters zero.
// Written by hand because `&'a Path` has no `Default` impl to derive from.
impl<'a> Default for TraversalProgress<'a> {
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}
5947
/// A borrowed worktree entry paired with the aggregated git status summary
/// computed for it during a [`GitTraversal`].
#[derive(Debug, Clone, Copy)]
pub struct GitEntryRef<'a> {
    pub entry: &'a Entry,
    pub git_summary: GitSummary,
}
5953
5954impl<'a> GitEntryRef<'a> {
5955 pub fn to_owned(&self) -> GitEntry {
5956 GitEntry {
5957 entry: self.entry.clone(),
5958 git_summary: self.git_summary,
5959 }
5960 }
5961}
5962
5963impl<'a> Deref for GitEntryRef<'a> {
5964 type Target = Entry;
5965
5966 fn deref(&self) -> &Self::Target {
5967 &self.entry
5968 }
5969}
5970
// Cheap conversion to the underlying entry reference.
impl<'a> AsRef<Entry> for GitEntryRef<'a> {
    fn as_ref(&self) -> &Entry {
        self.entry
    }
}
5976
/// An owned worktree entry paired with its aggregated git status summary;
/// the owned counterpart of [`GitEntryRef`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct GitEntry {
    pub entry: Entry,
    pub git_summary: GitSummary,
}
5982
5983impl GitEntry {
5984 pub fn to_ref(&self) -> GitEntryRef {
5985 GitEntryRef {
5986 entry: &self.entry,
5987 git_summary: self.git_summary,
5988 }
5989 }
5990}
5991
// Allows entry methods/fields to be used directly on a `GitEntry`.
impl Deref for GitEntry {
    type Target = Entry;

    fn deref(&self) -> &Self::Target {
        &self.entry
    }
}
5999
// Cheap conversion to a reference to the underlying entry.
impl AsRef<Entry> for GitEntry {
    fn as_ref(&self) -> &Entry {
        &self.entry
    }
}
6005
/// Walks the worktree entries and their associated git statuses.
pub struct GitTraversal<'a> {
    // The underlying entry traversal being decorated with git data.
    traversal: Traversal<'a>,
    // Git summary for the entry the traversal is currently parked on, if any.
    current_entry_summary: Option<GitSummary>,
    // The repository containing the current entry, with a status cursor
    // positioned within that repository's statuses; None when the current
    // entry is outside any repository.
    repo_location: Option<(
        &'a RepositoryEntry,
        Cursor<'a, StatusEntry, PathProgress<'a>>,
    )>,
}
6015
impl<'a> GitTraversal<'a> {
    /// Recomputes `current_entry_summary` (and `repo_location`) for the
    /// entry the underlying traversal is currently parked on. `reset` forces
    /// the repository status cursor to be rebuilt even if the repository is
    /// unchanged — required after any backwards movement, since the status
    /// cursor only ever seeks forward.
    fn synchronize_statuses(&mut self, reset: bool) {
        self.current_entry_summary = None;

        let Some(entry) = self.traversal.cursor.item() else {
            return;
        };

        let Some(repo) = self.traversal.snapshot.repository_for_path(&entry.path) else {
            self.repo_location = None;
            return;
        };

        // Update our state if we changed repositories.
        if reset
            || self
                .repo_location
                .as_ref()
                .map(|(prev_repo, _)| &prev_repo.work_directory)
                != Some(&repo.work_directory)
        {
            self.repo_location = Some((repo, repo.statuses_by_path.cursor::<PathProgress>(&())));
        }

        let Some((repo, statuses)) = &mut self.repo_location else {
            return;
        };

        let repo_path = repo.relativize(&entry.path).unwrap();

        if entry.is_dir() {
            // For a directory, sum the statuses of everything beneath it.
            // Clone the cursor so the persistent one keeps its position.
            let mut statuses = statuses.clone();
            statuses.seek_forward(&PathTarget::Path(repo_path.as_ref()), Bias::Left, &());
            let summary =
                statuses.summary(&PathTarget::Successor(repo_path.as_ref()), Bias::Left, &());

            self.current_entry_summary = Some(summary);
        } else if entry.is_file() {
            // For a file entry, park the cursor on the corresponding status
            if statuses.seek_forward(&PathTarget::Path(repo_path.as_ref()), Bias::Left, &()) {
                // TODO: Investigate statuses.item() being None here.
                self.current_entry_summary = statuses.item().map(|item| item.status.into());
            } else {
                // No status entry for this file: it is unchanged.
                self.current_entry_summary = Some(GitSummary::UNCHANGED);
            }
        }
    }

    /// Advances to the next entry. Returns whether an entry was found.
    pub fn advance(&mut self) -> bool {
        self.advance_by(1)
    }

    /// Advances by `count` entries. Returns whether an entry was found.
    pub fn advance_by(&mut self, count: usize) -> bool {
        let found = self.traversal.advance_by(count);
        self.synchronize_statuses(false);
        found
    }

    /// Skips the current entry's descendants, moving to its next sibling.
    pub fn advance_to_sibling(&mut self) -> bool {
        let found = self.traversal.advance_to_sibling();
        self.synchronize_statuses(false);
        found
    }

    /// Moves back to the current entry's parent; this moves backwards, so
    /// the status cursor must be reset.
    pub fn back_to_parent(&mut self) -> bool {
        let found = self.traversal.back_to_parent();
        self.synchronize_statuses(true);
        found
    }

    /// Offset (in included entries) at the start of the current position.
    pub fn start_offset(&self) -> usize {
        self.traversal.start_offset()
    }

    /// Offset (in included entries) just past the current position.
    pub fn end_offset(&self) -> usize {
        self.traversal.end_offset()
    }

    /// The current entry with its git summary, if the traversal is parked on one.
    pub fn entry(&self) -> Option<GitEntryRef<'a>> {
        let entry = self.traversal.cursor.item()?;
        let git_summary = self.current_entry_summary.unwrap_or(GitSummary::UNCHANGED);
        Some(GitEntryRef { entry, git_summary })
    }
}
6100
6101impl<'a> Iterator for GitTraversal<'a> {
6102 type Item = GitEntryRef<'a>;
6103 fn next(&mut self) -> Option<Self::Item> {
6104 if let Some(item) = self.entry() {
6105 self.advance();
6106 Some(item)
6107 } else {
6108 None
6109 }
6110 }
6111}
6112
/// A forward walk over a snapshot's entries, filtered by entry kind
/// (files/directories) and ignore status.
#[derive(Debug)]
pub struct Traversal<'a> {
    snapshot: &'a Snapshot,
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_files: bool,
    include_dirs: bool,
}
6121
impl<'a> Traversal<'a> {
    /// Creates a traversal positioned at the first included entry at or
    /// after `start_path`.
    fn new(
        snapshot: &'a Snapshot,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        start_path: &Path,
    ) -> Self {
        let mut cursor = snapshot.entries_by_path.cursor(&());
        cursor.seek(&TraversalTarget::path(start_path), Bias::Left, &());
        let mut traversal = Self {
            snapshot,
            cursor,
            include_files,
            include_dirs,
            include_ignored,
        };
        // If the seek landed on an entry excluded by the filters (start and
        // end offsets are equal), advance to the first included entry.
        if traversal.end_offset() == traversal.start_offset() {
            traversal.next();
        }
        traversal
    }

    /// Decorates this traversal with git status summaries per entry.
    pub fn with_git_statuses(self) -> GitTraversal<'a> {
        let mut this = GitTraversal {
            traversal: self,
            current_entry_summary: None,
            repo_location: None,
        };
        this.synchronize_statuses(true);
        this
    }

    /// Advances to the next included entry. Returns whether one was found.
    pub fn advance(&mut self) -> bool {
        self.advance_by(1)
    }

    /// Advances by `count` included entries. Returns whether one was found.
    pub fn advance_by(&mut self, count: usize) -> bool {
        // Seek by target offset, counting only the entry kinds this
        // traversal includes.
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + count,
                include_dirs: self.include_dirs,
                include_files: self.include_files,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skips past the current entry and all of its descendants, stopping at
    /// the next included entry. Returns whether one was found.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor
                .seek_forward(&TraversalTarget::successor(&entry.path), Bias::Left, &());
            if let Some(entry) = self.cursor.item() {
                if (self.include_files || !entry.is_file())
                    && (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored || entry.is_always_included)
                {
                    return true;
                }
            }
        }
        false
    }

    /// Moves to the parent of the current entry. Returns whether it was found.
    pub fn back_to_parent(&mut self) -> bool {
        let Some(parent_path) = self.cursor.item().and_then(|entry| entry.path.parent()) else {
            return false;
        };
        self.cursor
            .seek(&TraversalTarget::path(parent_path), Bias::Left, &())
    }

    /// The entry the traversal is currently parked on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Offset (in included entries) at the start of the current position.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_files, self.include_dirs, self.include_ignored)
    }

    /// Offset (in included entries) just past the current position.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_files, self.include_dirs, self.include_ignored)
    }
}
6212
6213impl<'a> Iterator for Traversal<'a> {
6214 type Item = &'a Entry;
6215
6216 fn next(&mut self) -> Option<Self::Item> {
6217 if let Some(item) = self.entry() {
6218 self.advance();
6219 Some(item)
6220 } else {
6221 None
6222 }
6223 }
6224}
6225
/// A path-based seek target: either the position of `Path` itself, or
/// (`Successor`) the first position past a path and all of its descendants.
#[derive(Debug, Clone, Copy)]
enum PathTarget<'a> {
    Path(&'a Path),
    Successor(&'a Path),
}

impl<'a> PathTarget<'a> {
    /// Orders this target relative to `other` for cursor seeking.
    fn cmp_path(&self, other: &Path) -> Ordering {
        match *self {
            Self::Path(path) => path.cmp(other),
            Self::Successor(prefix) => {
                // Every path inside `prefix`'s subtree lies before the
                // successor position; the first path outside it is where a
                // seek should stop, so report "equal" there.
                if other.starts_with(prefix) {
                    Ordering::Greater
                } else {
                    Ordering::Equal
                }
            }
        }
    }
}
6246
// Lets `PathTarget` drive cursors whose dimension is `PathProgress`.
impl<'a, 'b, S: Summary> SeekTarget<'a, PathSummary<S>, PathProgress<'a>> for PathTarget<'b> {
    fn cmp(&self, cursor_location: &PathProgress<'a>, _: &S::Context) -> Ordering {
        self.cmp_path(&cursor_location.max_path)
    }
}
6252
// Lets `PathTarget` drive cursors whose dimension is `TraversalProgress`.
impl<'a, 'b, S: Summary> SeekTarget<'a, PathSummary<S>, TraversalProgress<'a>> for PathTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &S::Context) -> Ordering {
        self.cmp_path(&cursor_location.max_path)
    }
}
6258
// Lets `PathTarget` drive status cursors that also accumulate a `GitSummary`;
// only the path component participates in the comparison.
impl<'a, 'b> SeekTarget<'a, PathSummary<GitSummary>, (TraversalProgress<'a>, GitSummary)>
    for PathTarget<'b>
{
    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitSummary), _: &()) -> Ordering {
        self.cmp_path(&cursor_location.0.max_path)
    }
}
6266
/// A seek target for `Traversal` cursors: either a path-based position, or a
/// target offset counted over the entry kinds a traversal includes.
#[derive(Debug)]
enum TraversalTarget<'a> {
    Path(PathTarget<'a>),
    Count {
        count: usize,
        include_files: bool,
        include_ignored: bool,
        include_dirs: bool,
    },
}
6277
6278impl<'a> TraversalTarget<'a> {
6279 fn path(path: &'a Path) -> Self {
6280 Self::Path(PathTarget::Path(path))
6281 }
6282
6283 fn successor(path: &'a Path) -> Self {
6284 Self::Path(PathTarget::Successor(path))
6285 }
6286
6287 fn cmp_progress(&self, progress: &TraversalProgress) -> Ordering {
6288 match self {
6289 TraversalTarget::Path(path) => path.cmp_path(&progress.max_path),
6290 TraversalTarget::Count {
6291 count,
6292 include_files,
6293 include_dirs,
6294 include_ignored,
6295 } => Ord::cmp(
6296 count,
6297 &progress.count(*include_files, *include_dirs, *include_ignored),
6298 ),
6299 }
6300 }
6301}
6302
// Lets `TraversalTarget` drive cursors over the entries tree.
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        self.cmp_progress(cursor_location)
    }
}
6308
// Lets `TraversalTarget` drive cursors over path-keyed trees without payload.
impl<'a, 'b> SeekTarget<'a, PathSummary<Unit>, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        self.cmp_progress(cursor_location)
    }
}
6314
/// Filters applied when iterating the direct children of a directory entry.
pub struct ChildEntriesOptions {
    pub include_files: bool,
    pub include_dirs: bool,
    pub include_ignored: bool,
}
6320
/// Iterates the direct children of `parent_path` by repeatedly advancing an
/// entry traversal to the next sibling.
pub struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
6325
6326impl<'a> ChildEntriesIter<'a> {
6327 pub fn with_git_statuses(self) -> ChildEntriesGitIter<'a> {
6328 ChildEntriesGitIter {
6329 parent_path: self.parent_path,
6330 traversal: self.traversal.with_git_statuses(),
6331 }
6332 }
6333}
6334
/// Like [`ChildEntriesIter`], but yields each child together with its git
/// status summary.
pub struct ChildEntriesGitIter<'a> {
    parent_path: &'a Path,
    traversal: GitTraversal<'a>,
}
6339
6340impl<'a> Iterator for ChildEntriesIter<'a> {
6341 type Item = &'a Entry;
6342
6343 fn next(&mut self) -> Option<Self::Item> {
6344 if let Some(item) = self.traversal.entry() {
6345 if item.path.starts_with(self.parent_path) {
6346 self.traversal.advance_to_sibling();
6347 return Some(item);
6348 }
6349 }
6350 None
6351 }
6352}
6353
6354impl<'a> Iterator for ChildEntriesGitIter<'a> {
6355 type Item = GitEntryRef<'a>;
6356
6357 fn next(&mut self) -> Option<Self::Item> {
6358 if let Some(item) = self.traversal.entry() {
6359 if item.path.starts_with(self.parent_path) {
6360 self.traversal.advance_to_sibling();
6361 return Some(item);
6362 }
6363 }
6364 None
6365 }
6366}
6367
// Serializes a worktree entry for transmission over RPC. Fields that exist
// only locally (char_bag, is_private, is_always_included) are not sent.
impl<'a> From<&'a Entry> for proto::Entry {
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.as_ref().to_proto(),
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            is_ignored: entry.is_ignored,
            is_external: entry.is_external,
            is_fifo: entry.is_fifo,
            size: Some(entry.size),
            canonical_path: entry
                .canonical_path
                .as_ref()
                .map(|path| path.as_ref().to_proto()),
        }
    }
}
6387
// Deserializes an RPC entry, recomputing the local-only fields (`char_bag`,
// `is_always_included`) from the remote worktree's root char bag and the
// local always-included path matcher.
impl<'a> TryFrom<(&'a CharBag, &PathMatcher, proto::Entry)> for Entry {
    type Error = anyhow::Error;

    fn try_from(
        (root_char_bag, always_included, entry): (&'a CharBag, &PathMatcher, proto::Entry),
    ) -> Result<Self> {
        let kind = if entry.is_dir {
            EntryKind::Dir
        } else {
            EntryKind::File
        };

        let path = Arc::<Path>::from_proto(entry.path);
        let char_bag = char_bag_for_path(*root_char_bag, &path);
        let is_always_included = always_included.is_match(path.as_ref());
        Ok(Entry {
            id: ProjectEntryId::from_proto(entry.id),
            kind,
            path,
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            size: entry.size.unwrap_or(0),
            canonical_path: entry
                .canonical_path
                .map(|path_string| Box::from(PathBuf::from_proto(path_string))),
            is_ignored: entry.is_ignored,
            is_always_included,
            is_external: entry.is_external,
            // Privacy is a local-only setting; never trust the remote's view.
            is_private: false,
            char_bag,
            is_fifo: entry.is_fifo,
        })
    }
}
6422
/// Decodes a git file status from its protobuf representation.
///
/// Newer peers send the structured `status` variant; when it is absent, the
/// legacy `simple_status` code (a bare `proto::GitStatus` integer) is used as
/// a fallback. Returns an error for codes that are invalid in the position
/// they appear.
fn status_from_proto(
    simple_status: i32,
    status: Option<proto::GitFileStatus>,
) -> anyhow::Result<FileStatus> {
    use proto::git_file_status::Variant;

    // Legacy path: no structured variant, decode the simple status code.
    let Some(variant) = status.and_then(|status| status.variant) else {
        let code = proto::GitStatus::from_i32(simple_status)
            .ok_or_else(|| anyhow!("Invalid git status code: {simple_status}"))?;
        let result = match code {
            proto::GitStatus::Added => TrackedStatus {
                worktree_status: StatusCode::Added,
                index_status: StatusCode::Unmodified,
            }
            .into(),
            proto::GitStatus::Modified => TrackedStatus {
                worktree_status: StatusCode::Modified,
                index_status: StatusCode::Unmodified,
            }
            .into(),
            proto::GitStatus::Conflict => UnmergedStatus {
                first_head: UnmergedStatusCode::Updated,
                second_head: UnmergedStatusCode::Updated,
            }
            .into(),
            proto::GitStatus::Deleted => TrackedStatus {
                worktree_status: StatusCode::Deleted,
                index_status: StatusCode::Unmodified,
            }
            .into(),
            _ => return Err(anyhow!("Invalid code for simple status: {simple_status}")),
        };
        return Ok(result);
    };

    let result = match variant {
        Variant::Untracked(_) => FileStatus::Untracked,
        Variant::Ignored(_) => FileStatus::Ignored,
        // Merge conflict: decode the status of each side of the merge.
        Variant::Unmerged(unmerged) => {
            let [first_head, second_head] =
                [unmerged.first_head, unmerged.second_head].map(|head| {
                    let code = proto::GitStatus::from_i32(head)
                        .ok_or_else(|| anyhow!("Invalid git status code: {head}"))?;
                    let result = match code {
                        proto::GitStatus::Added => UnmergedStatusCode::Added,
                        proto::GitStatus::Updated => UnmergedStatusCode::Updated,
                        proto::GitStatus::Deleted => UnmergedStatusCode::Deleted,
                        _ => return Err(anyhow!("Invalid code for unmerged status: {code:?}")),
                    };
                    Ok(result)
                });
            let [first_head, second_head] = [first_head?, second_head?];
            UnmergedStatus {
                first_head,
                second_head,
            }
            .into()
        }
        // Tracked file: decode the independent index and worktree statuses.
        Variant::Tracked(tracked) => {
            let [index_status, worktree_status] = [tracked.index_status, tracked.worktree_status]
                .map(|status| {
                    let code = proto::GitStatus::from_i32(status)
                        .ok_or_else(|| anyhow!("Invalid git status code: {status}"))?;
                    let result = match code {
                        proto::GitStatus::Modified => StatusCode::Modified,
                        proto::GitStatus::TypeChanged => StatusCode::TypeChanged,
                        proto::GitStatus::Added => StatusCode::Added,
                        proto::GitStatus::Deleted => StatusCode::Deleted,
                        proto::GitStatus::Renamed => StatusCode::Renamed,
                        proto::GitStatus::Copied => StatusCode::Copied,
                        proto::GitStatus::Unmodified => StatusCode::Unmodified,
                        _ => return Err(anyhow!("Invalid code for tracked status: {code:?}")),
                    };
                    Ok(result)
                });
            let [index_status, worktree_status] = [index_status?, worktree_status?];
            TrackedStatus {
                index_status,
                worktree_status,
            }
            .into()
        }
    };
    Ok(result)
}
6508
6509fn status_to_proto(status: FileStatus) -> proto::GitFileStatus {
6510 use proto::git_file_status::{Tracked, Unmerged, Variant};
6511
6512 let variant = match status {
6513 FileStatus::Untracked => Variant::Untracked(Default::default()),
6514 FileStatus::Ignored => Variant::Ignored(Default::default()),
6515 FileStatus::Unmerged(UnmergedStatus {
6516 first_head,
6517 second_head,
6518 }) => Variant::Unmerged(Unmerged {
6519 first_head: unmerged_status_to_proto(first_head),
6520 second_head: unmerged_status_to_proto(second_head),
6521 }),
6522 FileStatus::Tracked(TrackedStatus {
6523 index_status,
6524 worktree_status,
6525 }) => Variant::Tracked(Tracked {
6526 index_status: tracked_status_to_proto(index_status),
6527 worktree_status: tracked_status_to_proto(worktree_status),
6528 }),
6529 };
6530 proto::GitFileStatus {
6531 variant: Some(variant),
6532 }
6533}
6534
6535fn unmerged_status_to_proto(code: UnmergedStatusCode) -> i32 {
6536 match code {
6537 UnmergedStatusCode::Added => proto::GitStatus::Added as _,
6538 UnmergedStatusCode::Deleted => proto::GitStatus::Deleted as _,
6539 UnmergedStatusCode::Updated => proto::GitStatus::Updated as _,
6540 }
6541}
6542
6543fn tracked_status_to_proto(code: StatusCode) -> i32 {
6544 match code {
6545 StatusCode::Added => proto::GitStatus::Added as _,
6546 StatusCode::Deleted => proto::GitStatus::Deleted as _,
6547 StatusCode::Modified => proto::GitStatus::Modified as _,
6548 StatusCode::Renamed => proto::GitStatus::Renamed as _,
6549 StatusCode::TypeChanged => proto::GitStatus::TypeChanged as _,
6550 StatusCode::Copied => proto::GitStatus::Copied as _,
6551 StatusCode::Unmodified => proto::GitStatus::Unmodified as _,
6552 }
6553}
6554
/// A project-wide identifier for an entry, allocated from a shared counter
/// and stable across worktree rescans.
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProjectEntryId(usize);

impl ProjectEntryId {
    /// The largest representable id.
    pub const MAX: Self = Self(usize::MAX);
    /// The smallest representable id.
    pub const MIN: Self = Self(usize::MIN);

    /// Allocates the next id from `counter`, post-incrementing it.
    pub fn new(counter: &AtomicUsize) -> Self {
        let id = counter.fetch_add(1, SeqCst);
        Self(id)
    }

    /// Reconstructs an id from its wire (protobuf) representation.
    pub fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    /// Returns the wire (protobuf) representation of this id.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// Returns the raw counter value behind this id.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
6578
#[cfg(any(test, feature = "test-support"))]
impl CreatedEntry {
    /// Returns the created entry if it was included in the worktree,
    /// or `None` if it was excluded.
    pub fn to_included(self) -> Option<Entry> {
        if let CreatedEntry::Included(entry) = self {
            Some(entry)
        } else {
            None
        }
    }
}