1mod ignore;
2mod worktree_settings;
3#[cfg(test)]
4mod worktree_tests;
5
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context as _, Result};
8use clock::ReplicaId;
9use collections::{HashMap, HashSet, VecDeque};
10use fs::{copy_recursive, Fs, MTime, PathEvent, RemoveOptions, Watcher};
11use futures::{
12 channel::{
13 mpsc::{self, UnboundedSender},
14 oneshot,
15 },
16 future::join_all,
17 select_biased,
18 task::Poll,
19 FutureExt as _, Stream, StreamExt,
20};
21use fuzzy::CharBag;
22use git::{
23 repository::{Branch, GitRepository, RepoPath, UpstreamTrackingStatus},
24 status::{
25 FileStatus, GitSummary, StatusCode, TrackedStatus, UnmergedStatus, UnmergedStatusCode,
26 },
27 GitHostingProviderRegistry, COMMIT_MESSAGE, DOT_GIT, FSMONITOR_DAEMON, GITIGNORE, INDEX_LOCK,
28 LFS_DIR,
29};
30use gpui::{
31 App, AppContext as _, AsyncApp, BackgroundExecutor, Context, Entity, EventEmitter, Task,
32};
33use ignore::IgnoreStack;
34use language::DiskState;
35
36use parking_lot::Mutex;
37use paths::local_settings_folder_relative_path;
38use postage::{
39 barrier,
40 prelude::{Sink as _, Stream as _},
41 watch,
42};
43use rpc::{
44 proto::{self, split_worktree_related_message, FromProto, ToProto, WorktreeRelatedMessage},
45 AnyProtoClient,
46};
47pub use settings::WorktreeId;
48use settings::{Settings, SettingsLocation, SettingsStore};
49use smallvec::{smallvec, SmallVec};
50use smol::channel::{self, Sender};
51use std::{
52 any::Any,
53 cmp::Ordering,
54 collections::hash_map,
55 convert::TryFrom,
56 ffi::OsStr,
57 fmt,
58 future::Future,
59 mem::{self},
60 ops::{Deref, DerefMut},
61 path::{Component, Path, PathBuf},
62 pin::Pin,
63 sync::{
64 atomic::{self, AtomicI32, AtomicUsize, Ordering::SeqCst},
65 Arc,
66 },
67 time::{Duration, Instant},
68};
69use sum_tree::{
70 Bias, Cursor, Edit, KeyedItem, SeekTarget, SumTree, Summary, TreeMap, TreeSet, Unit,
71};
72use text::{LineEnding, Rope};
73use util::{
74 paths::{home_dir, PathMatcher, SanitizedPath},
75 ResultExt,
76};
77pub use worktree_settings::WorktreeSettings;
78
/// Debounce latency applied to file-system watcher events before rescanning.
pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
80
/// A set of local or remote files that are being opened as part of a project.
/// Responsible for tracking related FS (for local)/collab (for remote) events and corresponding updates.
/// Stores git repositories data and the diagnostics for the file(s).
///
/// Has an absolute path, and may be set to be visible in Zed UI or not.
/// May correspond to a directory or a single file.
/// Possible examples:
/// * a drag and dropped file — may be added as an invisible, "ephemeral" entry to the current worktree
/// * a directory opened in Zed — may be added as a visible entry to the current worktree
///
/// Uses [`Entry`] to track the state of each file/directory, can look up absolute paths for entries.
pub enum Worktree {
    /// A worktree backed by the local filesystem, kept current by background scanners.
    Local(LocalWorktree),
    /// A worktree mirrored from a remote peer via proto update messages.
    Remote(RemoteWorktree),
}
96
/// An entry, created in the worktree.
#[derive(Debug)]
pub enum CreatedEntry {
    /// Got created and indexed by the worktree, receiving a corresponding entry.
    Included(Entry),
    /// Got created, but not indexed due to falling under exclusion filters.
    /// `abs_path` is the absolute location of the created file on disk.
    Excluded { abs_path: PathBuf },
}
105
/// The result of loading a text file from a worktree: the file metadata plus
/// its textual contents.
pub struct LoadedFile {
    pub file: Arc<File>,
    pub text: String,
}
110
/// The result of loading a binary file from a worktree: the file metadata plus
/// its raw bytes.
pub struct LoadedBinaryFile {
    pub file: Arc<File>,
    pub content: Vec<u8>,
}
115
/// A worktree backed by the local filesystem, kept up to date by background
/// scanner tasks.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Requests targeted rescans of specific relative paths.
    scan_requests_tx: channel::Sender<ScanRequest>,
    /// Requests scans of everything under a given path prefix.
    path_prefixes_to_scan_tx: channel::Sender<PathPrefixScanRequest>,
    /// Watch channel pair tracking whether a scan is currently in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    // Held to keep the background scanner tasks alive for the worktree's lifetime.
    _background_scanner_tasks: Vec<Task<()>>,
    /// Present while an observer is consuming snapshot updates.
    update_observer: Option<UpdateObservationState>,
    fs: Arc<dyn Fs>,
    /// Whether the underlying filesystem is case sensitive.
    fs_case_sensitive: bool,
    /// Whether this worktree is shown in the UI.
    visible: bool,
    /// Source of ids for newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    settings: WorktreeSettings,
    /// When true, entries matching private-path settings are still shared.
    share_private_files: bool,
}
130
/// A request to the background scanner to scan everything under `path`.
/// The `done` barriers are released when the scan completes.
pub struct PathPrefixScanRequest {
    path: Arc<Path>,
    done: SmallVec<[barrier::Sender; 1]>,
}
135
/// A request to the background scanner to rescan specific relative paths.
/// The `done` barriers are released when the scan completes.
struct ScanRequest {
    relative_paths: Vec<Arc<Path>>,
    done: SmallVec<[barrier::Sender; 1]>,
}
140
/// A worktree mirrored from a remote peer; its state is driven entirely by
/// incoming [`WorktreeRelatedMessage`]s rather than filesystem events.
pub struct RemoteWorktree {
    snapshot: Snapshot,
    /// Snapshot that updates are applied to on a background task, paired with
    /// the queue of applied-but-not-yet-surfaced update messages.
    background_snapshot: Arc<Mutex<(Snapshot, Vec<WorktreeRelatedMessage>)>>,
    project_id: u64,
    client: AnyProtoClient,
    file_scan_inclusions: PathMatcher,
    /// Feeds incoming updates to the background task, while still active.
    updates_tx: Option<UnboundedSender<WorktreeRelatedMessage>>,
    /// Receives a copy of every processed update, when someone is observing.
    update_observer: Option<mpsc::UnboundedSender<WorktreeRelatedMessage>>,
    /// Waiters notified once the given scan id has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    visible: bool,
    disconnected: bool,
}
154
/// An immutable view of a worktree's file tree at a point in time.
/// Shared by local and remote worktrees; cheap to clone.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root on disk.
    abs_path: SanitizedPath,
    /// File name of the root; empty when the root path has no file name.
    root_name: String,
    /// Character bag derived from the root name (see [`fuzzy::CharBag`]).
    root_char_bag: CharBag,
    /// All entries, ordered by path.
    entries_by_path: SumTree<Entry>,
    /// All entries, indexed by id.
    entries_by_id: SumTree<PathEntry>,
    /// Entries that are always included regardless of scan exclusions.
    always_included_entries: Vec<Arc<Path>>,
    repositories: SumTree<RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
178
/// A git repository known to a worktree snapshot, along with its status entries.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RepositoryEntry {
    /// The git status entries for this repository.
    /// Note that the paths on this repository are relative to the git work directory.
    /// If the .git folder is external to Zed, these paths will be relative to that folder,
    /// and this data structure might reference files external to this worktree.
    ///
    /// For example:
    ///
    /// my_root_folder/ <-- repository root
    ///   .git
    ///   my_sub_folder_1/
    ///     project_root/ <-- Project root, Zed opened here
    ///       changed_file_1 <-- File with changes, in worktree
    ///   my_sub_folder_2/
    ///     changed_file_2 <-- File with changes, out of worktree
    ///   ...
    ///
    /// With this setup, this field would contain 2 entries, like so:
    /// - my_sub_folder_1/project_root/changed_file_1
    /// - my_sub_folder_2/changed_file_2
    pub(crate) statuses_by_path: SumTree<StatusEntry>,
    /// Project entry id of the work directory; also used as the repository's
    /// id in proto updates (see `initial_update`).
    work_directory_id: ProjectEntryId,
    pub work_directory: WorkDirectory,
    /// Absolute path of the repository's work directory.
    work_directory_abs_path: PathBuf,
    /// The branch currently checked out, if known.
    pub(crate) current_branch: Option<Branch>,
    /// Repo paths currently in an unresolved merge-conflict state.
    pub current_merge_conflicts: TreeSet<RepoPath>,
}
207
impl RepositoryEntry {
    /// Converts a worktree-relative path into a [`RepoPath`] relative to this
    /// repository's work directory (see [`WorkDirectory::relativize`]).
    pub fn relativize(&self, path: &Path) -> Result<RepoPath> {
        self.work_directory.relativize(path)
    }

    /// Inverse of [`Self::relativize`]; returns `None` when the repo path
    /// falls outside this worktree.
    pub fn try_unrelativize(&self, path: &RepoPath) -> Option<Arc<Path>> {
        self.work_directory.try_unrelativize(path)
    }

    /// Converts a [`RepoPath`] back to a worktree-relative path, possibly
    /// prefixed with `..` components (see [`WorkDirectory::unrelativize`]).
    pub fn unrelativize(&self, path: &RepoPath) -> Arc<Path> {
        self.work_directory.unrelativize(path)
    }

    /// Whether the given worktree-relative path lies within this repository's
    /// work directory.
    pub fn directory_contains(&self, path: impl AsRef<Path>) -> bool {
        self.work_directory.directory_contains(path)
    }

    /// The branch currently checked out, if known.
    pub fn branch(&self) -> Option<&Branch> {
        self.current_branch.as_ref()
    }

    /// Project entry id of this repository's work directory.
    pub fn work_directory_id(&self) -> ProjectEntryId {
        self.work_directory_id
    }

    /// Iterates over all git status entries (cloned).
    pub fn status(&self) -> impl Iterator<Item = StatusEntry> + '_ {
        self.statuses_by_path.iter().cloned()
    }

    /// Number of git status entries in this repository.
    pub fn status_len(&self) -> usize {
        self.statuses_by_path.summary().item_summary.count
    }

    /// Aggregate git status summary across all entries.
    pub fn status_summary(&self) -> GitSummary {
        self.statuses_by_path.summary().item_summary
    }

    /// Looks up the status entry for a single repo-relative path.
    pub fn status_for_path(&self, path: &RepoPath) -> Option<StatusEntry> {
        self.statuses_by_path
            .get(&PathKey(path.0.clone()), &())
            .cloned()
    }

    /// Builds the first `UpdateRepository` message for this repository,
    /// containing every current status entry and no removals.
    pub fn initial_update(
        &self,
        project_id: u64,
        worktree_scan_id: usize,
    ) -> proto::UpdateRepository {
        proto::UpdateRepository {
            branch_summary: self.current_branch.as_ref().map(branch_to_proto),
            updated_statuses: self
                .statuses_by_path
                .iter()
                .map(|entry| entry.to_proto())
                .collect(),
            removed_statuses: Default::default(),
            current_merge_conflicts: self
                .current_merge_conflicts
                .iter()
                .map(|repo_path| repo_path.to_proto())
                .collect(),
            project_id,
            // This is semantically wrong---we want to move to having separate IDs for repositories.
            // But for the moment, RepositoryEntry isn't set up to provide that at this level, so we
            // shim it using the work directory's project entry ID. The pair of this + project ID will
            // be globally unique.
            id: self.work_directory_id().to_proto(),
            abs_path: self.work_directory_abs_path.as_path().to_proto(),
            entry_ids: vec![self.work_directory_id().to_proto()],
            // This is also semantically wrong, and should be replaced once we separate git repo updates
            // from worktree scans.
            scan_id: worktree_scan_id as u64,
        }
    }

    /// Builds an incremental `UpdateRepository` message describing the diff
    /// between `old` (the previously-sent state) and `self`.
    pub fn build_update(
        &self,
        old: &Self,
        project_id: u64,
        scan_id: usize,
    ) -> proto::UpdateRepository {
        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
        let mut removed_statuses: Vec<String> = Vec::new();

        let mut new_statuses = self.statuses_by_path.iter().peekable();
        let mut old_statuses = old.statuses_by_path.iter().peekable();

        // Merge-join the two path-sorted status lists in a single pass:
        // paths only in `self` are additions, paths only in `old` are
        // removals, and paths in both are updates when the status differs.
        let mut current_new_entry = new_statuses.next();
        let mut current_old_entry = old_statuses.next();
        loop {
            match (current_new_entry, current_old_entry) {
                (Some(new_entry), Some(old_entry)) => {
                    match new_entry.repo_path.cmp(&old_entry.repo_path) {
                        Ordering::Less => {
                            updated_statuses.push(new_entry.to_proto());
                            current_new_entry = new_statuses.next();
                        }
                        Ordering::Equal => {
                            if new_entry.status != old_entry.status {
                                updated_statuses.push(new_entry.to_proto());
                            }
                            current_old_entry = old_statuses.next();
                            current_new_entry = new_statuses.next();
                        }
                        Ordering::Greater => {
                            removed_statuses.push(old_entry.repo_path.as_ref().to_proto());
                            current_old_entry = old_statuses.next();
                        }
                    }
                }
                (None, Some(old_entry)) => {
                    removed_statuses.push(old_entry.repo_path.as_ref().to_proto());
                    current_old_entry = old_statuses.next();
                }
                (Some(new_entry), None) => {
                    updated_statuses.push(new_entry.to_proto());
                    current_new_entry = new_statuses.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateRepository {
            branch_summary: self.current_branch.as_ref().map(branch_to_proto),
            updated_statuses,
            removed_statuses,
            current_merge_conflicts: self
                .current_merge_conflicts
                .iter()
                .map(|path| path.as_ref().to_proto())
                .collect(),
            project_id,
            id: self.work_directory_id.to_proto(),
            abs_path: self.work_directory_abs_path.as_path().to_proto(),
            entry_ids: vec![self.work_directory_id.to_proto()],
            scan_id: scan_id as u64,
        }
    }
}
347
348pub fn branch_to_proto(branch: &git::repository::Branch) -> proto::Branch {
349 proto::Branch {
350 is_head: branch.is_head,
351 name: branch.name.to_string(),
352 unix_timestamp: branch
353 .most_recent_commit
354 .as_ref()
355 .map(|commit| commit.commit_timestamp as u64),
356 upstream: branch.upstream.as_ref().map(|upstream| proto::GitUpstream {
357 ref_name: upstream.ref_name.to_string(),
358 tracking: upstream
359 .tracking
360 .status()
361 .map(|upstream| proto::UpstreamTracking {
362 ahead: upstream.ahead as u64,
363 behind: upstream.behind as u64,
364 }),
365 }),
366 most_recent_commit: branch
367 .most_recent_commit
368 .as_ref()
369 .map(|commit| proto::CommitSummary {
370 sha: commit.sha.to_string(),
371 subject: commit.subject.to_string(),
372 commit_timestamp: commit.commit_timestamp,
373 }),
374 }
375}
376
377pub fn proto_to_branch(proto: &proto::Branch) -> git::repository::Branch {
378 git::repository::Branch {
379 is_head: proto.is_head,
380 name: proto.name.clone().into(),
381 upstream: proto
382 .upstream
383 .as_ref()
384 .map(|upstream| git::repository::Upstream {
385 ref_name: upstream.ref_name.to_string().into(),
386 tracking: upstream
387 .tracking
388 .as_ref()
389 .map(|tracking| {
390 git::repository::UpstreamTracking::Tracked(UpstreamTrackingStatus {
391 ahead: tracking.ahead as u32,
392 behind: tracking.behind as u32,
393 })
394 })
395 .unwrap_or(git::repository::UpstreamTracking::Gone),
396 }),
397 most_recent_commit: proto.most_recent_commit.as_ref().map(|commit| {
398 git::repository::CommitSummary {
399 sha: commit.sha.to_string().into(),
400 subject: commit.subject.to_string().into(),
401 commit_timestamp: commit.commit_timestamp,
402 has_parent: true,
403 }
404 }),
405 }
406}
407
/// This path corresponds to the 'content path' of a repository in relation
/// to Zed's project root.
/// In the majority of the cases, this is the folder that contains the .git folder.
/// But if a sub-folder of a git repository is opened, this corresponds to the
/// project root and the .git folder is located in a parent directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub enum WorkDirectory {
    /// The repository's work directory lies within the worktree, at
    /// `relative_path` from the worktree root.
    InProject {
        relative_path: Arc<Path>,
    },
    /// The repository's work directory is a parent of the worktree root;
    /// `location_in_repo` is the worktree root's path inside the repository.
    AboveProject {
        absolute_path: Arc<Path>,
        location_in_repo: Arc<Path>,
    },
}
423
impl WorkDirectory {
    /// Test helper: builds an `InProject` work directory from a relative path string.
    #[cfg(test)]
    fn in_project(path: &str) -> Self {
        let path = Path::new(path);
        Self::InProject {
            relative_path: path.into(),
        }
    }

    /// Test helper: canonicalizes the absolute path of an `AboveProject` work
    /// directory; `InProject` values are cloned unchanged.
    #[cfg(test)]
    fn canonicalize(&self) -> Self {
        match self {
            WorkDirectory::InProject { relative_path } => WorkDirectory::InProject {
                relative_path: relative_path.clone(),
            },
            WorkDirectory::AboveProject {
                absolute_path,
                location_in_repo,
            } => WorkDirectory::AboveProject {
                absolute_path: absolute_path.canonicalize().unwrap().into(),
                location_in_repo: location_in_repo.clone(),
            },
        }
    }

    /// Whether the repository root lies outside (above) the worktree root.
    pub fn is_above_project(&self) -> bool {
        match self {
            WorkDirectory::InProject { .. } => false,
            WorkDirectory::AboveProject { .. } => true,
        }
    }

    /// Sort key for this work directory; `AboveProject` repositories sort at
    /// the worktree root (the empty path).
    fn path_key(&self) -> PathKey {
        match self {
            WorkDirectory::InProject { relative_path } => PathKey(relative_path.clone()),
            WorkDirectory::AboveProject { .. } => PathKey(Path::new("").into()),
        }
    }

    /// Returns true if the given path is a child of the work directory.
    ///
    /// Note that the path may not be a member of this repository, if there
    /// is a repository in a directory between these two paths
    /// external .git folder in a parent folder of the project root.
    #[track_caller]
    pub fn directory_contains(&self, path: impl AsRef<Path>) -> bool {
        // `path` must be relative to the worktree root.
        let path = path.as_ref();
        debug_assert!(path.is_relative());
        match self {
            WorkDirectory::InProject { relative_path } => path.starts_with(relative_path),
            // An above-project work directory contains the entire worktree.
            WorkDirectory::AboveProject { .. } => true,
        }
    }

    /// relativize returns the given project path relative to the root folder of the
    /// repository.
    /// If the root of the repository (and its .git folder) are located in a parent folder
    /// of the project root folder, then the returned RepoPath is relative to the root
    /// of the repository and not a valid path inside the project.
    pub fn relativize(&self, path: &Path) -> Result<RepoPath> {
        // path is assumed to be relative to worktree root.
        debug_assert!(path.is_relative());
        match self {
            WorkDirectory::InProject { relative_path } => Ok(path
                .strip_prefix(relative_path)
                .map_err(|_| {
                    anyhow!(
                        "could not relativize {:?} against {:?}",
                        path,
                        relative_path
                    )
                })?
                .into()),
            WorkDirectory::AboveProject {
                location_in_repo, ..
            } => {
                // Avoid joining a `/` to location_in_repo in the case of a single-file worktree.
                if path == Path::new("") {
                    Ok(RepoPath(location_in_repo.clone()))
                } else {
                    Ok(location_in_repo.join(path).into())
                }
            }
        }
    }

    /// This is the opposite operation to `relativize` above
    pub fn try_unrelativize(&self, path: &RepoPath) -> Option<Arc<Path>> {
        match self {
            WorkDirectory::InProject { relative_path } => Some(relative_path.join(path).into()),
            WorkDirectory::AboveProject {
                location_in_repo, ..
            } => {
                // If we fail to strip the prefix, that means this status entry is
                // external to this worktree, and we definitely won't have an entry_id
                path.strip_prefix(location_in_repo).ok().map(Into::into)
            }
        }
    }

    /// Like [`Self::try_unrelativize`], but paths outside the worktree are
    /// expressed with leading `..` components instead of returning `None`.
    pub fn unrelativize(&self, path: &RepoPath) -> Arc<Path> {
        match self {
            WorkDirectory::InProject { relative_path } => relative_path.join(path).into(),
            WorkDirectory::AboveProject {
                location_in_repo, ..
            } => {
                if &path.0 == location_in_repo {
                    // Single-file worktree
                    return location_in_repo
                        .file_name()
                        .map(Path::new)
                        .unwrap_or(Path::new(""))
                        .into();
                }
                // Walk up `location_in_repo` one component at a time,
                // prefixing one `..` per level, until `path` falls under it.
                // Terminates because stripping the empty-path prefix always
                // succeeds once `location_in_repo` bottoms out.
                let mut location_in_repo = &**location_in_repo;
                let mut parents = PathBuf::new();
                loop {
                    if let Ok(segment) = path.strip_prefix(location_in_repo) {
                        return parents.join(segment).into();
                    }
                    location_in_repo = location_in_repo.parent().unwrap_or(Path::new(""));
                    parents.push(Component::ParentDir);
                }
            }
        }
    }

    /// Human-readable name for this work directory: the relative path for
    /// in-project repos, or `../`-prefixed repo folder name for repos rooted
    /// above the project.
    pub fn display_name(&self) -> String {
        match self {
            WorkDirectory::InProject { relative_path } => relative_path.display().to_string(),
            WorkDirectory::AboveProject {
                absolute_path,
                location_in_repo,
            } => {
                // One `..` per component between the worktree root and the repo root.
                let num_of_dots = location_in_repo.components().count();

                "../".repeat(num_of_dots)
                    + &absolute_path
                        .file_name()
                        .map(|s| s.to_string_lossy())
                        .unwrap_or_default()
                    + "/"
            }
        }
    }
}
570
571impl Default for WorkDirectory {
572 fn default() -> Self {
573 Self::InProject {
574 relative_path: Arc::from(Path::new("")),
575 }
576 }
577}
578
/// Newtype over the project entry id of a repository's work directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
581
// Allows a `WorkDirectoryEntry` to be used wherever a `&ProjectEntryId` is expected.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
589
590impl From<ProjectEntryId> for WorkDirectoryEntry {
591 fn from(value: ProjectEntryId) -> Self {
592 WorkDirectoryEntry(value)
593 }
594}
595
/// A [`Snapshot`] augmented with local-only state: gitignores, repositories,
/// and a handle to the root directory.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    /// The file handle of the root dir
    /// (so we can find it after it's been moved)
    root_file_handle: Option<Arc<dyn fs::FileHandle>>,
}
609
/// Mutable state owned by the background scanner while it processes
/// filesystem events and scan requests.
struct BackgroundScannerState {
    /// The snapshot being built up by the current scan.
    snapshot: LocalSnapshot,
    /// Ids of directories whose contents have already been scanned.
    scanned_dirs: HashSet<ProjectEntryId>,
    /// Path prefixes queued for scanning.
    path_prefixes_to_scan: HashSet<Arc<Path>>,
    /// Individual paths queued for scanning.
    paths_to_scan: HashSet<Arc<Path>>,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entries: HashMap<u64, Entry>,
    /// Paths that changed during the current update batch.
    changed_paths: Vec<Arc<Path>>,
    /// The snapshot as of the previously completed scan, for diffing.
    prev_snapshot: Snapshot,
    git_hosting_provider_registry: Option<Arc<GitHostingProviderRegistry>>,
    /// In-flight git scan tasks, keyed by the repository's work-directory path.
    repository_scans: HashMap<PathKey, Task<()>>,
}
625
/// Local-only state for a git repository discovered in the worktree,
/// including a live handle to the repository itself.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    pub(crate) work_directory_id: ProjectEntryId,
    pub(crate) work_directory: WorkDirectory,
    /// Scan id at which the .git directory contents last changed.
    pub(crate) git_dir_scan_id: usize,
    /// Scan id at which git statuses were last recomputed.
    pub(crate) status_scan_id: usize,
    /// Live handle to the underlying git repository.
    pub(crate) repo_ptr: Arc<dyn GitRepository>,
    /// Absolute path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) dot_git_dir_abs_path: Arc<Path>,
    /// Absolute path to the .git file, if we're in a git worktree.
    pub(crate) dot_git_worktree_abs_path: Option<Arc<Path>>,
    /// SHAs of the heads currently being merged, if a merge is in progress.
    pub current_merge_head_shas: Vec<String>,
    /// Pending merge commit message, if a merge is in progress.
    pub merge_message: Option<String>,
}
641
// Local repositories are stored in a sum tree ordered by their
// work-directory path within the worktree.
impl sum_tree::Item for LocalRepositoryEntry {
    type Summary = PathSummary<Unit>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.work_directory.path_key().0,
            item_summary: Unit,
        }
    }
}
652
// Keyed by work-directory path, so repositories can be looked up by location.
impl KeyedItem for LocalRepositoryEntry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        self.work_directory.path_key()
    }
}
660
impl LocalRepositoryEntry {
    /// The live handle to the underlying git repository.
    pub fn repo(&self) -> &Arc<dyn GitRepository> {
        &self.repo_ptr
    }
}
666
// Lets a `LocalRepositoryEntry` be used directly as its `WorkDirectory`.
impl Deref for LocalRepositoryEntry {
    type Target = WorkDirectory;

    fn deref(&self) -> &Self::Target {
        &self.work_directory
    }
}
674
// Lets a `LocalSnapshot` be used directly as its inner `Snapshot`.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
682
// Mutable access to the inner `Snapshot` through a `LocalSnapshot`.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
688
/// Messages sent from the background scanner back to the worktree.
#[derive(Debug)]
enum ScanState {
    /// A scan pass has begun.
    Started,
    /// The scanner produced an updated snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// The entries that changed relative to the previous snapshot.
        changes: UpdatedEntriesSet,
        /// Barriers to release once this update has been handled.
        barrier: SmallVec<[barrier::Sender; 1]>,
        /// Whether more updates from the same scan pass are expected.
        scanning: bool,
    },
    /// The worktree root itself changed; `new_path` is its new location, if known.
    RootUpdated {
        new_path: Option<SanitizedPath>,
    },
}
702
/// State held while an observer is consuming a local worktree's updates.
struct UpdateObservationState {
    /// Channel carrying each new snapshot along with its entry and git-repo changes.
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    /// Signals the update stream to resume after being paused.
    resume_updates: watch::Sender<()>,
    // Held to keep the snapshot-maintenance task alive while observing.
    _maintain_remote_snapshot: Task<Option<()>>,
}
709
/// Events emitted by a [`Worktree`] entity.
#[derive(Clone)]
pub enum Event {
    /// Entries were added, removed, or modified.
    UpdatedEntries(UpdatedEntriesSet),
    /// Git repository state changed.
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
    /// A single entry was deleted.
    DeletedEntry(ProjectEntryId),
}
716
/// Worktree-relative path denoting the worktree root itself.
const EMPTY_PATH: &str = "";
718
// Allows `Worktree` entities to emit [`Event`]s through gpui.
impl EventEmitter<Event> for Worktree {}
720
721impl Worktree {
    /// Creates a local worktree rooted at `path`: stats the root, seeds the
    /// initial snapshot, and starts the background scanner.
    pub async fn local(
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncApp,
    ) -> Result<Entity<Self>> {
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        // Case sensitivity affects path handling during scanning; fall back to
        // case-sensitive if it can't be determined.
        let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });

        // Keep a handle to the root dir so it can be found again after a move.
        let root_file_handle = fs.open_handle(&abs_path).await.log_err();

        cx.new(move |cx: &mut Context<Worktree>| {
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot::new(
                    cx.entity_id().as_u64(),
                    abs_path
                        .file_name()
                        .map_or(String::new(), |f| f.to_string_lossy().to_string()),
                    abs_path.clone(),
                ),
                root_file_handle,
            };

            let worktree_id = snapshot.id();
            let settings_location = Some(SettingsLocation {
                worktree_id,
                path: Path::new(EMPTY_PATH),
            });

            // Restart the background scanners whenever this worktree's
            // settings actually change.
            let settings = WorktreeSettings::get(settings_location, cx).clone();
            cx.observe_global::<SettingsStore>(move |this, cx| {
                if let Self::Local(this) = this {
                    let settings = WorktreeSettings::get(settings_location, cx).clone();
                    if this.settings != settings {
                        this.settings = settings;
                        this.restart_background_scanners(cx);
                    }
                }
            })
            .detach();

            let share_private_files = false;
            if let Some(metadata) = metadata {
                // Seed the snapshot with the root entry. For single-file
                // worktrees, mark the file private per the worktree settings.
                let mut entry = Entry::new(
                    Arc::from(Path::new("")),
                    &metadata,
                    &next_entry_id,
                    snapshot.root_char_bag,
                    None,
                );
                if !metadata.is_dir {
                    entry.is_private = !share_private_files
                        && settings.is_path_private(abs_path.file_name().unwrap().as_ref());
                }
                snapshot.insert_entry(entry, fs.as_ref());
            }

            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
            let mut worktree = LocalWorktree {
                share_private_files,
                next_entry_id,
                snapshot,
                is_scanning: watch::channel_with(true),
                update_observer: None,
                scan_requests_tx,
                path_prefixes_to_scan_tx,
                _background_scanner_tasks: Vec::new(),
                fs,
                fs_case_sensitive,
                visible,
                settings,
            };
            worktree.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
            Worktree::Local(worktree)
        })
    }
812
    /// Creates a remote worktree whose state is driven by worktree update
    /// messages received from `client` rather than filesystem events.
    pub fn remote(
        project_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: AnyProtoClient,
        cx: &mut App,
    ) -> Entity<Self> {
        cx.new(|cx: &mut Context<Self>| {
            let snapshot = Snapshot::new(
                worktree.id,
                worktree.root_name,
                Arc::<Path>::from_proto(worktree.abs_path),
            );

            // Updates are applied to this background copy first; the
            // foreground snapshot is then replaced wholesale (see below).
            let background_snapshot = Arc::new(Mutex::new((
                snapshot.clone(),
                Vec::<WorktreeRelatedMessage>::new(),
            )));
            let (background_updates_tx, mut background_updates_rx) =
                mpsc::unbounded::<WorktreeRelatedMessage>();
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            let worktree_id = snapshot.id();
            let settings_location = Some(SettingsLocation {
                worktree_id,
                path: Path::new(EMPTY_PATH),
            });

            let settings = WorktreeSettings::get(settings_location, cx).clone();
            let worktree = RemoteWorktree {
                client,
                project_id,
                replica_id,
                snapshot,
                file_scan_inclusions: settings.file_scan_inclusions.clone(),
                background_snapshot: background_snapshot.clone(),
                updates_tx: Some(background_updates_tx),
                update_observer: None,
                snapshot_subscriptions: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            };

            // Apply updates to a separate snapshot in a background task, then
            // send them to a foreground task which updates the model.
            cx.background_spawn(async move {
                while let Some(update) = background_updates_rx.next().await {
                    {
                        let mut lock = background_snapshot.lock();
                        lock.0
                            .apply_remote_update(update.clone(), &settings.file_scan_inclusions)
                            .log_err();
                        lock.1.push(update);
                    }
                    snapshot_updated_tx.send(()).await.ok();
                }
            })
            .detach();

            // On the foreground task, update to the latest snapshot and notify
            // any update observer of all updates that led to that snapshot.
            cx.spawn(async move |this, cx| {
                while (snapshot_updated_rx.recv().await).is_some() {
                    this.update(cx, |this, cx| {
                        let mut git_repos_changed = false;
                        let mut entries_changed = false;
                        let this = this.as_remote_mut().unwrap();
                        {
                            let mut lock = this.background_snapshot.lock();
                            this.snapshot = lock.0.clone();
                            // Classify the drained updates so that only the
                            // relevant events are emitted afterwards.
                            for update in lock.1.drain(..) {
                                entries_changed |= match &update {
                                    WorktreeRelatedMessage::UpdateWorktree(update_worktree) => {
                                        !update_worktree.updated_entries.is_empty()
                                            || !update_worktree.removed_entries.is_empty()
                                    }
                                    _ => false,
                                };
                                git_repos_changed |= matches!(
                                    update,
                                    WorktreeRelatedMessage::UpdateRepository(_)
                                        | WorktreeRelatedMessage::RemoveRepository(_)
                                );
                                if let Some(tx) = &this.update_observer {
                                    tx.unbounded_send(update).ok();
                                }
                            }
                        };

                        if entries_changed {
                            cx.emit(Event::UpdatedEntries(Arc::default()));
                        }
                        if git_repos_changed {
                            cx.emit(Event::UpdatedGitRepositories(Arc::default()));
                        }
                        cx.notify();
                        // Release any subscribers whose awaited scan id has
                        // now been observed.
                        while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                            if this.observed_snapshot(*scan_id) {
                                let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                let _ = tx.send(());
                            } else {
                                break;
                            }
                        }
                    })?;
                }
                anyhow::Ok(())
            })
            .detach();

            Worktree::Remote(worktree)
        })
    }
926
927 pub fn as_local(&self) -> Option<&LocalWorktree> {
928 if let Worktree::Local(worktree) = self {
929 Some(worktree)
930 } else {
931 None
932 }
933 }
934
935 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
936 if let Worktree::Remote(worktree) = self {
937 Some(worktree)
938 } else {
939 None
940 }
941 }
942
943 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
944 if let Worktree::Local(worktree) = self {
945 Some(worktree)
946 } else {
947 None
948 }
949 }
950
951 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
952 if let Worktree::Remote(worktree) = self {
953 Some(worktree)
954 } else {
955 None
956 }
957 }
958
959 pub fn is_local(&self) -> bool {
960 matches!(self, Worktree::Local(_))
961 }
962
963 pub fn is_remote(&self) -> bool {
964 !self.is_local()
965 }
966
    /// The settings location corresponding to this worktree's root.
    pub fn settings_location(&self, _: &Context<Self>) -> SettingsLocation<'static> {
        SettingsLocation {
            worktree_id: self.id(),
            path: Path::new(EMPTY_PATH),
        }
    }
973
    /// Clones the current [`Snapshot`] of this worktree.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.snapshot.clone(),
            Worktree::Remote(worktree) => worktree.snapshot.clone(),
        }
    }
980
    /// The id of the most recently started scan (see [`Snapshot::scan_id`]).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
987
    /// Builds the proto metadata describing this worktree (id, root name,
    /// visibility, and absolute path).
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.is_visible(),
            abs_path: self.abs_path().to_proto(),
        }
    }
996
    /// The latest fully-completed scan id (see [`Snapshot::completed_scan_id`]).
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
1003
    /// Whether this worktree is shown in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
1010
    /// The replica id of this worktree; local worktrees are always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
1017
    /// The absolute path of this worktree's root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone().into(),
            Worktree::Remote(worktree) => worktree.abs_path.clone().into(),
        }
    }
1024
    /// A [`File`] for the root entry, or `None` if there is no root entry.
    pub fn root_file(&self, cx: &Context<Self>) -> Option<Arc<File>> {
        let entry = self.root_entry()?;
        Some(File::for_entry(entry.clone(), cx.entity()))
    }
1029
    /// Starts streaming this worktree's update messages to `callback`,
    /// delegating to the local or remote implementation.
    pub fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(WorktreeRelatedMessage) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        match self {
            Worktree::Local(this) => this.observe_updates(project_id, cx, callback),
            Worktree::Remote(this) => this.observe_updates(project_id, cx, callback),
        }
    }
1040
1041 pub fn stop_observing_updates(&mut self) {
1042 match self {
1043 Worktree::Local(this) => {
1044 this.update_observer.take();
1045 }
1046 Worktree::Remote(this) => {
1047 this.update_observer.take();
1048 }
1049 }
1050 }
1051
    /// Test helper: whether an update observer is currently attached.
    #[cfg(any(test, feature = "test-support"))]
    pub fn has_update_observer(&self) -> bool {
        match self {
            Worktree::Local(this) => this.update_observer.is_some(),
            Worktree::Remote(this) => this.update_observer.is_some(),
        }
    }
1059
    /// Loads the text file at the given worktree-relative path.
    /// Currently only supported for local worktrees.
    pub fn load_file(&self, path: &Path, cx: &Context<Worktree>) -> Task<Result<LoadedFile>> {
        match self {
            Worktree::Local(this) => this.load_file(path, cx),
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktrees can't yet load files")))
            }
        }
    }
1068
    /// Loads the git index (staged) text for `path`, if the path belongs to a
    /// repository known to this worktree's snapshot.
    ///
    /// Returns `Ok(None)` semantics from `load_index_text` itself (e.g. the
    /// file is not in the index — TODO confirm against the git crate), and an
    /// error when no containing repository is found. Remote worktrees are not
    /// supported yet and fail immediately.
    pub fn load_staged_file(&self, path: &Path, cx: &App) -> Task<Result<Option<String>>> {
        match self {
            Worktree::Local(this) => {
                let path = Arc::from(path);
                // Clone the snapshot so the spawned task doesn't borrow `self`.
                let snapshot = this.snapshot();
                cx.spawn(async move |cx| {
                    if let Some(repo) = snapshot.repository_for_path(&path) {
                        if let Some(repo_path) = repo.relativize(&path).log_err() {
                            if let Some(git_repo) =
                                snapshot.git_repositories.get(&repo.work_directory_id)
                            {
                                return Ok(git_repo
                                    .repo_ptr
                                    .load_index_text(repo_path, cx.clone())
                                    .await);
                            }
                        }
                    }
                    Err(anyhow!("No repository found for {path:?}"))
                })
            }
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktrees can't yet load staged files")))
            }
        }
    }
1095
    /// Loads the committed (HEAD) text for `path`, if the path belongs to a
    /// repository known to this worktree's snapshot.
    ///
    /// Mirrors `load_staged_file`, but reads the committed text instead of the
    /// index. Remote worktrees are not supported yet and fail immediately.
    pub fn load_committed_file(&self, path: &Path, cx: &App) -> Task<Result<Option<String>>> {
        match self {
            Worktree::Local(this) => {
                let path = Arc::from(path);
                // Clone the snapshot so the spawned task doesn't borrow `self`.
                let snapshot = this.snapshot();
                cx.spawn(async move |cx| {
                    if let Some(repo) = snapshot.repository_for_path(&path) {
                        if let Some(repo_path) = repo.relativize(&path).log_err() {
                            if let Some(git_repo) =
                                snapshot.git_repositories.get(&repo.work_directory_id)
                            {
                                return Ok(git_repo
                                    .repo_ptr
                                    .load_committed_text(repo_path, cx.clone())
                                    .await);
                            }
                        }
                    }
                    Err(anyhow!("No repository found for {path:?}"))
                })
            }
            Worktree::Remote(_) => Task::ready(Err(anyhow!(
                "remote worktrees can't yet load committed files"
            ))),
        }
    }
1122
1123 pub fn load_binary_file(
1124 &self,
1125 path: &Path,
1126 cx: &Context<Worktree>,
1127 ) -> Task<Result<LoadedBinaryFile>> {
1128 match self {
1129 Worktree::Local(this) => this.load_binary_file(path, cx),
1130 Worktree::Remote(_) => {
1131 Task::ready(Err(anyhow!("remote worktrees can't yet load binary files")))
1132 }
1133 }
1134 }
1135
1136 pub fn write_file(
1137 &self,
1138 path: &Path,
1139 text: Rope,
1140 line_ending: LineEnding,
1141 cx: &Context<Worktree>,
1142 ) -> Task<Result<Arc<File>>> {
1143 match self {
1144 Worktree::Local(this) => this.write_file(path, text, line_ending, cx),
1145 Worktree::Remote(_) => {
1146 Task::ready(Err(anyhow!("remote worktree can't yet write files")))
1147 }
1148 }
1149 }
1150
    /// Creates a file or directory entry at `path`, relative to the worktree
    /// root.
    ///
    /// Local worktrees create the entry directly. Remote worktrees send a
    /// `CreateProjectEntry` RPC; on success the returned entry is inserted
    /// into the local replica at the scan id reported by the server. A
    /// response with no entry means the path was excluded, in which case the
    /// absolute path is reported via `CreatedEntry::Excluded`.
    pub fn create_entry(
        &mut self,
        path: impl Into<Arc<Path>>,
        is_directory: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let path: Arc<Path> = path.into();
        let worktree_id = self.id();
        match self {
            Worktree::Local(this) => this.create_entry(path, is_directory, cx),
            Worktree::Remote(this) => {
                let project_id = this.project_id;
                let request = this.client.request(proto::CreateProjectEntry {
                    worktree_id: worktree_id.to_proto(),
                    project_id,
                    path: path.as_ref().to_proto(),
                    is_directory,
                });
                cx.spawn(async move |this, cx| {
                    let response = request.await?;
                    match response.entry {
                        // Server created the entry: mirror it locally.
                        Some(entry) => this
                            .update(cx, |worktree, cx| {
                                worktree.as_remote_mut().unwrap().insert_entry(
                                    entry,
                                    response.worktree_scan_id as usize,
                                    cx,
                                )
                            })?
                            .await
                            .map(CreatedEntry::Included),
                        // No entry in the response: the path was excluded.
                        None => {
                            let abs_path = this.update(cx, |worktree, _| {
                                worktree
                                    .absolutize(&path)
                                    .with_context(|| format!("absolutizing {path:?}"))
                            })??;
                            Ok(CreatedEntry::Excluded { abs_path })
                        }
                    }
                })
            }
        }
    }
1195
    /// Deletes the entry `entry_id`, moving it to the trash when `trash` is
    /// true. Returns `None` when the entry doesn't exist.
    ///
    /// Emits `Event::DeletedEntry` for the entry itself and every entry in
    /// its subtree.
    pub fn delete_entry(
        &mut self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &mut Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let task = match self {
            Worktree::Local(this) => this.delete_entry(entry_id, trash, cx),
            Worktree::Remote(this) => this.delete_entry(entry_id, trash, cx),
        }?;

        // Look the entry up again to find its path; the deletion task above
        // was created from a snapshot that still contains it.
        let entry = match self {
            Worktree::Local(ref this) => this.entry_for_id(entry_id),
            Worktree::Remote(ref this) => this.entry_for_id(entry_id),
        }?;

        // Notify observers about the whole deleted subtree, not just the root.
        let mut ids = vec![entry_id];
        let path = &*entry.path;

        self.get_children_ids_recursive(path, &mut ids);

        for id in ids {
            cx.emit(Event::DeletedEntry(id));
        }
        Some(task)
    }
1222
1223 fn get_children_ids_recursive(&self, path: &Path, ids: &mut Vec<ProjectEntryId>) {
1224 let children_iter = self.child_entries(path);
1225 for child in children_iter {
1226 ids.push(child.id);
1227 self.get_children_ids_recursive(&child.path, ids);
1228 }
1229 }
1230
1231 pub fn rename_entry(
1232 &mut self,
1233 entry_id: ProjectEntryId,
1234 new_path: impl Into<Arc<Path>>,
1235 cx: &Context<Self>,
1236 ) -> Task<Result<CreatedEntry>> {
1237 let new_path = new_path.into();
1238 match self {
1239 Worktree::Local(this) => this.rename_entry(entry_id, new_path, cx),
1240 Worktree::Remote(this) => this.rename_entry(entry_id, new_path, cx),
1241 }
1242 }
1243
    /// Copies the entry `entry_id` to `new_path`, returning the new entry
    /// (or `None` when the server reports no entry, e.g. an excluded path).
    ///
    /// `relative_worktree_source_path`, when present, is forwarded as an
    /// alternate worktree-relative source path for the copy.
    pub fn copy_entry(
        &mut self,
        entry_id: ProjectEntryId,
        relative_worktree_source_path: Option<PathBuf>,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Self>,
    ) -> Task<Result<Option<Entry>>> {
        let new_path: Arc<Path> = new_path.into();
        match self {
            Worktree::Local(this) => {
                this.copy_entry(entry_id, relative_worktree_source_path, new_path, cx)
            }
            Worktree::Remote(this) => {
                let relative_worktree_source_path = relative_worktree_source_path
                    .map(|relative_worktree_source_path| relative_worktree_source_path.to_proto());
                let response = this.client.request(proto::CopyProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                    relative_worktree_source_path,
                    new_path: new_path.to_proto(),
                });
                cx.spawn(async move |this, cx| {
                    let response = response.await?;
                    match response.entry {
                        // Mirror the server-created entry into the local replica.
                        Some(entry) => this
                            .update(cx, |worktree, cx| {
                                worktree.as_remote_mut().unwrap().insert_entry(
                                    entry,
                                    response.worktree_scan_id as usize,
                                    cx,
                                )
                            })?
                            .await
                            .map(Some),
                        None => Ok(None),
                    }
                })
            }
        }
    }
1284
1285 pub fn copy_external_entries(
1286 &mut self,
1287 target_directory: PathBuf,
1288 paths: Vec<Arc<Path>>,
1289 overwrite_existing_files: bool,
1290 cx: &Context<Worktree>,
1291 ) -> Task<Result<Vec<ProjectEntryId>>> {
1292 match self {
1293 Worktree::Local(this) => {
1294 this.copy_external_entries(target_directory, paths, overwrite_existing_files, cx)
1295 }
1296 _ => Task::ready(Err(anyhow!(
1297 "Copying external entries is not supported for remote worktrees"
1298 ))),
1299 }
1300 }
1301
    /// Expands the directory entry `entry_id` so its children get loaded.
    ///
    /// Local worktrees refresh the directory directly. Remote worktrees send
    /// an `ExpandProjectEntry` RPC, then wait until the local replica has
    /// caught up to the scan id the server reports.
    pub fn expand_entry(
        &mut self,
        entry_id: ProjectEntryId,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        match self {
            Worktree::Local(this) => this.expand_entry(entry_id, cx),
            Worktree::Remote(this) => {
                let response = this.client.request(proto::ExpandProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                });
                Some(cx.spawn(async move |this, cx| {
                    let response = response.await?;
                    this.update(cx, |this, _| {
                        this.as_remote_mut()
                            .unwrap()
                            .wait_for_snapshot(response.worktree_scan_id as usize)
                    })?
                    .await?;
                    Ok(())
                }))
            }
        }
    }
1327
    /// Like `expand_entry`, but uses the `ExpandAllForProjectEntry` RPC —
    /// presumably expanding the entry's entire subtree (confirm against the
    /// proto handler).
    pub fn expand_all_for_entry(
        &mut self,
        entry_id: ProjectEntryId,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        match self {
            Worktree::Local(this) => this.expand_all_for_entry(entry_id, cx),
            Worktree::Remote(this) => {
                let response = this.client.request(proto::ExpandAllForProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                });
                Some(cx.spawn(async move |this, cx| {
                    let response = response.await?;
                    // Wait until the local replica reaches the server's scan id.
                    this.update(cx, |this, _| {
                        this.as_remote_mut()
                            .unwrap()
                            .wait_for_snapshot(response.worktree_scan_id as usize)
                    })?
                    .await?;
                    Ok(())
                }))
            }
        }
    }
1353
    /// RPC handler: creates an entry on behalf of a collaborator.
    ///
    /// Captures the scan id before starting the creation and echoes it in the
    /// response; a `None` entry in the response indicates the path was
    /// excluded from the worktree.
    pub async fn handle_create_entry(
        this: Entity<Self>,
        request: proto::CreateProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, entry) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.create_entry(
                    Arc::<Path>::from_proto(request.path),
                    request.is_directory,
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            entry: match &entry.await? {
                CreatedEntry::Included(entry) => Some(entry.into()),
                CreatedEntry::Excluded { .. } => None,
            },
            worktree_scan_id: scan_id as u64,
        })
    }
1377
    /// RPC handler: deletes the requested entry, awaiting the deletion before
    /// responding with the scan id captured when the request was received.
    pub async fn handle_delete_entry(
        this: Entity<Self>,
        request: proto::DeleteProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.delete_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    request.use_trash,
                    cx,
                ),
            )
        })?;
        // `delete_entry` returns None when the entry id is unknown.
        task.ok_or_else(|| anyhow!("invalid entry"))?.await?;
        Ok(proto::ProjectEntryResponse {
            entry: None,
            worktree_scan_id: scan_id as u64,
        })
    }
1399
    /// RPC handler: expands a directory entry, then reports the scan id
    /// reached after the expansion completed.
    pub async fn handle_expand_entry(
        this: Entity<Self>,
        request: proto::ExpandProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ExpandProjectEntryResponse> {
        let task = this.update(&mut cx, |this, cx| {
            this.expand_entry(ProjectEntryId::from_proto(request.entry_id), cx)
        })?;
        // `expand_entry` returns None when the entry id is unknown.
        task.ok_or_else(|| anyhow!("no such entry"))?.await?;
        let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
        Ok(proto::ExpandProjectEntryResponse {
            worktree_scan_id: scan_id as u64,
        })
    }
1414
    /// RPC handler: the subtree-wide counterpart of `handle_expand_entry`,
    /// backed by `expand_all_for_entry`.
    pub async fn handle_expand_all_for_entry(
        this: Entity<Self>,
        request: proto::ExpandAllForProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ExpandAllForProjectEntryResponse> {
        let task = this.update(&mut cx, |this, cx| {
            this.expand_all_for_entry(ProjectEntryId::from_proto(request.entry_id), cx)
        })?;
        // `expand_all_for_entry` returns None when the entry id is unknown.
        task.ok_or_else(|| anyhow!("no such entry"))?.await?;
        let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
        Ok(proto::ExpandAllForProjectEntryResponse {
            worktree_scan_id: scan_id as u64,
        })
    }
1429
    /// RPC handler: renames an entry, responding with the renamed entry (or
    /// `None` when the destination was excluded) plus the scan id captured
    /// when the request was received.
    pub async fn handle_rename_entry(
        this: Entity<Self>,
        request: proto::RenameProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.rename_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    Arc::<Path>::from_proto(request.new_path),
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            entry: match &task.await? {
                CreatedEntry::Included(entry) => Some(entry.into()),
                CreatedEntry::Excluded { .. } => None,
            },
            worktree_scan_id: scan_id as u64,
        })
    }
1453
    /// RPC handler: copies an entry, responding with the new entry when one
    /// was produced, plus the scan id captured when the request was received.
    pub async fn handle_copy_entry(
        this: Entity<Self>,
        request: proto::CopyProjectEntry,
        mut cx: AsyncApp,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            let relative_worktree_source_path = request
                .relative_worktree_source_path
                .map(PathBuf::from_proto);
            (
                this.scan_id(),
                this.copy_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    relative_worktree_source_path,
                    PathBuf::from_proto(request.new_path),
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            entry: task.await?.as_ref().map(|e| e.into()),
            worktree_scan_id: scan_id as u64,
        })
    }
1478
1479 pub fn dot_git_abs_path(&self, work_directory: &WorkDirectory) -> PathBuf {
1480 let mut path = match work_directory {
1481 WorkDirectory::InProject { relative_path } => self.abs_path().join(relative_path),
1482 WorkDirectory::AboveProject { absolute_path, .. } => absolute_path.as_ref().to_owned(),
1483 };
1484 path.push(".git");
1485 path
1486 }
1487
    /// Whether this worktree has no root directory.
    // NOTE(review): presumably `root_dir()` is `None` when the worktree was
    // opened on a single file rather than a folder — confirm at its definition.
    pub fn is_single_file(&self) -> bool {
        self.root_dir().is_none()
    }
1491}
1492
1493impl LocalWorktree {
    /// The filesystem implementation backing this worktree.
    pub fn fs(&self) -> &Arc<dyn Fs> {
        &self.fs
    }
1497
1498 pub fn is_path_private(&self, path: &Path) -> bool {
1499 !self.share_private_files && self.settings.is_path_private(path)
1500 }
1501
    /// Restarts the background scanner: re-creates the scan-request channels
    /// (dropping the old ones), spawns fresh scanner tasks, and re-refreshes
    /// all always-included entries.
    fn restart_background_scanners(&mut self, cx: &Context<Worktree>) {
        let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
        let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
        self.scan_requests_tx = scan_requests_tx;
        self.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;

        self.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
        let always_included_entries = mem::take(&mut self.snapshot.always_included_entries);
        log::debug!(
            "refreshing entries for the following always included paths: {:?}",
            always_included_entries
        );

        // Cleans up old always included entries to ensure they get updated properly. Otherwise,
        // nested always included entries may not get updated and will result in out-of-date info.
        self.refresh_entries_for_paths(always_included_entries);
    }
1519
    /// Spawns two tasks: the filesystem `BackgroundScanner` itself, and a
    /// companion task that applies the scanner's state updates back onto this
    /// worktree. Replaces any previously running pair.
    fn start_background_scanner(
        &mut self,
        scan_requests_rx: channel::Receiver<ScanRequest>,
        path_prefixes_to_scan_rx: channel::Receiver<PathPrefixScanRequest>,
        cx: &Context<Worktree>,
    ) {
        let snapshot = self.snapshot();
        let share_private_files = self.share_private_files;
        let next_entry_id = self.next_entry_id.clone();
        let fs = self.fs.clone();
        let git_hosting_provider_registry = GitHostingProviderRegistry::try_global(cx);
        let settings = self.settings.clone();
        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
        let background_scanner = cx.background_spawn({
            let abs_path = snapshot.abs_path.as_path().to_path_buf();
            let background = cx.background_executor().clone();
            async move {
                // Acquire the FS watcher up front; its event stream is handed
                // to the scanner's run loop below.
                let (events, watcher) = fs.watch(&abs_path, FS_WATCH_LATENCY).await;
                let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
                    log::error!("Failed to determine whether filesystem is case sensitive: {e:#}");
                    true
                });

                let mut scanner = BackgroundScanner {
                    fs,
                    fs_case_sensitive,
                    status_updates_tx: scan_states_tx,
                    scans_running: Arc::new(AtomicI32::new(0)),
                    executor: background,
                    scan_requests_rx,
                    path_prefixes_to_scan_rx,
                    next_entry_id,
                    state: Arc::new(Mutex::new(BackgroundScannerState {
                        prev_snapshot: snapshot.snapshot.clone(),
                        snapshot,
                        scanned_dirs: Default::default(),
                        path_prefixes_to_scan: Default::default(),
                        paths_to_scan: Default::default(),
                        removed_entries: Default::default(),
                        changed_paths: Default::default(),
                        repository_scans: HashMap::default(),
                        git_hosting_provider_registry,
                    })),
                    phase: BackgroundScannerPhase::InitialScan,
                    share_private_files,
                    settings,
                    watcher,
                };

                scanner
                    .run(Box::pin(events.map(|events| events.into_iter().collect())))
                    .await;
            }
        });
        // Forward each scanner state into the worktree entity on the main thread.
        let scan_state_updater = cx.spawn(async move |this, cx| {
            while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
                this.update(cx, |this, cx| {
                    let this = this.as_local_mut().unwrap();
                    match state {
                        ScanState::Started => {
                            *this.is_scanning.0.borrow_mut() = true;
                        }
                        ScanState::Updated {
                            snapshot,
                            changes,
                            barrier,
                            scanning,
                        } => {
                            *this.is_scanning.0.borrow_mut() = scanning;
                            this.set_snapshot(snapshot, changes, cx);
                            // Dropping the barrier lets any waiter on this
                            // particular update proceed.
                            drop(barrier);
                        }
                        ScanState::RootUpdated { new_path } => {
                            this.update_abs_path_and_refresh(new_path, cx);
                        }
                    }
                })
                .ok();
            }
        });
        // Replacing the task vec drops (and thereby cancels) any previous pair.
        self._background_scanner_tasks = vec![background_scanner, scan_state_updater];
        self.is_scanning = watch::channel_with(true);
    }
1603
    /// Installs `new_snapshot` as the current snapshot, forwards the entry and
    /// repository changes to the update observer (if any), and emits the
    /// corresponding events.
    fn set_snapshot(
        &mut self,
        new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut Context<Worktree>,
    ) {
        // Diff repositories against the outgoing snapshot before replacing it.
        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);
        self.snapshot = new_snapshot;

        if let Some(share) = self.update_observer.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((
                    self.snapshot.clone(),
                    entry_changes.clone(),
                    repo_changes.clone(),
                ))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }
1631
    /// Computes which git repositories were added, updated, or removed between
    /// two snapshots.
    ///
    /// Implemented as an ordered merge-join over the two `git_repositories`
    /// maps; this assumes both iterate in ascending entry-id order (which the
    /// `Ord::cmp` on peeked entry ids relies on).
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();

        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        Ordering::Less => {
                            // Present only in the new snapshot: added.
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        Ordering::Equal => {
                            // Present in both: report it only if a scan bumped
                            // either the git-dir or the status scan id.
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id
                                || new_repo.status_scan_id != old_repo.status_scan_id
                            {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repositories
                                        .get(&PathKey(entry.path.clone()), &())
                                        .cloned();
                                    changes.push((
                                        entry.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        Ordering::Greater => {
                            // Present only in the old snapshot: removed.
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repositories
                                    .get(&PathKey(entry.path.clone()), &())
                                    .cloned();
                                changes.push((
                                    entry.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                // Old side exhausted: everything remaining was added.
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                // New side exhausted: everything remaining was removed.
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repositories
                            .get(&PathKey(entry.path.clone()), &())
                            .cloned();
                        changes.push((
                            entry.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Turns the peeked `(&K, &V)` pair into an owned `(K, V)` tuple.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
1729
1730 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1731 let mut is_scanning_rx = self.is_scanning.1.clone();
1732 async move {
1733 let mut is_scanning = *is_scanning_rx.borrow();
1734 while is_scanning {
1735 if let Some(value) = is_scanning_rx.recv().await {
1736 is_scanning = value;
1737 } else {
1738 break;
1739 }
1740 }
1741 }
1742 }
1743
    /// A clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
1747
    /// A clone of this worktree's settings.
    pub fn settings(&self) -> WorktreeSettings {
        self.settings.clone()
    }
1751
    /// Looks up the local repository state for `repo` by its work-directory id.
    pub fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory_id)
    }
1755
    /// Loads the raw bytes of the file at `path` while concurrently refreshing
    /// its worktree entry.
    ///
    /// When the refresh yields no entry (the path is excluded), a standalone
    /// `File` with `entry_id: None` is synthesized from fresh fs metadata.
    fn load_binary_file(
        &self,
        path: &Path,
        cx: &Context<Worktree>,
    ) -> Task<Result<LoadedBinaryFile>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let entry = self.refresh_entry(path.clone(), None, cx);
        let is_private = self.is_path_private(path.as_ref());

        let worktree = cx.weak_entity();
        cx.background_spawn(async move {
            let abs_path = abs_path?;
            let content = fs.load_bytes(&abs_path).await?;

            let worktree = worktree
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            let file = match entry.await? {
                Some(entry) => File::for_entry(entry, worktree),
                None => {
                    // Excluded path: stat it directly to build a detached File.
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    Arc::new(File {
                        entry_id: None,
                        worktree,
                        path,
                        disk_state: DiskState::Present {
                            mtime: metadata.mtime,
                        },
                        is_local: true,
                        is_private,
                    })
                }
            };

            Ok(LoadedBinaryFile { file, content })
        })
    }
1803
    /// Loads the text of the file at `path` while concurrently refreshing its
    /// worktree entry.
    ///
    /// Mirrors `load_binary_file`: when the refresh yields no entry (excluded
    /// path), a standalone `File` is synthesized from fresh fs metadata.
    fn load_file(&self, path: &Path, cx: &Context<Worktree>) -> Task<Result<LoadedFile>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let entry = self.refresh_entry(path.clone(), None, cx);
        let is_private = self.is_path_private(path.as_ref());

        cx.spawn(async move |this, _cx| {
            let abs_path = abs_path?;
            let text = fs.load(&abs_path).await?;

            let worktree = this
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            let file = match entry.await? {
                Some(entry) => File::for_entry(entry, worktree),
                None => {
                    // Excluded path: stat it directly to build a detached File.
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    Arc::new(File {
                        entry_id: None,
                        worktree,
                        path,
                        disk_state: DiskState::Present {
                            mtime: metadata.mtime,
                        },
                        is_local: true,
                        is_private,
                    })
                }
            };

            Ok(LoadedFile { file, text })
        })
    }
1846
1847 /// Find the lowest path in the worktree's datastructures that is an ancestor
1848 fn lowest_ancestor(&self, path: &Path) -> PathBuf {
1849 let mut lowest_ancestor = None;
1850 for path in path.ancestors() {
1851 if self.entry_for_path(path).is_some() {
1852 lowest_ancestor = Some(path.to_path_buf());
1853 break;
1854 }
1855 }
1856
1857 lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
1858 }
1859
    /// Creates a file (empty) or directory at `path`, then refreshes the new
    /// entry and every newly-created intermediate directory.
    ///
    /// Excluded paths are created on disk but reported as
    /// `CreatedEntry::Excluded` without a worktree entry.
    fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let path = path.into();
        let abs_path = match self.absolutize(&path) {
            Ok(path) => path,
            Err(e) => return Task::ready(Err(e.context(format!("absolutizing path {path:?}")))),
        };
        let path_excluded = self.settings.is_path_excluded(&abs_path);
        let fs = self.fs.clone();
        let task_abs_path = abs_path.clone();
        // Do the filesystem write off the main thread.
        let write = cx.background_spawn(async move {
            if is_dir {
                fs.create_dir(&task_abs_path)
                    .await
                    .with_context(|| format!("creating directory {task_abs_path:?}"))
            } else {
                fs.save(&task_abs_path, &Rope::default(), LineEnding::default())
                    .await
                    .with_context(|| format!("creating file {task_abs_path:?}"))
            }
        });

        let lowest_ancestor = self.lowest_ancestor(&path);
        cx.spawn(async move |this, cx| {
            write.await?;
            if path_excluded {
                return Ok(CreatedEntry::Excluded { abs_path });
            }

            // Refresh every directory created between the deepest known
            // ancestor and the new path, so they all get entries.
            let (result, refreshes) = this.update(cx, |this, cx| {
                let mut refreshes = Vec::new();
                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
                for refresh_path in refresh_paths.ancestors() {
                    if refresh_path == Path::new("") {
                        continue;
                    }
                    let refresh_full_path = lowest_ancestor.join(refresh_path);

                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
                        refresh_full_path.into(),
                        None,
                        cx,
                    ));
                }
                (
                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
                    refreshes,
                )
            })?;
            for refresh in refreshes {
                refresh.await.log_err();
            }

            Ok(result
                .await?
                .map(CreatedEntry::Included)
                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
        })
    }
1923
    /// Saves `text` to `path` with the given line endings, then refreshes the
    /// entry and returns its `File`.
    ///
    /// When the refresh yields no entry (the path is excluded), a standalone
    /// `File` with `entry_id: None` is synthesized from fresh fs metadata.
    fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &Context<Worktree>,
    ) -> Task<Result<Arc<File>>> {
        let path = path.into();
        let fs = self.fs.clone();
        let is_private = self.is_path_private(&path);
        let Ok(abs_path) = self.absolutize(&path) else {
            return Task::ready(Err(anyhow!("invalid path {path:?}")));
        };

        // Do the filesystem write off the main thread.
        let write = cx.background_spawn({
            let fs = fs.clone();
            let abs_path = abs_path.clone();
            async move { fs.save(&abs_path, &text, line_ending).await }
        });

        cx.spawn(async move |this, cx| {
            write.await?;
            let entry = this
                .update(cx, |this, cx| {
                    this.as_local_mut()
                        .unwrap()
                        .refresh_entry(path.clone(), None, cx)
                })?
                .await?;
            let worktree = this.upgrade().ok_or_else(|| anyhow!("worktree dropped"))?;
            if let Some(entry) = entry {
                Ok(File::for_entry(entry, worktree))
            } else {
                // Excluded path: stat it directly to build a detached File.
                let metadata = fs
                    .metadata(&abs_path)
                    .await
                    .with_context(|| {
                        format!("Fetching metadata after saving the excluded buffer {abs_path:?}")
                    })?
                    .with_context(|| {
                        format!("Excluded buffer {path:?} got removed during saving")
                    })?;
                Ok(Arc::new(File {
                    worktree,
                    path,
                    disk_state: DiskState::Present {
                        mtime: metadata.mtime,
                    },
                    entry_id: None,
                    is_local: true,
                    is_private,
                }))
            }
        })
    }
1979
    /// Removes the entry `entry_id` from disk (to the trash when `trash` is
    /// true), then refreshes the deleted path so the snapshot catches up.
    ///
    /// Returns `None` when the entry id is unknown.
    fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let fs = self.fs.clone();

        // Do the removal off the main thread; directories are removed
        // recursively, and a missing target is an error (ignore_if_not_exists: false).
        let delete = cx.background_spawn(async move {
            if entry.is_file() {
                if trash {
                    fs.trash_file(&abs_path?, Default::default()).await?;
                } else {
                    fs.remove_file(&abs_path?, Default::default()).await?;
                }
            } else if trash {
                fs.trash_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            } else {
                fs.remove_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(entry.path)
        });

        Some(cx.spawn(async move |this, cx| {
            let path = delete.await?;
            // Wait for the rescan of the deleted path before resolving.
            this.update(cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entries_for_paths(vec![path])
            })?
            .recv()
            .await;
            Ok(())
        }))
    }
2031
    /// Rename an entry.
    ///
    /// `new_path` is the new relative path to the worktree root.
    /// If the root entry is renamed then `new_path` is the new root name instead.
    ///
    /// Renaming the root eagerly updates `abs_path` and refreshes the whole
    /// worktree; other renames refresh just the affected entry.
    fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Err(anyhow!("no entry to rename for id {entry_id:?}"))),
        };
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);

        let is_root_entry = self.root_entry().is_some_and(|e| e.id == entry_id);
        let abs_new_path = if is_root_entry {
            // The root's new name is resolved against the root's parent.
            let Some(root_parent_path) = self.abs_path().parent() else {
                return Task::ready(Err(anyhow!("no parent for path {:?}", self.abs_path)));
            };
            root_parent_path.join(&new_path)
        } else {
            let Ok(absolutize_path) = self.absolutize(&new_path) else {
                return Task::ready(Err(anyhow!("absolutizing path {new_path:?}")));
            };
            absolutize_path
        };
        let abs_path = abs_new_path.clone();
        let fs = self.fs.clone();
        let case_sensitive = self.fs_case_sensitive;
        let rename = cx.background_spawn(async move {
            let abs_old_path = abs_old_path?;
            let abs_new_path = abs_new_path;

            let abs_old_path_lower = abs_old_path.to_str().map(|p| p.to_lowercase());
            let abs_new_path_lower = abs_new_path.to_str().map(|p| p.to_lowercase());

            // If we're on a case-insensitive FS and we're doing a case-only rename (i.e. `foobar` to `FOOBAR`)
            // we want to overwrite, because otherwise we run into a file-already-exists error.
            let overwrite = !case_sensitive
                && abs_old_path != abs_new_path
                && abs_old_path_lower == abs_new_path_lower;

            fs.rename(
                &abs_old_path,
                &abs_new_path,
                fs::RenameOptions {
                    overwrite,
                    ..Default::default()
                },
            )
            .await
            .with_context(|| format!("Renaming {abs_old_path:?} into {abs_new_path:?}"))
        });

        cx.spawn(async move |this, cx| {
            rename.await?;
            Ok(this
                .update(cx, |this, cx| {
                    let local = this.as_local_mut().unwrap();
                    if is_root_entry {
                        // We eagerly update `abs_path` and refresh this worktree.
                        // Otherwise, the FS watcher would do it on the `RootUpdated` event,
                        // but with a noticeable delay, so we handle it proactively.
                        local.update_abs_path_and_refresh(
                            Some(SanitizedPath::from(abs_path.clone())),
                            cx,
                        );
                        Task::ready(Ok(this.root_entry().cloned()))
                    } else {
                        local.refresh_entry(new_path.clone(), Some(old_path), cx)
                    }
                })?
                .await?
                .map(CreatedEntry::Included)
                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
        })
    }
2112
    /// Recursively copies the entry `entry_id` to `new_path`, then refreshes
    /// the destination and returns its new entry.
    ///
    /// `relative_worktree_source_path`, when present, replaces the entry's own
    /// path as the copy source (still resolved against the worktree root).
    /// Returns `Ok(None)` when the entry id is unknown.
    fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        relative_worktree_source_path: Option<PathBuf>,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Ok(None)),
        };
        let new_path = new_path.into();
        let abs_old_path =
            if let Some(relative_worktree_source_path) = relative_worktree_source_path {
                Ok(self.abs_path().join(relative_worktree_source_path))
            } else {
                self.absolutize(&old_path)
            };
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        // Do the recursive copy off the main thread.
        let copy = cx.background_spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path?,
                &abs_new_path?,
                Default::default(),
            )
            .await
        });

        cx.spawn(async move |this, cx| {
            copy.await?;
            this.update(cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })?
            .await
        })
    }
2153
    /// Copies external `paths` into `target_directory` (keeping each source's
    /// file name), refreshes the copied paths, and returns the entry ids of
    /// those that ended up inside the worktree.
    ///
    /// Copy and refresh failures are logged rather than propagated, so the
    /// result may contain fewer ids than inputs.
    pub fn copy_external_entries(
        &self,
        target_directory: PathBuf,
        paths: Vec<Arc<Path>>,
        overwrite_existing_files: bool,
        cx: &Context<Worktree>,
    ) -> Task<Result<Vec<ProjectEntryId>>> {
        let worktree_path = self.abs_path().clone();
        let fs = self.fs.clone();
        let paths = paths
            .into_iter()
            .filter_map(|source| {
                let file_name = source.file_name()?;
                let mut target = target_directory.clone();
                target.push(file_name);

                // Do not allow copying the same file to itself.
                if source.as_ref() != target.as_path() {
                    Some((source, target))
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        // Only targets that land inside the worktree can be refreshed.
        let paths_to_refresh = paths
            .iter()
            .filter_map(|(_, target)| Some(target.strip_prefix(&worktree_path).ok()?.into()))
            .collect::<Vec<_>>();

        cx.spawn(async move |this, cx| {
            // Perform all copies off the main thread; errors are logged.
            cx.background_spawn(async move {
                for (source, target) in paths {
                    copy_recursive(
                        fs.as_ref(),
                        &source,
                        &target,
                        fs::CopyOptions {
                            overwrite: overwrite_existing_files,
                            ..Default::default()
                        },
                    )
                    .await
                    .with_context(|| {
                        anyhow!("Failed to copy file from {source:?} to {target:?}")
                    })?;
                }
                Ok::<(), anyhow::Error>(())
            })
            .await
            .log_err();
            let mut refresh = cx.read_entity(
                &this.upgrade().with_context(|| "Dropped worktree")?,
                |this, _| {
                    Ok::<postage::barrier::Receiver, anyhow::Error>(
                        this.as_local()
                            .with_context(|| "Worktree is not local")?
                            .refresh_entries_for_paths(paths_to_refresh.clone()),
                    )
                },
            )??;

            // Wait for the refresh barrier before looking entries up.
            cx.background_spawn(async move {
                refresh.next().await;
                Ok::<(), anyhow::Error>(())
            })
            .await
            .log_err();

            let this = this.upgrade().with_context(|| "Dropped worktree")?;
            cx.read_entity(&this, |this, _| {
                paths_to_refresh
                    .iter()
                    .filter_map(|path| Some(this.entry_for_path(path)?.id))
                    .collect()
            })
        })
    }
2232
2233 fn expand_entry(
2234 &self,
2235 entry_id: ProjectEntryId,
2236 cx: &Context<Worktree>,
2237 ) -> Option<Task<Result<()>>> {
2238 let path = self.entry_for_id(entry_id)?.path.clone();
2239 let mut refresh = self.refresh_entries_for_paths(vec![path]);
2240 Some(cx.background_spawn(async move {
2241 refresh.next().await;
2242 Ok(())
2243 }))
2244 }
2245
2246 fn expand_all_for_entry(
2247 &self,
2248 entry_id: ProjectEntryId,
2249 cx: &Context<Worktree>,
2250 ) -> Option<Task<Result<()>>> {
2251 let path = self.entry_for_id(entry_id).unwrap().path.clone();
2252 let mut rx = self.add_path_prefix_to_scan(path.clone());
2253 Some(cx.background_spawn(async move {
2254 rx.next().await;
2255 Ok(())
2256 }))
2257 }
2258
2259 fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
2260 let (tx, rx) = barrier::channel();
2261 self.scan_requests_tx
2262 .try_send(ScanRequest {
2263 relative_paths: paths,
2264 done: smallvec![tx],
2265 })
2266 .ok();
2267 rx
2268 }
2269
2270 pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) -> barrier::Receiver {
2271 let (tx, rx) = barrier::channel();
2272 self.path_prefixes_to_scan_tx
2273 .try_send(PathPrefixScanRequest {
2274 path: path_prefix,
2275 done: smallvec![tx],
2276 })
2277 .ok();
2278 rx
2279 }
2280
    /// Rescans `path` (and, for renames, the old path too) and resolves with
    /// the refreshed entry.
    ///
    /// Returns `Ok(None)` immediately when the path is excluded from scanning
    /// by the worktree settings; fails if the entry cannot be found once the
    /// scan completes.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        if self.settings.is_path_excluded(&path) {
            return Task::ready(Ok(None));
        }
        // Scanning the old path in the same request lets the scanner observe
        // the removal side of a rename in one pass.
        let paths = if let Some(old_path) = old_path.as_ref() {
            vec![old_path.clone(), path.clone()]
        } else {
            vec![path.clone()]
        };
        let t0 = Instant::now();
        let mut refresh = self.refresh_entries_for_paths(paths);
        cx.spawn(async move |this, cx| {
            // Wait for the scanner to process the request before reading.
            refresh.recv().await;
            log::trace!("refreshed entry {path:?} in {:?}", t0.elapsed());
            let new_entry = this.update(cx, |this, _| {
                this.entry_for_path(path)
                    .cloned()
                    .ok_or_else(|| anyhow!("failed to read path after update"))
            })??;
            Ok(Some(new_entry))
        })
    }
2308
    /// Begins streaming snapshot updates for this local worktree to a remote
    /// peer.
    ///
    /// `callback` is invoked with each (chunked) update message and returns
    /// whether delivery succeeded; on failure the stream pauses until a
    /// resume signal arrives. If an observer already exists, this just nudges
    /// it to resume instead of starting a second stream.
    fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(WorktreeRelatedMessage) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        if let Some(observer) = self.update_observer.as_mut() {
            *observer.resume_updates.borrow_mut() = ();
            return;
        }

        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
        let (snapshots_tx, mut snapshots_rx) =
            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
        // Seed the channel with the current snapshot so the peer receives a
        // complete initial update before any deltas.
        snapshots_tx
            .unbounded_send((self.snapshot(), Arc::default(), Arc::default()))
            .ok();

        let worktree_id = cx.entity_id().as_u64();
        let _maintain_remote_snapshot = cx.background_spawn(async move {
            let mut is_first = true;
            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
                // First message carries the full snapshot; later ones only
                // deltas.
                let updates = if is_first {
                    is_first = false;
                    snapshot.build_initial_update(project_id, worktree_id)
                } else {
                    snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes)
                };

                for update in updates
                    .into_iter()
                    .flat_map(proto::split_worktree_related_message)
                {
                    // Drain any stale resume signal before attempting a send.
                    let _ = resume_updates_rx.try_recv();
                    loop {
                        let result = callback(update.clone());
                        if result.await {
                            break;
                        } else {
                            // Delivery failed; block until resume is signaled,
                            // or stop entirely if the channel is closed.
                            log::info!("waiting to resume updates");
                            if resume_updates_rx.next().await.is_none() {
                                return Some(());
                            }
                        }
                    }
                }
            }
            Some(())
        });

        self.update_observer = Some(UpdateObservationState {
            snapshots_tx,
            resume_updates: resume_updates_tx,
            _maintain_remote_snapshot,
        });
    }
2364
    /// Enables sharing of private files and rescans the worktree so the
    /// previously-withheld entries are picked up.
    pub fn share_private_files(&mut self, cx: &Context<Worktree>) {
        self.share_private_files = true;
        self.restart_background_scanners(cx);
    }
2369
2370 fn update_abs_path_and_refresh(
2371 &mut self,
2372 new_path: Option<SanitizedPath>,
2373 cx: &Context<Worktree>,
2374 ) {
2375 if let Some(new_path) = new_path {
2376 self.snapshot.git_repositories = Default::default();
2377 self.snapshot.ignores_by_parent_abs_path = Default::default();
2378 let root_name = new_path
2379 .as_path()
2380 .file_name()
2381 .map_or(String::new(), |f| f.to_string_lossy().to_string());
2382 self.snapshot.update_abs_path(new_path, root_name);
2383 }
2384 self.restart_background_scanners(cx);
2385 }
2386}
2387
2388impl RemoteWorktree {
    /// The id of the project this worktree belongs to on the remote host.
    pub fn project_id(&self) -> u64 {
        self.project_id
    }
2392
    /// A handle to the RPC client used to communicate with the remote host.
    pub fn client(&self) -> AnyProtoClient {
        self.client.clone()
    }
2396
    /// Marks this worktree as disconnected: stops accepting remote updates and
    /// drops pending snapshot subscriptions so their waiters resolve with an
    /// error.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
2402
    /// Forwards an update message from the host to the background consumer
    /// task. Updates arriving after disconnection are silently dropped.
    pub fn update_from_remote(&self, update: WorktreeRelatedMessage) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }
2410
    /// Streams this remote worktree's state on to another peer: a full
    /// initial update first, then any updates received while streaming.
    ///
    /// `callback` returns whether each (chunked) message was delivered; the
    /// stream stops on the first failure, and the observer is deregistered
    /// when the stream ends.
    fn observe_updates<F, Fut>(&mut self, project_id: u64, cx: &Context<Worktree>, callback: F)
    where
        F: 'static + Send + Fn(WorktreeRelatedMessage) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        let (tx, mut rx) = mpsc::unbounded();
        let initial_updates = self
            .snapshot
            .build_initial_update(project_id, self.id().to_proto());
        self.update_observer = Some(tx);
        cx.spawn(async move |this, cx| {
            let mut updates = initial_updates;
            'outer: loop {
                for mut update in updates {
                    // SSH projects use a special project ID of 0, and we need to
                    // remap it to the correct one here.
                    match &mut update {
                        WorktreeRelatedMessage::UpdateWorktree(update_worktree) => {
                            update_worktree.project_id = project_id;
                        }
                        WorktreeRelatedMessage::UpdateRepository(update_repository) => {
                            update_repository.project_id = project_id;
                        }
                        WorktreeRelatedMessage::RemoveRepository(remove_repository) => {
                            remove_repository.project_id = project_id;
                        }
                    };

                    // Large messages are split; abort on the first delivery
                    // failure.
                    for chunk in split_worktree_related_message(update) {
                        if !callback(chunk).await {
                            break 'outer;
                        }
                    }
                }

                // Wait for the next incoming update, or finish when the
                // sender (the update_observer slot) is dropped.
                if let Some(next_update) = rx.next().await {
                    updates = vec![next_update];
                } else {
                    break;
                }
            }
            this.update(cx, |this, _| {
                let this = this.as_remote_mut().unwrap();
                this.update_observer.take();
            })
        })
        .detach();
    }
2459
    /// Whether a snapshot with at least the given scan id has been received.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }
2463
    /// Returns a future that resolves once a snapshot with at least the given
    /// scan id has been observed.
    ///
    /// Resolves immediately when the scan has already been seen; fails (the
    /// sender is dropped) when the worktree is disconnected from the host.
    pub fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            // Keep subscriptions sorted by scan id so they can be completed in
            // order as snapshots arrive; both Ok and Err give the right slot.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
2484
    /// Inserts an entry received from the host into this worktree's
    /// snapshots, after waiting for the corresponding scan to be observed.
    fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &Context<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(async move |this, cx| {
            wait_for_snapshot.await?;
            this.update(cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                // Mutate the shared background snapshot, then mirror the
                // result into the foreground copy.
                let snapshot = &mut worktree.background_snapshot.lock().0;
                let entry = snapshot.insert_entry(entry, &worktree.file_scan_inclusions);
                worktree.snapshot = snapshot.clone();
                entry
            })?
        })
    }
2503
    /// Asks the host to delete (or trash) the entry, then removes it from the
    /// local snapshots once the host's scan has been observed.
    fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &Context<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let response = self.client.request(proto::DeleteProjectEntry {
            project_id: self.project_id,
            entry_id: entry_id.to_proto(),
            use_trash: trash,
        });
        Some(cx.spawn(async move |this, cx| {
            let response = response.await?;
            let scan_id = response.worktree_scan_id as usize;

            // Wait until our snapshot has caught up with the host's scan
            // before applying the deletion locally.
            this.update(cx, move |this, _| {
                this.as_remote_mut().unwrap().wait_for_snapshot(scan_id)
            })?
            .await?;

            this.update(cx, |this, _| {
                let this = this.as_remote_mut().unwrap();
                // Mutate the shared background snapshot, then mirror it into
                // the foreground copy.
                let snapshot = &mut this.background_snapshot.lock().0;
                snapshot.delete_entry(entry_id);
                this.snapshot = snapshot.clone();
            })
        }))
    }
2532
    /// Asks the host to rename the entry to `new_path`.
    ///
    /// Resolves with the updated entry, or with `CreatedEntry::Excluded` when
    /// the host reports no entry (the destination is excluded from scanning).
    fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &Context<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let new_path: Arc<Path> = new_path.into();
        let response = self.client.request(proto::RenameProjectEntry {
            project_id: self.project_id,
            entry_id: entry_id.to_proto(),
            new_path: new_path.as_ref().to_proto(),
        });
        cx.spawn(async move |this, cx| {
            let response = response.await?;
            match response.entry {
                Some(entry) => this
                    .update(cx, |this, cx| {
                        this.as_remote_mut().unwrap().insert_entry(
                            entry,
                            response.worktree_scan_id as usize,
                            cx,
                        )
                    })?
                    .await
                    .map(CreatedEntry::Included),
                None => {
                    // No entry means the destination is excluded; report its
                    // absolute path instead.
                    let abs_path = this.update(cx, |worktree, _| {
                        worktree
                            .absolutize(&new_path)
                            .with_context(|| format!("absolutizing {new_path:?}"))
                    })??;
                    Ok(CreatedEntry::Excluded { abs_path })
                }
            }
        })
    }
2569}
2570
2571impl Snapshot {
    /// Creates an empty snapshot rooted at `abs_path`.
    ///
    /// `scan_id` starts at 1 while `completed_scan_id` is 0, so a fresh
    /// snapshot is considered not-yet-scanned.
    pub fn new(id: u64, root_name: String, abs_path: Arc<Path>) -> Self {
        Snapshot {
            id: WorktreeId::from_usize(id as usize),
            abs_path: abs_path.into(),
            // Lowercased character bag of the root name, used for fuzzy
            // matching.
            root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
            root_name,
            always_included_entries: Default::default(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            repositories: Default::default(),
            scan_id: 1,
            completed_scan_id: 0,
        }
    }
2586
    /// This worktree's id.
    pub fn id(&self) -> WorktreeId {
        self.id
    }
2590
    // TODO:
    // Consider the following:
    //
    // ```rust
    // let abs_path: Arc<Path> = snapshot.abs_path(); // e.g. "C:\Users\user\Desktop\project"
    // let some_non_trimmed_path = Path::new("\\\\?\\C:\\Users\\user\\Desktop\\project\\main.rs");
    // // The caller perform some actions here:
    // some_non_trimmed_path.strip_prefix(abs_path); // This fails
    // some_non_trimmed_path.starts_with(abs_path); // This fails too
    // ```
    //
    // This is definitely a bug, but it's not clear if we should handle it here or not.
    /// The absolute path of the worktree root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        self.abs_path.as_path()
    }
2606
    /// Builds the message sequence that describes this snapshot from scratch:
    /// one `UpdateWorktree` containing every entry (sorted by id), followed by
    /// an initial update for each repository.
    fn build_initial_update(
        &self,
        project_id: u64,
        worktree_id: u64,
    ) -> Vec<WorktreeRelatedMessage> {
        let mut updated_entries = self
            .entries_by_path
            .iter()
            .map(proto::Entry::from)
            .collect::<Vec<_>>();
        updated_entries.sort_unstable_by_key(|e| e.id);

        [proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_proto(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries: Vec::new(),
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            // Sent in separate messages.
            updated_repositories: Vec::new(),
            removed_repositories: Vec::new(),
        }
        .into()]
        .into_iter()
        .chain(
            self.repositories
                .iter()
                .map(|repository| repository.initial_update(project_id, self.scan_id).into()),
        )
        .collect()
    }
2641
2642 pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
2643 if path
2644 .components()
2645 .any(|component| !matches!(component, std::path::Component::Normal(_)))
2646 {
2647 return Err(anyhow!("invalid path"));
2648 }
2649 if path.file_name().is_some() {
2650 Ok(self.abs_path.as_path().join(path))
2651 } else {
2652 Ok(self.abs_path.as_path().to_path_buf())
2653 }
2654 }
2655
    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
2659
    /// Inserts (or replaces) an entry deserialized from a peer.
    ///
    /// Keeps `entries_by_id` and `entries_by_path` consistent: if this id was
    /// previously mapped to a different path, the stale path entry is removed.
    fn insert_entry(
        &mut self,
        entry: proto::Entry,
        always_included_paths: &PathMatcher,
    ) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, always_included_paths, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
2681
    /// Removes the entry with the given id, along with all entries under its
    /// path, returning the removed entry's path.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>(&());
            // Keep everything before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and its descendants, dropping them
            // from the id map as well...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...then keep the remainder of the tree.
            new_entries_by_path.append(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }
2702
    /// Returns the git status for the file at the given worktree-relative
    /// path, if it lies within a repository that tracks a status for it.
    pub fn status_for_file(&self, path: impl AsRef<Path>) -> Option<FileStatus> {
        let path = path.as_ref();
        self.repository_for_path(path).and_then(|repo| {
            // `repository_for_path` only yields repos whose work directory
            // contains `path`, so relativizing is presumed infallible here
            // — TODO confirm against `relativize`'s contract.
            let repo_path = repo.relativize(path).unwrap();
            repo.statuses_by_path
                .get(&PathKey(repo_path.0), &())
                .map(|entry| entry.status)
        })
    }
2712
    /// Replaces the worktree's absolute path and root name, recomputing the
    /// fuzzy-match character bag only when the name actually changed.
    fn update_abs_path(&mut self, abs_path: SanitizedPath, root_name: String) {
        self.abs_path = abs_path;
        if root_name != self.root_name {
            self.root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
            self.root_name = root_name;
        }
    }
2720
    /// Applies an `UpdateRepository` message from the host, updating an
    /// existing repository entry in place or inserting a new one.
    ///
    /// Logs an error (without failing) when the repository's work directory
    /// has no corresponding entry in this snapshot.
    pub(crate) fn apply_update_repository(
        &mut self,
        update: proto::UpdateRepository,
    ) -> Result<()> {
        // NOTE: this is practically but not semantically correct. For now we're using the
        // ID field to store the work directory ID, but eventually it will be a different
        // kind of ID.
        let work_directory_id = ProjectEntryId::from_proto(update.id);

        if let Some(work_dir_entry) = self.entry_for_id(work_directory_id) {
            let conflicted_paths = TreeSet::from_ordered_entries(
                update
                    .current_merge_conflicts
                    .into_iter()
                    .map(|path| RepoPath(Path::new(&path).into())),
            );

            if self
                .repositories
                .contains(&PathKey(work_dir_entry.path.clone()), &())
            {
                // Existing repository: apply status removals and insertions as
                // one batch of edits. Statuses that fail to convert are logged
                // and skipped.
                let edits = update
                    .removed_statuses
                    .into_iter()
                    .map(|path| Edit::Remove(PathKey(FromProto::from_proto(path))))
                    .chain(
                        update
                            .updated_statuses
                            .into_iter()
                            .filter_map(|updated_status| {
                                Some(Edit::Insert(updated_status.try_into().log_err()?))
                            }),
                    )
                    .collect::<Vec<_>>();

                self.repositories
                    .update(&PathKey(work_dir_entry.path.clone()), &(), |repo| {
                        repo.current_branch = update.branch_summary.as_ref().map(proto_to_branch);
                        repo.statuses_by_path.edit(edits, &());
                        repo.current_merge_conflicts = conflicted_paths
                    });
            } else {
                // New repository: build its status tree from scratch.
                let statuses = SumTree::from_iter(
                    update
                        .updated_statuses
                        .into_iter()
                        .filter_map(|updated_status| updated_status.try_into().log_err()),
                    &(),
                );

                self.repositories.insert_or_replace(
                    RepositoryEntry {
                        work_directory_id,
                        // When syncing repository entries from a peer, we don't need
                        // the location_in_repo field, since git operations don't happen locally
                        // anyway.
                        work_directory: WorkDirectory::InProject {
                            relative_path: work_dir_entry.path.clone(),
                        },
                        current_branch: update.branch_summary.as_ref().map(proto_to_branch),
                        statuses_by_path: statuses,
                        current_merge_conflicts: conflicted_paths,
                        work_directory_abs_path: update.abs_path.into(),
                    },
                    &(),
                );
            }
        } else {
            log::error!("no work directory entry for repository {:?}", update.id)
        }

        Ok(())
    }
2794
    /// Applies a `RemoveRepository` message from the host by dropping the
    /// matching repository entry, if any.
    pub(crate) fn apply_remove_repository(
        &mut self,
        update: proto::RemoveRepository,
    ) -> Result<()> {
        // NOTE: this is practically but not semantically correct. For now we're using the
        // ID field to store the work directory ID, but eventually it will be a different
        // kind of ID.
        let work_directory_id = ProjectEntryId::from_proto(update.id);
        self.repositories.retain(&(), |entry: &RepositoryEntry| {
            entry.work_directory_id != work_directory_id
        });
        Ok(())
    }
2808
    /// Applies an `UpdateWorktree` message from the host: updates the root
    /// path and name, applies entry removals and insertions, and advances the
    /// scan ids.
    pub(crate) fn apply_update_worktree(
        &mut self,
        update: proto::UpdateWorktree,
        always_included_paths: &PathMatcher,
    ) -> Result<()> {
        log::debug!(
            "applying remote worktree update. {} entries updated, {} removed",
            update.updated_entries.len(),
            update.removed_entries.len()
        );
        self.update_abs_path(
            SanitizedPath::from(PathBuf::from_proto(update.abs_path)),
            update.root_name,
        );

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();

        for entry_id in update.removed_entries {
            let entry_id = ProjectEntryId::from_proto(entry_id);
            entries_by_id_edits.push(Edit::Remove(entry_id));
            if let Some(entry) = self.entry_for_id(entry_id) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, always_included_paths, entry))?;
            // An update may move an entry to a new path; drop the mapping for
            // its previous path.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            // Likewise, a different entry may previously have occupied this
            // path; drop its id mapping.
            if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
                if old_entry.id != entry.id {
                    entries_by_id_edits.push(Edit::Remove(old_entry.id));
                }
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }
2864
    /// Applies a single update message from the host to this snapshot,
    /// dispatching on the message variant.
    pub(crate) fn apply_remote_update(
        &mut self,
        update: WorktreeRelatedMessage,
        always_included_paths: &PathMatcher,
    ) -> Result<()> {
        match update {
            WorktreeRelatedMessage::UpdateWorktree(update) => {
                self.apply_update_worktree(update, always_included_paths)
            }
            WorktreeRelatedMessage::UpdateRepository(update) => {
                self.apply_update_repository(update)
            }
            WorktreeRelatedMessage::RemoveRepository(update) => {
                self.apply_remove_repository(update)
            }
        }
    }
2882
    /// Total number of entries, including ignored ones.
    pub fn entry_count(&self) -> usize {
        self.entries_by_path.summary().count
    }
2886
    /// Number of non-ignored entries.
    pub fn visible_entry_count(&self) -> usize {
        self.entries_by_path.summary().non_ignored_count
    }
2890
    /// Number of directory entries (total entries minus files).
    pub fn dir_count(&self) -> usize {
        let summary = self.entries_by_path.summary();
        summary.count - summary.file_count
    }
2895
    /// Number of non-ignored directory entries.
    pub fn visible_dir_count(&self) -> usize {
        let summary = self.entries_by_path.summary();
        summary.non_ignored_count - summary.non_ignored_file_count
    }
2900
    /// Number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }
2904
    /// Number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().non_ignored_file_count
    }
2908
    /// Returns a traversal positioned at the `start_offset`-th entry among
    /// those matching the include flags.
    fn traverse_from_offset(
        &self,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor(&());
        // Seek by filtered count so the offset is relative to the requested
        // entry kinds, not to the raw entry list.
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_files,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            snapshot: self,
            cursor,
            include_files,
            include_dirs,
            include_ignored,
        }
    }
2935
    /// Returns a traversal positioned at the given worktree-relative path.
    pub fn traverse_from_path(
        &self,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        Traversal::new(self, include_files, include_dirs, include_ignored, path)
    }
2945
    /// Traverses file entries only, starting at the given offset.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(true, false, include_ignored, start)
    }
2949
    /// Traverses directory entries only, starting at the given offset.
    pub fn directories(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, true, include_ignored, start)
    }
2953
    /// Traverses both files and directories, starting at the given offset.
    pub fn entries(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(true, true, include_ignored, start)
    }
2957
    /// Test helper: collects the status entries of the repository whose work
    /// directory is `work_dir`, if such a repository exists.
    #[cfg(any(feature = "test-support", test))]
    pub fn git_status(&self, work_dir: &Path) -> Option<Vec<StatusEntry>> {
        self.repositories
            .get(&PathKey(work_dir.into()), &())
            .map(|repo| repo.status().collect())
    }
2964
    /// All repositories known to this snapshot, keyed by work directory path.
    pub fn repositories(&self) -> &SumTree<RepositoryEntry> {
        &self.repositories
    }
2968
    /// Get the repository whose work directory corresponds to the given path.
    pub(crate) fn repository(&self, work_directory: PathKey) -> Option<RepositoryEntry> {
        self.repositories.get(&work_directory, &()).cloned()
    }
2973
    /// Get the repository whose work directory contains the given path.
    ///
    /// When repositories are nested, the last match in iteration order wins —
    /// presumably the innermost one, given path-sorted storage; TODO confirm.
    #[track_caller]
    pub fn repository_for_path(&self, path: &Path) -> Option<&RepositoryEntry> {
        self.repositories
            .iter()
            .filter(|repo| repo.directory_contains(path))
            .last()
    }
2982
    /// Given an ordered iterator of entries, returns an iterator of those entries,
    /// along with their containing git repository.
    ///
    /// Maintains a stack of repositories containing the current entry while
    /// advancing through the (path-ordered) repositories exactly once, so the
    /// whole pass is linear in entries + repositories.
    #[track_caller]
    pub fn entries_with_repositories<'a>(
        &'a self,
        entries: impl 'a + Iterator<Item = &'a Entry>,
    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
        let mut containing_repos = Vec::<&RepositoryEntry>::new();
        let mut repositories = self.repositories().iter().peekable();
        entries.map(move |entry| {
            // Pop repositories that no longer contain the current entry.
            while let Some(repository) = containing_repos.last() {
                if repository.directory_contains(&entry.path) {
                    break;
                } else {
                    containing_repos.pop();
                }
            }
            // Push any upcoming repositories that do contain it.
            while let Some(repository) = repositories.peek() {
                if repository.directory_contains(&entry.path) {
                    containing_repos.push(repositories.next().unwrap());
                } else {
                    break;
                }
            }
            // The top of the stack is the innermost containing repository.
            let repo = containing_repos.last().copied();
            (entry, repo)
        })
    }
3011
    /// Iterates over the paths of all entries, excluding the root entry
    /// itself (whose path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>(&())
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }
3019
    /// Iterates over the direct children of `parent_path`, including files,
    /// directories, and ignored entries.
    pub fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let options = ChildEntriesOptions {
            include_files: true,
            include_dirs: true,
            include_ignored: true,
        };
        self.child_entries_with_options(parent_path, options)
    }
3028
    /// Iterates over the direct children of `parent_path`, filtered by the
    /// given options.
    pub fn child_entries_with_options<'a>(
        &'a self,
        parent_path: &'a Path,
        options: ChildEntriesOptions,
    ) -> ChildEntriesIter<'a> {
        // Position the cursor just past the parent itself; the iterator then
        // yields entries until it leaves the parent's subtree.
        let mut cursor = self.entries_by_path.cursor(&());
        cursor.seek(&TraversalTarget::path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            snapshot: self,
            cursor,
            include_files: options.include_files,
            include_dirs: options.include_dirs,
            include_ignored: options.include_ignored,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }
3048
    /// The entry at the worktree root (the empty relative path), if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }
3052
    /// TODO: what's the difference between `root_dir` and `abs_path`?
    /// is there any? if so, document it.
    /// Returns the worktree's absolute path only when the root entry is a
    /// directory (`None` for single-file worktrees).
    pub fn root_dir(&self) -> Option<Arc<Path>> {
        self.root_entry()
            .filter(|entry| entry.is_dir())
            .map(|_| self.abs_path().clone())
    }
3060
    /// The file name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
3064
3065 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
3066 self.repositories
3067 .get(&PathKey(Path::new("").into()), &())
3068 .map(|entry| entry.to_owned())
3069 }
3070
3071 pub fn git_entry(&self, work_directory_path: Arc<Path>) -> Option<RepositoryEntry> {
3072 self.repositories
3073 .get(&PathKey(work_directory_path), &())
3074 .map(|entry| entry.to_owned())
3075 }
3076
    /// Iterates over all repository entries in this snapshot.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repositories.iter()
    }
3080
    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }
3084
3085 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
3086 let path = path.as_ref();
3087 debug_assert!(path.is_relative());
3088 self.traverse_from_path(true, true, true, path)
3089 .entry()
3090 .and_then(|entry| {
3091 if entry.path.as_ref() == path {
3092 Some(entry)
3093 } else {
3094 None
3095 }
3096 })
3097 }
3098
    /// Looks up an entry by id, resolving through its recorded path.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }
3103
    /// Returns the inode of the entry at the given path, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
3107}
3108
3109impl LocalSnapshot {
    /// Returns the local (on-disk) repository state for the repository whose
    /// work directory contains the given path.
    pub fn local_repo_for_path(&self, path: &Path) -> Option<&LocalRepositoryEntry> {
        let repository_entry = self.repository_for_path(path)?;
        let work_directory_id = repository_entry.work_directory_id();
        self.git_repositories.get(&work_directory_id)
    }
3115
    /// Builds the delta messages describing the given entry and repository
    /// changes: per-repository updates/removals, followed by one
    /// `UpdateWorktree` carrying entry insertions and removals.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> Vec<WorktreeRelatedMessage> {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updates = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        for (entry, change) in repo_changes.iter() {
            // Diff old vs. current repository state to decide whether this is
            // an update, an insertion, or a removal.
            let new_repo = self.repositories.get(&PathKey(entry.path.clone()), &());
            match (&change.old_repository, new_repo) {
                (Some(old_repo), Some(new_repo)) => {
                    updates.push(
                        new_repo
                            .build_update(old_repo, project_id, self.scan_id)
                            .into(),
                    );
                }
                (None, Some(new_repo)) => {
                    updates.push(new_repo.initial_update(project_id, self.scan_id).into());
                }
                (Some(old_repo), None) => {
                    updates.push(
                        proto::RemoveRepository {
                            project_id,
                            id: old_repo.work_directory_id.to_proto(),
                        }
                        .into(),
                    );
                }
                _ => {}
            }
        }

        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);

        // TODO - optimize, knowing that removed_entries are sorted.
        // An entry that was both removed and re-added should only appear in
        // the updated list.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        updates.push(
            proto::UpdateWorktree {
                project_id,
                worktree_id,
                abs_path: self.abs_path().to_proto(),
                root_name: self.root_name().to_string(),
                updated_entries,
                removed_entries,
                scan_id: self.scan_id as u64,
                is_last_update: self.completed_scan_id == self.scan_id,
                // Sent in separate messages.
                updated_repositories: Vec::new(),
                removed_repositories: Vec::new(),
            }
            .into(),
        );
        updates
    }
3185
    /// Inserts (or replaces) an entry discovered on disk, keeping the id and
    /// path maps consistent and refreshing gitignore state when the entry is
    /// a `.gitignore` file.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            // Re-parse the gitignore so ignore decisions reflect its current
            // contents; parse failures are logged and ignored.
            let abs_path = self.abs_path.as_path().join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Don't downgrade a directory we've already scanned back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            // A different entry previously occupied this path; drop its id
            // mapping so the two maps stay consistent.
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
3231
    /// Collects the inodes of every ancestor of `path` (excluding `path`
    /// itself) that has an entry in this snapshot.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }
3241
    /// Builds the stack of gitignores that applies at `abs_path`.
    ///
    /// Walks up the ancestor chain, stopping at the first directory that
    /// contains a `.git`, then layers the collected ignores from outermost to
    /// innermost. If any ancestor — or the path itself — turns out ignored,
    /// the result is the "ignore everything" stack.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for (index, ancestor) in abs_path.ancestors().enumerate() {
            // Skip index 0 (the path itself): only ancestors' gitignores
            // apply.
            if index > 0 {
                if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                    new_ignores.push((ancestor, Some(ignore.clone())));
                } else {
                    new_ignores.push((ancestor, None));
                }
            }
            // A `.git` directory marks the repository root; gitignores above
            // it don't apply.
            if ancestor.join(*DOT_GIT).exists() {
                break;
            }
        }

        // Layer from outermost ancestor inward; an ignored ancestor short-
        // circuits to the "ignore all" stack.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
3273
3274 #[cfg(test)]
3275 pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
3276 self.entries_by_path
3277 .cursor::<()>(&())
3278 .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
3279 }
3280
    /// Test-only consistency check over the snapshot's internal structures:
    /// the two entry maps must mirror each other, file traversals must agree
    /// with the raw tree, and (optionally) cached ignores must correspond to
    /// real entries.
    #[cfg(test)]
    pub fn check_invariants(&self, git_state: bool) {
        use pretty_assertions::assert_eq;

        // Every path-keyed entry must have exactly one matching id-keyed
        // record and vice versa.
        assert_eq!(
            self.entries_by_path
                .cursor::<()>(&())
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>(&())
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        // The file traversals must visit exactly the file entries, in order,
        // with the visible traversal skipping ignored/external entries
        // (unless always-included).
        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>(&()) {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if (!entry.is_ignored && !entry.is_external) || entry.is_always_included {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // Walking the tree via `child_entries` (in a stack-based traversal)
        // must yield the same order as the flat path-sorted cursor.
        let mut bfs_paths = Vec::new();
        let mut stack = self
            .root_entry()
            .map(|e| e.path.as_ref())
            .into_iter()
            .collect::<Vec<_>>();
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>(&())
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        // The public traversal API must agree with the raw cursor as well.
        let dfs_paths_via_traversal = self
            .entries(true, 0)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        if git_state {
            // Every cached ignore must be backed by an actual directory entry
            // containing a `.gitignore` entry.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path = ignore_parent_abs_path
                    .strip_prefix(self.abs_path.as_path())
                    .unwrap();
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(*GITIGNORE))
                    .is_some());
            }
        }
    }
3352
    /// Test-only check that the repository collections agree: the
    /// work-directory-keyed `repositories` set and the entry-id-keyed
    /// `git_repositories` map must describe the same set of repositories.
    #[cfg(test)]
    fn check_git_invariants(&self) {
        let dotgit_paths = self
            .git_repositories
            .iter()
            .map(|repo| repo.1.dot_git_dir_abs_path.clone())
            .collect::<HashSet<_>>();
        let work_dir_paths = self
            .repositories
            .iter()
            .map(|repo| repo.work_directory.path_key())
            .collect::<HashSet<_>>();
        // Each distinct `.git` dir corresponds to one work directory, with no
        // duplicates in either collection.
        assert_eq!(dotgit_paths.len(), work_dir_paths.len());
        assert_eq!(self.repositories.iter().count(), work_dir_paths.len());
        assert_eq!(self.git_repositories.iter().count(), work_dir_paths.len());
        // Every repository entry must have a matching local git repository.
        for entry in self.repositories.iter() {
            self.git_repositories.get(&entry.work_directory_id).unwrap();
        }
    }
3372
3373 #[cfg(test)]
3374 pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3375 let mut paths = Vec::new();
3376 for entry in self.entries_by_path.cursor::<()>(&()) {
3377 if include_ignored || !entry.is_ignored {
3378 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3379 }
3380 }
3381 paths.sort_by(|a, b| a.0.cmp(b.0));
3382 paths
3383 }
3384}
3385
3386impl BackgroundScannerState {
    /// Decides whether the background scanner should descend into `entry`.
    ///
    /// Unconditionally scanned: non-external entries that are not ignored (or
    /// are always-included), `.git` directories, and the local settings
    /// folder. Otherwise, a directory is scanned only if it was scanned
    /// before, or is covered by a pending scan request or scan prefix.
    fn should_scan_directory(&self, entry: &Entry) -> bool {
        (!entry.is_external && (!entry.is_ignored || entry.is_always_included))
            || entry.path.file_name() == Some(*DOT_GIT)
            || entry.path.file_name() == Some(local_settings_folder_relative_path().as_os_str())
            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
            || self
                .paths_to_scan
                .iter()
                .any(|p| p.starts_with(&entry.path))
            || self
                .path_prefixes_to_scan
                .iter()
                .any(|p| entry.path.starts_with(p))
    }
3401
    /// Queues a recursive scan job for `entry`'s directory.
    ///
    /// Skips the job if an ancestor already has the same inode (which would
    /// indicate a filesystem cycle, e.g. via a symlink), preventing infinite
    /// recursion during the scan.
    fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
        let path = entry.path.clone();
        let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
        let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);

        if !ancestor_inodes.contains(&entry.inode) {
            ancestor_inodes.insert(entry.inode);
            scan_job_tx
                .try_send(ScanJob {
                    abs_path,
                    path,
                    ignore_stack,
                    scan_queue: scan_job_tx.clone(),
                    ancestor_inodes,
                    is_external: entry.is_external,
                })
                .unwrap();
        }
    }
3421
    /// Tries to carry over a previously-assigned entry id for `entry`, so
    /// that renames and updates keep a stable `ProjectEntryId`.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(mtime) = entry.mtime {
            // If an entry with the same inode was removed from the worktree during this scan,
            // then it *might* represent the same file or directory. But the OS might also have
            // re-used the inode for a completely different file or directory.
            //
            // Conditionally reuse the old entry's id:
            // * if the mtime is the same, the file was probably renamed.
            // * if the path is the same, the file may just have been updated
            if let Some(removed_entry) = self.removed_entries.remove(&entry.inode) {
                if removed_entry.mtime == Some(mtime) || removed_entry.path == entry.path {
                    entry.id = removed_entry.id;
                }
            } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
                // Same path still present in the snapshot: keep its id.
                entry.id = existing_entry.id;
            }
        }
    }
3440
    /// Inserts `entry` into the snapshot (reusing a prior entry id where
    /// possible) and, if the entry is a `.git` directory, registers the
    /// corresponding git repository.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs, watcher: &dyn Watcher) -> Entry {
        self.reuse_entry_id(&mut entry);
        let entry = self.snapshot.insert_entry(entry, fs);
        if entry.path.file_name() == Some(&DOT_GIT) {
            self.insert_git_repository(entry.path.clone(), fs, watcher);
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);

        entry
    }
3453
    /// Records the results of scanning one directory: marks the parent as a
    /// loaded `Dir`, caches the directory's `.gitignore` (if any), and bulk
    /// inserts the child `entries` into both entry maps.
    fn populate_dir(
        &mut self,
        parent_path: &Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = if let Some(parent_entry) = self
            .snapshot
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            // The directory may have been deleted between the scan being
            // scheduled and its results arriving; nothing to populate.
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
            EntryKind::Dir => {}
            // The path no longer refers to a directory; discard the results.
            _ => return,
        }

        if let Some(ignore) = ignore {
            // `false`: this freshly parsed ignore does not need an update pass.
            let abs_parent_path = self.snapshot.abs_path.as_path().join(parent_path).into();
            self.snapshot
                .ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        let parent_entry_id = parent_entry.id;
        self.scanned_dirs.insert(parent_entry_id);
        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for entry in entries {
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.snapshot.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // Keep `changed_paths` sorted while recording that this dir changed.
        if let Err(ix) = self.changed_paths.binary_search(parent_path) {
            self.changed_paths.insert(ix, parent_path.clone());
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }
3514
    /// Removes `path` and everything beneath it from the snapshot.
    ///
    /// Removed entries are remembered (keyed by inode) so a subsequent
    /// re-scan can reuse their ids (see `reuse_entry_id`). Also marks any
    /// removed `.gitignore`'s cached rules as needing an update, and drops
    /// git repository records rooted under `path`.
    fn remove_path(&mut self, path: &Path) {
        log::info!("background scanner removing path {path:?}");
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into [..path), [path..successor),
            // and the suffix; the middle segment is everything under `path`.
            let mut cursor = self
                .snapshot
                .entries_by_path
                .cursor::<TraversalProgress>(&());
            new_entries = cursor.slice(&TraversalTarget::path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::successor(path), Bias::Left, &());
            new_entries.append(cursor.suffix(&()), &());
        }
        self.snapshot.entries_by_path = new_entries;

        let mut removed_ids = Vec::with_capacity(removed_entries.summary().count);
        for entry in removed_entries.cursor::<()>(&()) {
            // Keep only the newest removed entry per inode; newer ids win.
            match self.removed_entries.entry(entry.inode) {
                hash_map::Entry::Occupied(mut e) => {
                    let prev_removed_entry = e.get_mut();
                    if entry.id > prev_removed_entry.id {
                        *prev_removed_entry = entry.clone();
                    }
                }
                hash_map::Entry::Vacant(e) => {
                    e.insert(entry.clone());
                }
            }

            // A removed `.gitignore` invalidates its parent's cached rules.
            if entry.path.file_name() == Some(&GITIGNORE) {
                let abs_parent_path = self
                    .snapshot
                    .abs_path
                    .as_path()
                    .join(entry.path.parent().unwrap());
                if let Some((_, needs_update)) = self
                    .snapshot
                    .ignores_by_parent_abs_path
                    .get_mut(abs_parent_path.as_path())
                {
                    *needs_update = true;
                }
            }

            // Collect removed ids, kept sorted for the binary searches below.
            if let Err(ix) = removed_ids.binary_search(&entry.id) {
                removed_ids.insert(ix, entry.id);
            }
        }

        self.snapshot.entries_by_id.edit(
            removed_ids.iter().map(|&id| Edit::Remove(id)).collect(),
            &(),
        );
        self.snapshot
            .git_repositories
            .retain(|id, _| removed_ids.binary_search(id).is_err());
        self.snapshot.repositories.retain(&(), |repository| {
            let retain = !repository.work_directory.path_key().0.starts_with(path);
            if !retain {
                log::info!(
                    "dropping repository entry for {:?}",
                    repository.work_directory
                );
            }
            retain
        });

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }
3585
    /// Registers the repository whose `.git` directory lives at
    /// `dot_git_path` (a worktree-relative path).
    ///
    /// Returns `None` without registering when the `.git` directory is nested
    /// inside another repository's metadata, or when the worktree root *is*
    /// the `.git` directory itself.
    fn insert_git_repository(
        &mut self,
        dot_git_path: Arc<Path>,
        fs: &dyn Fs,
        watcher: &dyn Watcher,
    ) -> Option<LocalRepositoryEntry> {
        let work_dir_path: Arc<Path> = match dot_git_path.parent() {
            Some(parent_dir) => {
                // Guard against repositories inside the repository metadata
                if parent_dir.iter().any(|component| component == *DOT_GIT) {
                    log::info!(
                        "not building git repository for nested `.git` directory, `.git` path in the worktree: {dot_git_path:?}"
                    );
                    return None;
                };
                log::info!(
                    "building git repository, `.git` path in the worktree: {dot_git_path:?}"
                );

                parent_dir.into()
            }
            None => {
                // `dot_git_path.parent().is_none()` means `.git` directory is the opened worktree itself,
                // no files inside that directory are tracked by git, so no need to build the repo around it
                log::info!(
                    "not building git repository for the worktree itself, `.git` path in the worktree: {dot_git_path:?}"
                );
                return None;
            }
        };

        self.insert_git_repository_for_path(
            WorkDirectory::InProject {
                relative_path: work_dir_path,
            },
            dot_git_path,
            fs,
            watcher,
        )
    }
3626
    /// Opens the git repository at `dot_git_path`, starts watching its
    /// metadata, and records it in both `repositories` and
    /// `git_repositories`.
    ///
    /// Returns `None` if the work directory has no snapshot entry, a
    /// repository is already registered for it, or opening/watching fails.
    fn insert_git_repository_for_path(
        &mut self,
        work_directory: WorkDirectory,
        dot_git_path: Arc<Path>,
        fs: &dyn Fs,
        watcher: &dyn Watcher,
    ) -> Option<LocalRepositoryEntry> {
        log::info!("insert git repository for {dot_git_path:?}");
        let work_dir_entry = self.snapshot.entry_for_path(work_directory.path_key().0)?;
        let work_directory_abs_path = self.snapshot.absolutize(&work_dir_entry.path).log_err()?;

        // Already registered for this work directory; nothing to do.
        if self
            .snapshot
            .git_repositories
            .get(&work_dir_entry.id)
            .is_some()
        {
            log::info!("existing git repository for {work_directory:?}");
            return None;
        }

        let dot_git_abs_path = self.snapshot.abs_path.as_path().join(&dot_git_path);

        let t0 = Instant::now();
        let repository = fs.open_repo(&dot_git_abs_path)?;
        log::info!("opened git repo for {dot_git_abs_path:?}");

        let repository_path = repository.path();
        watcher.add(&repository_path).log_err()?;

        let actual_dot_git_dir_abs_path = repository.main_repository_path();
        let dot_git_worktree_abs_path = if actual_dot_git_dir_abs_path == dot_git_abs_path {
            None
        } else {
            // The two paths could be different because we opened a git worktree.
            // When that happens:
            //
            // * `dot_git_abs_path` is a file that points to the worktree-subdirectory in the actual
            // .git directory.
            //
            // * `repository_path` is the worktree-subdirectory.
            //
            // * `actual_dot_git_dir_abs_path` is the path to the actual .git directory. In git
            // documentation this is called the "commondir".
            watcher.add(&dot_git_abs_path).log_err()?;
            Some(Arc::from(dot_git_abs_path))
        };

        log::trace!("constructed libgit2 repo in {:?}", t0.elapsed());

        if let Some(git_hosting_provider_registry) = self.git_hosting_provider_registry.clone() {
            git_hosting_providers::register_additional_providers(
                git_hosting_provider_registry,
                repository.clone(),
            );
        }

        let work_directory_id = work_dir_entry.id;
        self.snapshot.repositories.insert_or_replace(
            RepositoryEntry {
                work_directory_id,
                work_directory: work_directory.clone(),
                work_directory_abs_path,
                current_branch: None,
                statuses_by_path: Default::default(),
                current_merge_conflicts: Default::default(),
            },
            &(),
        );

        let local_repository = LocalRepositoryEntry {
            work_directory_id,
            work_directory: work_directory.clone(),
            git_dir_scan_id: 0,
            status_scan_id: 0,
            repo_ptr: repository.clone(),
            dot_git_dir_abs_path: actual_dot_git_dir_abs_path.into(),
            dot_git_worktree_abs_path,
            current_merge_head_shas: Default::default(),
            merge_message: None,
        };

        self.snapshot
            .git_repositories
            .insert(work_directory_id, local_repository.clone());

        log::info!("inserting new local git repository");
        Some(local_repository)
    }
3716}
3717
3718async fn is_git_dir(path: &Path, fs: &dyn Fs) -> bool {
3719 if path.file_name() == Some(&*DOT_GIT) {
3720 return true;
3721 }
3722
3723 // If we're in a bare repository, we are not inside a `.git` folder. In a
3724 // bare repository, the root folder contains what would normally be in the
3725 // `.git` folder.
3726 let head_metadata = fs.metadata(&path.join("HEAD")).await;
3727 if !matches!(head_metadata, Ok(Some(_))) {
3728 return false;
3729 }
3730 let config_metadata = fs.metadata(&path.join("config")).await;
3731 matches!(config_metadata, Ok(Some(_)))
3732}
3733
3734async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
3735 let contents = fs.load(abs_path).await?;
3736 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
3737 let mut builder = GitignoreBuilder::new(parent);
3738 for line in contents.lines() {
3739 builder.add_line(Some(abs_path.into()), line)?;
3740 }
3741 Ok(builder.build()?)
3742}
3743
/// Both worktree variants expose their underlying `Snapshot` directly.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
3754
/// A local worktree dereferences to its richer `LocalSnapshot`.
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
3762
/// A remote worktree dereferences to the shared `Snapshot`.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
3770
/// Debug output for a local worktree delegates to its snapshot.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
3776
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Newtype wrappers so the two sum trees render compactly instead of
        // dumping their internal node structure.
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl fmt::Debug for EntriesByPath<'_> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // Render as a `path => id` map.
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl fmt::Debug for EntriesById<'_> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}
3804
/// A handle to a file that lives within a worktree, shared between local and
/// remote worktrees.
#[derive(Clone, PartialEq)]
pub struct File {
    // The worktree that contains this file.
    pub worktree: Entity<Worktree>,
    // Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    // Whether the file is new, present on disk (with mtime), or deleted.
    pub disk_state: DiskState,
    // Backing worktree entry, if one exists for this file.
    pub entry_id: Option<ProjectEntryId>,
    // True when the owning worktree is local (enables `as_local`).
    pub is_local: bool,
    // True for files treated as private (e.g. excluded from sharing).
    pub is_private: bool,
}
3814
impl language::File for File {
    /// A `File` acts as a `LocalFile` only when its worktree is local.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn disk_state(&self) -> DiskState {
        self.disk_state
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a display path: the worktree's root name (or its abbreviated
    /// absolute path for invisible worktrees) followed by the file's
    /// worktree-relative path.
    fn full_path(&self, cx: &App) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            // Invisible worktrees show their absolute path, abbreviating the
            // home directory to `~` for local ones.
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(home_dir().as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(home_dir().as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means the file *is* the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a App) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn worktree_id(&self, cx: &App) -> WorktreeId {
        self.worktree.read(cx).id()
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission over RPC.
    fn to_proto(&self, cx: &App) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.read(cx).id().to_proto(),
            entry_id: self.entry_id.map(|id| id.to_proto()),
            path: self.path.as_ref().to_proto(),
            mtime: self.disk_state.mtime().map(|time| time.into()),
            is_deleted: self.disk_state == DiskState::Deleted,
        }
    }

    fn is_private(&self) -> bool {
        self.is_private
    }
}
3886
impl language::LocalFile for File {
    /// Absolute path of the file; when the relative path is empty, the file
    /// is the worktree root itself.
    fn abs_path(&self, cx: &App) -> PathBuf {
        let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
        if self.path.as_ref() == Path::new("") {
            worktree_path.as_path().to_path_buf()
        } else {
            worktree_path.as_path().join(&self.path)
        }
    }

    /// Loads the file's contents as a string on a background task.
    fn load(&self, cx: &App) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background_spawn(async move { fs.load(&abs_path?).await })
    }

    /// Loads the file's raw bytes on a background task.
    fn load_bytes(&self, cx: &App) -> Task<Result<Vec<u8>>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background_spawn(async move { fs.load_bytes(&abs_path?).await })
    }
}
3911
3912impl File {
3913 pub fn for_entry(entry: Entry, worktree: Entity<Worktree>) -> Arc<Self> {
3914 Arc::new(Self {
3915 worktree,
3916 path: entry.path.clone(),
3917 disk_state: if let Some(mtime) = entry.mtime {
3918 DiskState::Present { mtime }
3919 } else {
3920 DiskState::New
3921 },
3922 entry_id: Some(entry.id),
3923 is_local: true,
3924 is_private: entry.is_private,
3925 })
3926 }
3927
3928 pub fn from_proto(
3929 proto: rpc::proto::File,
3930 worktree: Entity<Worktree>,
3931 cx: &App,
3932 ) -> Result<Self> {
3933 let worktree_id = worktree
3934 .read(cx)
3935 .as_remote()
3936 .ok_or_else(|| anyhow!("not remote"))?
3937 .id();
3938
3939 if worktree_id.to_proto() != proto.worktree_id {
3940 return Err(anyhow!("worktree id does not match file"));
3941 }
3942
3943 let disk_state = if proto.is_deleted {
3944 DiskState::Deleted
3945 } else {
3946 if let Some(mtime) = proto.mtime.map(&Into::into) {
3947 DiskState::Present { mtime }
3948 } else {
3949 DiskState::New
3950 }
3951 };
3952
3953 Ok(Self {
3954 worktree,
3955 path: Arc::<Path>::from_proto(proto.path),
3956 disk_state,
3957 entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
3958 is_local: false,
3959 is_private: false,
3960 })
3961 }
3962
3963 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
3964 file.and_then(|f| f.as_any().downcast_ref())
3965 }
3966
3967 pub fn worktree_id(&self, cx: &App) -> WorktreeId {
3968 self.worktree.read(cx).id()
3969 }
3970
3971 pub fn project_entry_id(&self, _: &App) -> Option<ProjectEntryId> {
3972 match self.disk_state {
3973 DiskState::Deleted => None,
3974 _ => self.entry_id,
3975 }
3976 }
3977}
3978
/// A single file or directory tracked by a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Identifier for this entry within the project.
    pub id: ProjectEntryId,
    /// Whether the entry is a file or a (possibly not-yet-loaded) directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode of the entry.
    pub inode: u64,
    /// Modification time; `None` for entries not yet observed on disk
    /// (see `is_created`).
    pub mtime: Option<MTime>,

    /// The canonical form of this entry's path, present when the entry is
    /// reached via a symlink (cf. `is_external`).
    pub canonical_path: Option<Arc<Path>>,
    /// Whether this entry is ignored by Git.
    ///
    /// We only scan ignored entries once the directory is expanded and
    /// exclude them from searches.
    pub is_ignored: bool,

    /// Whether this entry is always included in searches.
    ///
    /// This is used for entries that are always included in searches, even
    /// if they are ignored by git. Overridden by file_scan_exclusions.
    pub is_always_included: bool,

    /// Whether this entry's canonical path is outside of the worktree.
    /// This means the entry is only accessible from the worktree root via a
    /// symlink.
    ///
    /// We only scan entries outside of the worktree once the symlinked
    /// directory is expanded. External entries are treated like gitignored
    /// entries in that they are not included in searches.
    pub is_external: bool,

    /// Whether this entry is considered to be a `.env` file.
    pub is_private: bool,
    /// The entry's size on disk, in bytes.
    pub size: u64,
    /// Characters occurring in the path, used for fuzzy matching.
    pub char_bag: CharBag,
    /// Whether the underlying file is a FIFO (named pipe).
    pub is_fifo: bool,
}
4016
/// The kind of a worktree entry, distinguishing directory load states.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose contents have not been scanned.
    UnloadedDir,
    /// A directory whose scan has been scheduled but not completed.
    PendingDir,
    /// A fully loaded directory.
    Dir,
    /// A regular file.
    File,
}
4024
/// The kind of change observed for a path during scanning.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
4040
/// A change to a git repository tracked by the worktree.
#[derive(Debug)]
pub struct GitRepositoryChange {
    /// The previous state of the repository, if it already existed.
    pub old_repository: Option<RepositoryEntry>,
}
4046
/// A batch of entry changes: `(path, entry id, kind of change)` triples.
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// A batch of repository changes, pairing an entry with its prior repository
/// state.
pub type UpdatedGitRepositoriesSet = Arc<[(Entry, GitRepositoryChange)]>;
4049
/// The git status of a single path within a repository.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct StatusEntry {
    /// Path relative to the repository's work directory.
    pub repo_path: RepoPath,
    /// The file's combined git status.
    pub status: FileStatus,
}
4055
4056impl StatusEntry {
4057 fn to_proto(&self) -> proto::StatusEntry {
4058 let simple_status = match self.status {
4059 FileStatus::Ignored | FileStatus::Untracked => proto::GitStatus::Added as i32,
4060 FileStatus::Unmerged { .. } => proto::GitStatus::Conflict as i32,
4061 FileStatus::Tracked(TrackedStatus {
4062 index_status,
4063 worktree_status,
4064 }) => tracked_status_to_proto(if worktree_status != StatusCode::Unmodified {
4065 worktree_status
4066 } else {
4067 index_status
4068 }),
4069 };
4070
4071 proto::StatusEntry {
4072 repo_path: self.repo_path.as_ref().to_proto(),
4073 simple_status,
4074 status: Some(status_to_proto(self.status)),
4075 }
4076 }
4077}
4078
/// Deserializes a status entry from its protobuf form; fails when the
/// encoded status is invalid.
impl TryFrom<proto::StatusEntry> for StatusEntry {
    type Error = anyhow::Error;

    fn try_from(value: proto::StatusEntry) -> Result<Self, Self::Error> {
        let repo_path = RepoPath(Arc::<Path>::from_proto(value.repo_path));
        let status = status_from_proto(value.simple_status, value.status)?;
        Ok(Self { repo_path, status })
    }
}
4088
/// A borrowed seek dimension tracking the maximum path seen so far.
#[derive(Clone, Debug)]
struct PathProgress<'a> {
    max_path: &'a Path,
}
4093
/// A sum-tree summary combining a maximum path with an inner item summary.
#[derive(Clone, Debug)]
pub struct PathSummary<S> {
    // Greatest path contained in the summarized subtree.
    max_path: Arc<Path>,
    // Aggregated summary of the contained items.
    item_summary: S,
}
4099
impl<S: Summary> Summary for PathSummary<S> {
    type Context = S::Context;

    fn zero(cx: &Self::Context) -> Self {
        Self {
            max_path: Path::new("").into(),
            item_summary: S::zero(cx),
        }
    }

    fn add_summary(&mut self, rhs: &Self, cx: &Self::Context) {
        // `rhs` summarizes items to the right, so its max path wins.
        self.max_path = rhs.max_path.clone();
        self.item_summary.add_summary(&rhs.item_summary, cx);
    }
}
4115
/// Lets cursors over `PathSummary` trees seek by borrowed path.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for PathProgress<'a> {
    fn zero(_: &<PathSummary<S> as Summary>::Context) -> Self {
        Self {
            max_path: Path::new(""),
        }
    }

    fn add_summary(
        &mut self,
        summary: &'a PathSummary<S>,
        _: &<PathSummary<S> as Summary>::Context,
    ) {
        self.max_path = summary.max_path.as_ref()
    }
}
4131
/// Repository entries are summarized by their work directory path alone.
impl sum_tree::Item for RepositoryEntry {
    type Summary = PathSummary<Unit>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.work_directory.path_key().0,
            item_summary: Unit,
        }
    }
}
4142
/// Repository entries are keyed by their work directory path.
impl sum_tree::KeyedItem for RepositoryEntry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        self.work_directory.path_key()
    }
}
4150
/// Status entries are summarized by repo path plus an aggregated git summary.
impl sum_tree::Item for StatusEntry {
    type Summary = PathSummary<GitSummary>;

    fn summary(&self, _: &<Self::Summary as Summary>::Context) -> Self::Summary {
        PathSummary {
            max_path: self.repo_path.0.clone(),
            item_summary: self.status.summary(),
        }
    }
}
4161
/// Status entries are keyed by their repository-relative path.
impl sum_tree::KeyedItem for StatusEntry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        PathKey(self.repo_path.0.clone())
    }
}
4169
/// Allows accumulating just the git portion of a `PathSummary<GitSummary>`.
impl<'a> sum_tree::Dimension<'a, PathSummary<GitSummary>> for GitSummary {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<GitSummary>, _: &()) {
        *self += summary.item_summary
    }
}
4179
/// Lets cursors over `PathSummary` trees seek by owned path key.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for PathKey {
    fn zero(_: &S::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<S>, _: &S::Context) {
        self.0 = summary.max_path.clone();
    }
}
4189
/// Lets traversal cursors track progress through `PathSummary` trees.
impl<'a, S: Summary> sum_tree::Dimension<'a, PathSummary<S>> for TraversalProgress<'a> {
    fn zero(_cx: &S::Context) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, summary: &'a PathSummary<S>, _: &S::Context) {
        self.max_path = summary.max_path.as_ref();
    }
}
4199
impl Entry {
    /// Creates an entry from filesystem metadata, assigning it a fresh id.
    ///
    /// Directories start as `PendingDir` until their contents are scanned.
    /// Ignore/external/private flags default to `false`; the scanner sets
    /// them afterwards.
    fn new(
        path: Arc<Path>,
        metadata: &fs::Metadata,
        next_entry_id: &AtomicUsize,
        root_char_bag: CharBag,
        canonical_path: Option<Arc<Path>>,
    ) -> Self {
        let char_bag = char_bag_for_path(root_char_bag, &path);
        Self {
            id: ProjectEntryId::new(next_entry_id),
            kind: if metadata.is_dir {
                EntryKind::PendingDir
            } else {
                EntryKind::File
            },
            path,
            inode: metadata.inode,
            mtime: Some(metadata.mtime),
            size: metadata.len,
            canonical_path,
            is_ignored: false,
            is_always_included: false,
            is_external: false,
            is_private: false,
            char_bag,
            is_fifo: metadata.is_fifo,
        }
    }

    /// Whether the entry has been observed on disk (it carries an mtime).
    pub fn is_created(&self) -> bool {
        self.mtime.is_some()
    }

    /// Whether the entry is any kind of directory.
    pub fn is_dir(&self) -> bool {
        self.kind.is_dir()
    }

    /// Whether the entry is a regular file.
    pub fn is_file(&self) -> bool {
        self.kind.is_file()
    }
}
4242
impl EntryKind {
    /// True for all directory variants, regardless of load state.
    pub fn is_dir(&self) -> bool {
        matches!(
            self,
            EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
        )
    }

    /// True only for directories whose contents have never been scanned.
    pub fn is_unloaded(&self) -> bool {
        matches!(self, EntryKind::UnloadedDir)
    }

    /// True for regular files.
    pub fn is_file(&self) -> bool {
        matches!(self, EntryKind::File)
    }
}
4259
4260impl sum_tree::Item for Entry {
4261 type Summary = EntrySummary;
4262
4263 fn summary(&self, _cx: &()) -> Self::Summary {
4264 let non_ignored_count = if (self.is_ignored || self.is_external) && !self.is_always_included
4265 {
4266 0
4267 } else {
4268 1
4269 };
4270 let file_count;
4271 let non_ignored_file_count;
4272 if self.is_file() {
4273 file_count = 1;
4274 non_ignored_file_count = non_ignored_count;
4275 } else {
4276 file_count = 0;
4277 non_ignored_file_count = 0;
4278 }
4279
4280 EntrySummary {
4281 max_path: self.path.clone(),
4282 count: 1,
4283 non_ignored_count,
4284 file_count,
4285 non_ignored_file_count,
4286 }
4287 }
4288}
4289
/// Entries are keyed by their worktree-relative path.
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
4297
/// Aggregated statistics for a subtree of worktree entries.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest path in the subtree.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that are neither ignored nor external (or always included).
    non_ignored_count: usize,
    // Number of file entries.
    file_count: usize,
    // File entries counted by `non_ignored_count`'s criteria.
    non_ignored_file_count: usize,
}
4306
/// The empty summary: no entries, with an empty max path.
impl Default for EntrySummary {
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}
4318
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // `rhs` covers items to the right, so its max path replaces ours;
        // all counters accumulate.
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.non_ignored_count += rhs.non_ignored_count;
        self.file_count += rhs.file_count;
        self.non_ignored_file_count += rhs.non_ignored_file_count;
    }
}
4334
/// An id-keyed record mirroring `entries_by_path`, enabling lookups by
/// `ProjectEntryId`.
#[derive(Clone, Debug)]
struct PathEntry {
    // Id of the corresponding `Entry`.
    id: ProjectEntryId,
    // Worktree-relative path of the entry.
    path: Arc<Path>,
    // Mirror of the entry's ignored flag.
    is_ignored: bool,
    // Scan in which this record was last written.
    scan_id: usize,
}
4342
/// Path entries are summarized by their maximum id.
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    fn summary(&self, _cx: &()) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
4350
/// Path entries are keyed by entry id.
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    fn key(&self) -> Self::Key {
        self.id
    }
}
4358
/// Summary over `PathEntry` items: the greatest id in the subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
4363
4364impl sum_tree::Summary for PathEntrySummary {
4365 type Context = ();
4366
4367 fn zero(_cx: &Self::Context) -> Self {
4368 Default::default()
4369 }
4370
4371 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
4372 self.max_id = summary.max_id;
4373 }
4374}
4375
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn zero(_cx: &()) -> Self {
        Default::default()
    }

    // Advancing past a subtree moves this dimension to that subtree's
    // greatest id.
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
4385
/// A path used as the ordering key in the entries sum tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct PathKey(Arc<Path>);
4388
4389impl Default for PathKey {
4390 fn default() -> Self {
4391 Self(Path::new("").into())
4392 }
4393}
4394
4395impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
4396 fn zero(_cx: &()) -> Self {
4397 Default::default()
4398 }
4399
4400 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
4401 self.0 = summary.max_path.clone();
4402 }
4403}
4404
/// Scans a worktree's file-system state on background tasks, keeping the
/// shared snapshot in `state` up to date and streaming progress over
/// `status_updates_tx`.
struct BackgroundScanner {
    // Shared scanner state (snapshot, pending scan bookkeeping).
    state: Arc<Mutex<BackgroundScannerState>>,
    // File-system implementation used for all disk access.
    fs: Arc<dyn Fs>,
    // Whether the underlying FS is case-sensitive; when it isn't, metadata
    // for case-only renames is filtered (see `reload_entries_for_paths`).
    fs_case_sensitive: bool,
    // Channel over which scan progress/results are reported.
    status_updates_tx: UnboundedSender<ScanState>,
    // Number of scans currently in flight; > 0 means "still scanning".
    scans_running: Arc<AtomicI32>,
    // Executor used to spawn and parallelize scanning work.
    executor: BackgroundExecutor,
    // Incoming requests to rescan specific paths.
    scan_requests_rx: channel::Receiver<ScanRequest>,
    // Incoming requests to eagerly load everything under a path prefix.
    path_prefixes_to_scan_rx: channel::Receiver<PathPrefixScanRequest>,
    // Source of unique ids for newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    // Current lifecycle phase (initial scan vs. event processing).
    phase: BackgroundScannerPhase,
    // Watcher used to subscribe to FS change notifications for scanned dirs.
    watcher: Arc<dyn Watcher>,
    // Worktree settings (excluded / always-included path matchers).
    settings: WorktreeSettings,
    // NOTE(review): not referenced in this chunk; presumably controls whether
    // private files are shared with collaborators — confirm elsewhere.
    share_private_files: bool,
}
4420
/// Lifecycle phase of the [`BackgroundScanner`]; affects how precisely
/// change events can be reported.
#[derive(Copy, Clone, PartialEq)]
enum BackgroundScannerPhase {
    // Performing the first full scan of the worktree.
    InitialScan,
    // Draining FS events that arrived during the initial scan; updates are
    // less precise because the previous state wasn't fully loaded yet.
    EventsReceivedDuringInitialScan,
    // Steady state: processing FS events as they arrive.
    Events,
}
4427
4428impl BackgroundScanner {
    /// Main loop of the background scanner: locates a containing git
    /// repository and ancestor `.gitignore` files, performs the initial scan,
    /// then processes FS events and scan requests until the worktree is
    /// dropped (all request channels close).
    async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>) {
        // If the worktree root does not contain a git repository, then find
        // the git repository in an ancestor directory. Find any gitignore files
        // in ancestor directories.
        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
        let mut containing_git_repository = None;
        for (index, ancestor) in root_abs_path.as_path().ancestors().enumerate() {
            if index != 0 {
                if Some(ancestor) == self.fs.home_dir().as_deref() {
                    // Unless $HOME is itself the worktree root, don't consider it as a
                    // containing git repository---expensive and likely unwanted.
                    break;
                } else if let Ok(ignore) =
                    build_gitignore(&ancestor.join(*GITIGNORE), self.fs.as_ref()).await
                {
                    self.state
                        .lock()
                        .snapshot
                        .ignores_by_parent_abs_path
                        .insert(ancestor.into(), (ignore.into(), false));
                }
            }

            let ancestor_dot_git = ancestor.join(*DOT_GIT);
            log::info!("considering ancestor: {ancestor_dot_git:?}");
            // Check whether the directory or file called `.git` exists (in the
            // case of worktrees it's a file.)
            if self
                .fs
                .metadata(&ancestor_dot_git)
                .await
                .is_ok_and(|metadata| metadata.is_some())
            {
                if index != 0 {
                    // We canonicalize, since the FS events use the canonicalized path.
                    if let Some(ancestor_dot_git) =
                        self.fs.canonicalize(&ancestor_dot_git).await.log_err()
                    {
                        let location_in_repo = root_abs_path
                            .as_path()
                            .strip_prefix(ancestor)
                            .unwrap()
                            .into();
                        log::info!(
                            "inserting parent git repo for this worktree: {location_in_repo:?}"
                        );
                        // We associate the external git repo with our root folder and
                        // also mark where in the git repo the root folder is located.
                        let local_repository = self.state.lock().insert_git_repository_for_path(
                            WorkDirectory::AboveProject {
                                absolute_path: ancestor.into(),
                                location_in_repo,
                            },
                            ancestor_dot_git.clone().into(),
                            self.fs.as_ref(),
                            self.watcher.as_ref(),
                        );

                        if local_repository.is_some() {
                            containing_git_repository = Some(ancestor_dot_git)
                        }
                    };
                }

                // Reached root of git repository.
                break;
            }
        }

        log::info!("containing git repository: {containing_git_repository:?}");

        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        {
            let mut state = self.state.lock();
            state.snapshot.scan_id += 1;
            if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
                let ignore_stack = state
                    .snapshot
                    .ignore_stack_for_abs_path(root_abs_path.as_path(), true);
                if ignore_stack.is_abs_path_ignored(root_abs_path.as_path(), true) {
                    root_entry.is_ignored = true;
                    state.insert_entry(root_entry.clone(), self.fs.as_ref(), self.watcher.as_ref());
                }
                state.enqueue_scan_dir(root_abs_path.into(), &root_entry, &scan_job_tx);
            }
        };

        // Perform an initial scan of the directory.
        // Dropping our sender lets `scan_dirs` terminate once all queued
        // (and recursively enqueued) jobs are drained.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut state = self.state.lock();
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
        }

        let scanning = self.scans_running.load(atomic::Ordering::Acquire) > 0;
        self.send_status_update(scanning, SmallVec::new());

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
        if let Poll::Ready(Some(mut paths)) = futures::poll!(fs_events_rx.next()) {
            while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
                paths.extend(more_paths);
            }
            self.process_events(paths.into_iter().map(Into::into).collect())
                .await;
        }
        if let Some(abs_path) = containing_git_repository {
            self.process_events(vec![abs_path]).await;
        }

        // Continue processing events until the worktree is dropped.
        self.phase = BackgroundScannerPhase::Events;

        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.next_scan_request().fuse() => {
                    let Ok(request) = request else { break };
                    let scanning = self.scans_running.load(atomic::Ordering::Acquire) > 0;
                    if !self.process_scan_request(request, scanning).await {
                        return;
                    }
                }

                path_prefix_request = self.path_prefixes_to_scan_rx.recv().fuse() => {
                    let Ok(request) = path_prefix_request else { break };
                    log::trace!("adding path prefix {:?}", request.path);

                    let did_scan = self.forcibly_load_paths(&[request.path.clone()]).await;
                    if did_scan {
                        let abs_path =
                        {
                            let mut state = self.state.lock();
                            state.path_prefixes_to_scan.insert(request.path.clone());
                            state.snapshot.abs_path.as_path().join(&request.path)
                        };

                        if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
                            self.process_events(vec![abs_path]).await;
                        }
                    }
                    let scanning = self.scans_running.load(atomic::Ordering::Acquire) > 0;
                    self.send_status_update(scanning, request.done);
                }

                paths = fs_events_rx.next().fuse() => {
                    let Some(mut paths) = paths else { break };
                    // Batch up any further events that are already pending.
                    while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
                        paths.extend(more_paths);
                    }
                    self.process_events(paths.into_iter().map(Into::into).collect()).await;
                }
            }
        }
    }
4588
    /// Handles an explicit request to rescan a set of worktree-relative paths.
    /// Returns `false` when the scanner should shut down (callers `return` on
    /// a `false` result, which corresponds to the status update failing to
    /// send).
    async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
        log::debug!("rescanning paths {:?}", request.relative_paths);

        request.relative_paths.sort_unstable();
        self.forcibly_load_paths(&request.relative_paths).await;

        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(root_path.as_path()).await {
            Ok(path) => SanitizedPath::from(path),
            Err(err) => {
                log::error!("failed to canonicalize root path: {}", err);
                return true;
            }
        };
        // Map each relative path to an absolute path rooted at the canonical
        // root (an empty relative path maps to the root itself).
        let abs_paths = request
            .relative_paths
            .iter()
            .map(|path| {
                if path.file_name().is_some() {
                    root_canonical_path.as_path().join(path).to_path_buf()
                } else {
                    root_canonical_path.as_path().to_path_buf()
                }
            })
            .collect::<Vec<_>>();

        {
            let mut state = self.state.lock();
            let is_idle = state.snapshot.completed_scan_id == state.snapshot.scan_id;
            state.snapshot.scan_id += 1;
            // If no other scan is in progress, this request is the only
            // outstanding work, so the snapshot stays "complete".
            if is_idle {
                state.snapshot.completed_scan_id = state.snapshot.scan_id;
            }
        }

        self.reload_entries_for_paths(
            root_path,
            root_canonical_path,
            &request.relative_paths,
            abs_paths,
            None,
        )
        .await;

        self.send_status_update(scanning, request.done)
    }
4635
    /// Processes a batch of raw FS events: detects a renamed/missing root,
    /// filters out events that are irrelevant (inside skipped `.git`
    /// internals, excluded by settings, or within unloaded directories),
    /// reloads the affected entries, updates ignore statuses, and schedules
    /// git repository updates for any touched `.git` directories.
    async fn process_events(&self, mut abs_paths: Vec<PathBuf>) {
        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(root_path.as_path()).await {
            Ok(path) => SanitizedPath::from(path),
            Err(err) => {
                // The root could not be canonicalized; check whether it was
                // renamed (via the retained file handle) and notify the
                // worktree either way.
                let new_path = self
                    .state
                    .lock()
                    .snapshot
                    .root_file_handle
                    .clone()
                    .and_then(|handle| handle.current_path(&self.fs).log_err())
                    .map(SanitizedPath::from)
                    .filter(|new_path| *new_path != root_path);

                if let Some(new_path) = new_path.as_ref() {
                    log::info!(
                        "root renamed from {} to {}",
                        root_path.as_path().display(),
                        new_path.as_path().display()
                    )
                } else {
                    log::warn!("root path could not be canonicalized: {}", err);
                }
                self.status_updates_tx
                    .unbounded_send(ScanState::RootUpdated { new_path })
                    .ok();
                return;
            }
        };

        // Certain directories may have FS changes, but do not lead to git data changes that Zed cares about.
        // Ignore these, to avoid Zed unnecessarily rescanning git metadata.
        let skipped_files_in_dot_git = HashSet::from_iter([*COMMIT_MESSAGE, *INDEX_LOCK]);
        let skipped_dirs_in_dot_git = [*FSMONITOR_DAEMON, *LFS_DIR];

        let mut relative_paths = Vec::with_capacity(abs_paths.len());
        let mut dot_git_abs_paths = Vec::new();
        abs_paths.sort_unstable();
        // After sorting, an ancestor directory immediately precedes its
        // descendants, so `dedup_by` drops any path whose ancestor is already
        // in the batch (it will be rescanned as part of the ancestor).
        abs_paths.dedup_by(|a, b| a.starts_with(b));
        abs_paths.retain(|abs_path| {
            let abs_path = SanitizedPath::from(abs_path);

            let snapshot = &self.state.lock().snapshot;
            {
                let mut is_git_related = false;

                // If the event is inside a `.git` directory, find that
                // directory and the event's path relative to it.
                let dot_git_paths = abs_path.as_path().ancestors().find_map(|ancestor| {
                    if smol::block_on(is_git_dir(ancestor, self.fs.as_ref())) {
                        let path_in_git_dir = abs_path.as_path().strip_prefix(ancestor).expect("stripping off the ancestor");
                        Some((ancestor.to_owned(), path_in_git_dir.to_owned()))
                    } else {
                        None
                    }
                });

                if let Some((dot_git_abs_path, path_in_git_dir)) = dot_git_paths {
                    if skipped_files_in_dot_git.contains(path_in_git_dir.as_os_str()) || skipped_dirs_in_dot_git.iter().any(|skipped_git_subdir| path_in_git_dir.starts_with(skipped_git_subdir)) {
                        log::debug!("ignoring event {abs_path:?} as it's in the .git directory among skipped files or directories");
                        return false;
                    }

                    is_git_related = true;
                    if !dot_git_abs_paths.contains(&dot_git_abs_path) {
                        dot_git_abs_paths.push(dot_git_abs_path);
                    }
                }

                let relative_path: Arc<Path> =
                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                        path.into()
                    } else {
                        if is_git_related {
                            log::debug!(
                                "ignoring event {abs_path:?}, since it's in git dir outside of root path {root_canonical_path:?}",
                            );
                        } else {
                            log::error!(
                                "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
                            );
                        }
                        return false;
                    };

                // A changed .gitignore can affect the status of any repository
                // whose work directory contains it.
                if abs_path.0.file_name() == Some(*GITIGNORE) {
                    for (_, repo) in snapshot.git_repositories.iter().filter(|(_, repo)| repo.directory_contains(&relative_path)) {
                        if !dot_git_abs_paths.iter().any(|dot_git_abs_path| dot_git_abs_path == repo.dot_git_dir_abs_path.as_ref()) {
                            dot_git_abs_paths.push(repo.dot_git_dir_abs_path.to_path_buf());
                        }
                    }
                }

                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
                    snapshot
                        .entry_for_path(parent)
                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
                });
                if !parent_dir_is_loaded {
                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
                    return false;
                }

                if self.settings.is_path_excluded(&relative_path) {
                    if !is_git_related {
                        log::debug!("ignoring FS event for excluded path {relative_path:?}");
                    }
                    return false;
                }

                relative_paths.push(relative_path);
                true
            }
        });

        if relative_paths.is_empty() && dot_git_abs_paths.is_empty() {
            return;
        }

        self.state.lock().snapshot.scan_id += 1;

        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        log::debug!("received fs events {:?}", relative_paths);
        self.reload_entries_for_paths(
            root_path,
            root_canonical_path,
            &relative_paths,
            abs_paths,
            Some(scan_job_tx.clone()),
        )
        .await;

        self.update_ignore_statuses(scan_job_tx).await;
        self.scan_dirs(false, scan_job_rx).await;

        let status_update = if !dot_git_abs_paths.is_empty() {
            Some(self.update_git_repositories(dot_git_abs_paths))
        } else {
            None
        };

        // Finalize the scan off-thread: wait for the git update (if any),
        // mark the scan complete, then send a status update.
        let phase = self.phase;
        let status_update_tx = self.status_updates_tx.clone();
        let state = self.state.clone();
        let scans_running = self.scans_running.clone();
        self.executor
            .spawn(async move {
                if let Some(status_update) = status_update {
                    status_update.await;
                }

                {
                    let mut state = state.lock();
                    state.snapshot.completed_scan_id = state.snapshot.scan_id;
                    for (_, entry) in mem::take(&mut state.removed_entries) {
                        state.scanned_dirs.remove(&entry.id);
                    }
                    #[cfg(test)]
                    state.snapshot.check_git_invariants();
                }
                let scanning = scans_running.load(atomic::Ordering::Acquire) > 0;
                send_status_update_inner(phase, state, status_update_tx, scanning, SmallVec::new());
            })
            .detach();
    }
4800
    /// Ensures the given paths are loaded in the snapshot by synchronously
    /// scanning any of their ancestor directories that are still
    /// `EntryKind::UnloadedDir`. Returns `true` if any scanning was performed.
    async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        {
            let mut state = self.state.lock();
            let root_path = state.snapshot.abs_path.clone();
            for path in paths {
                // Enqueue the shallowest unloaded ancestor; scanning it will
                // recursively cover the requested path.
                for ancestor in path.ancestors() {
                    if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
                        if entry.kind == EntryKind::UnloadedDir {
                            let abs_path = root_path.as_path().join(ancestor);
                            state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
                            state.paths_to_scan.insert(path.clone());
                            break;
                        }
                    }
                }
            }
            drop(scan_job_tx);
        }
        // Drain the queue inline. `scan_dir` pushes nested jobs onto the same
        // queue, so this loop runs until the whole subtree is loaded.
        while let Ok(job) = scan_job_rx.recv().await {
            self.scan_dir(&job).await.log_err();
        }

        !mem::take(&mut self.state.lock().paths_to_scan).is_empty()
    }
4826
    /// Drains `scan_jobs_rx` with one worker task per CPU, interleaving
    /// incoming scan requests (prioritized) and throttled progress updates.
    /// Returns once the job queue is closed and empty.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        // If the receiving side is gone, the worktree was dropped; bail.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        inc_scans_running(&self.scans_running);
        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.next_scan_request().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, SmallVec::new());
                                        }
                                        Err(count) => {
                                            // Another worker won the race; adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    });
                }
            })
            .await;

        dec_scans_running(&self.scans_running, 1);
    }
4900
    /// Sends a status update reflecting the current state, forwarding this
    /// scanner's phase, shared state, and status channel to
    /// `send_status_update_inner`, and returns its result (presumably whether
    /// the update was delivered — callers stop scanning on `false`).
    fn send_status_update(&self, scanning: bool, barrier: SmallVec<[barrier::Sender; 1]>) -> bool {
        send_status_update_inner(
            self.phase,
            self.state.clone(),
            self.status_updates_tx.clone(),
            scanning,
            barrier,
        )
    }
4910
    /// Scans a single directory described by `job`: reads its children,
    /// registers any `.git` repository or `.gitignore` found, builds entries
    /// for each child, and enqueues scan jobs for subdirectories that should
    /// be descended into. Git status updates triggered here are awaited on a
    /// spawned background task.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let root_abs_path;
        let root_char_bag;
        {
            let snapshot = &self.state.lock().snapshot;
            if self.settings.is_path_excluded(&job.path) {
                log::error!("skipping excluded directory {:?}", job.path);
                return Ok(());
            }
            log::info!("scanning directory {:?}", job.path);
            root_abs_path = snapshot.abs_path().clone();
            root_char_bag = snapshot.root_char_bag;
        }

        let next_entry_id = self.next_entry_id.clone();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let mut root_canonical_path = None;
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        // Collect the directory's children, logging and skipping unreadable
        // ones.
        let mut child_paths = self
            .fs
            .read_dir(&job.abs_path)
            .await?
            .filter_map(|entry| async {
                match entry {
                    Ok(entry) => Some(entry),
                    Err(error) => {
                        log::error!("error processing entry {:?}", error);
                        None
                    }
                }
            })
            .collect::<Vec<_>>()
            .await;

        // Ensure that .git and .gitignore are processed first.
        swap_to_front(&mut child_paths, *GITIGNORE);
        swap_to_front(&mut child_paths, *DOT_GIT);

        let mut git_status_update_jobs = Vec::new();
        for child_abs_path in child_paths {
            let child_abs_path: Arc<Path> = child_abs_path.into();
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();

            if child_name == *DOT_GIT {
                {
                    let mut state = self.state.lock();
                    let repo = state.insert_git_repository(
                        child_path.clone(),
                        self.fs.as_ref(),
                        self.watcher.as_ref(),
                    );
                    if let Some(local_repo) = repo {
                        // Balanced by `dec_scans_running` in the spawned task
                        // below, once the status update completes.
                        inc_scans_running(&self.scans_running);
                        git_status_update_jobs
                            .push(self.schedule_git_statuses_update(&mut state, local_repo));
                    }
                }
            } else if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }
            }

            if self.settings.is_path_excluded(&child_path) {
                log::debug!("skipping excluded child entry {child_path:?}");
                self.state.lock().remove_path(&child_path);
                continue;
            }

            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {child_abs_path:?}: {err:?}");
                    continue;
                }
            };

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
                None,
            );

            if job.is_external {
                child_entry.is_external = true;
            } else if child_metadata.is_symlink {
                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
                    Ok(path) => path,
                    Err(err) => {
                        log::error!(
                            "error reading target of symlink {:?}: {:?}",
                            child_abs_path,
                            err
                        );
                        continue;
                    }
                };

                // lazily canonicalize the root path in order to determine if
                // symlinks point outside of the worktree.
                let root_canonical_path = match &root_canonical_path {
                    Some(path) => path,
                    None => match self.fs.canonicalize(&root_abs_path).await {
                        Ok(path) => root_canonical_path.insert(path),
                        Err(err) => {
                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
                            continue;
                        }
                    },
                };

                if !canonical_path.starts_with(root_canonical_path) {
                    child_entry.is_external = true;
                }

                child_entry.canonical_path = Some(canonical_path.into());
            }

            if child_entry.is_dir() {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_always_included = self.settings.is_path_always_included(&child_path);

                // Avoid recursing until crash in the case of a recursive symlink
                if job.ancestor_inodes.contains(&child_entry.inode) {
                    new_jobs.push(None);
                } else {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path.clone(),
                        path: child_path,
                        is_external: child_entry.is_external,
                        ignore_stack: if child_entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
                child_entry.is_always_included = self.settings.is_path_always_included(&child_path);
            }

            {
                let relative_path = job.path.join(child_name);
                if self.is_path_private(&relative_path) {
                    log::debug!("detected private file: {relative_path:?}");
                    child_entry.is_private = true;
                }
            }

            new_entries.push(child_entry);
        }

        // Await the git status updates on a separate task so directory
        // scanning isn't blocked on them; send a status update when done.
        let task_state = self.state.clone();
        let phase = self.phase;
        let status_updates_tx = self.status_updates_tx.clone();
        let scans_running = self.scans_running.clone();
        self.executor
            .spawn(async move {
                if !git_status_update_jobs.is_empty() {
                    let status_updates = join_all(git_status_update_jobs).await;
                    let status_updated = status_updates
                        .iter()
                        .any(|update_result| update_result.is_ok());
                    dec_scans_running(&scans_running, status_updates.len() as i32);
                    if status_updated {
                        let scanning = scans_running.load(atomic::Ordering::Acquire) > 0;
                        send_status_update_inner(
                            phase,
                            task_state,
                            status_updates_tx,
                            scanning,
                            SmallVec::new(),
                        );
                    }
                }
            })
            .detach();

        let mut state = self.state.lock();

        // Identify any subdirectories that should not be scanned.
        // `new_jobs` is parallel to the directory entries in `new_entries`:
        // `job_ix` tracks the job slot for the current directory entry.
        let mut job_ix = 0;
        for entry in &mut new_entries {
            state.reuse_entry_id(entry);
            if entry.is_dir() {
                if state.should_scan_directory(entry) {
                    job_ix += 1;
                } else {
                    log::debug!("defer scanning directory {:?}", entry.path);
                    entry.kind = EntryKind::UnloadedDir;
                    new_jobs.remove(job_ix);
                }
            }
            if entry.is_always_included {
                state
                    .snapshot
                    .always_included_entries
                    .push(entry.path.clone());
            }
        }

        state.populate_dir(&job.path, new_entries, new_ignore);
        self.watcher.add(job.abs_path.as_ref()).log_err();

        for new_job in new_jobs.into_iter().flatten() {
            job.scan_queue
                .try_send(new_job)
                .expect("channel is unbounded");
        }

        Ok(())
    }
5146
    /// Reloads the given paths after FS events or scan requests: re-reads
    /// their metadata, removes entries that no longer exist, refreshes git
    /// statuses for affected repositories, and — when `scan_queue_tx` is
    /// provided — enqueues recursive rescans for directories.
    ///
    /// All list arguments should be sorted before calling this function
    async fn reload_entries_for_paths(
        &self,
        root_abs_path: SanitizedPath,
        root_canonical_path: SanitizedPath,
        relative_paths: &[Arc<Path>],
        abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) {
        // grab metadata for all requested paths
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| async move {
                    let metadata = self.fs.metadata(abs_path).await?;
                    if let Some(metadata) = metadata {
                        let canonical_path = self.fs.canonicalize(abs_path).await?;

                        // If we're on a case-insensitive filesystem (default on macOS), we want
                        // to only ignore metadata for non-symlink files if their absolute-path matches
                        // the canonical-path.
                        // Because if not, this might be a case-only-renaming (`mv test.txt TEST.TXT`)
                        // and we want to ignore the metadata for the old path (`test.txt`) so it's
                        // treated as removed.
                        if !self.fs_case_sensitive && !metadata.is_symlink {
                            let canonical_file_name = canonical_path.file_name();
                            let file_name = abs_path.file_name();
                            if canonical_file_name != file_name {
                                return Ok(None);
                            }
                        }

                        anyhow::Ok(Some((metadata, SanitizedPath::from(canonical_path))))
                    } else {
                        Ok(None)
                    }
                })
                .collect::<Vec<_>>(),
        )
        .await;

        let mut state = self.state.lock();
        let doing_recursive_update = scan_queue_tx.is_some();

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            if matches!(metadata, Ok(None)) || doing_recursive_update {
                log::trace!("remove path {:?}", path);
                state.remove_path(path);
            }
        }

        // Group all relative paths by their git repository.
        let mut paths_by_git_repo = HashMap::default();
        for relative_path in relative_paths.iter() {
            let repository_data = state
                .snapshot
                .local_repo_for_path(relative_path)
                .zip(state.snapshot.repository_for_path(relative_path));
            if let Some((local_repo, entry)) = repository_data {
                if let Ok(repo_path) = local_repo.relativize(relative_path) {
                    paths_by_git_repo
                        .entry(local_repo.work_directory.clone())
                        .or_insert_with(|| RepoPaths {
                            entry: entry.clone(),
                            repo: local_repo.repo_ptr.clone(),
                            repo_paths: Default::default(),
                        })
                        .add_path(repo_path);
                }
            }
        }

        // Refresh git statuses per repository, recording only the paths whose
        // status actually changed (inserts) or disappeared (removals).
        for (work_directory, mut paths) in paths_by_git_repo {
            if let Ok(status) = paths.repo.status(&paths.repo_paths) {
                let mut changed_path_statuses = Vec::new();
                let statuses = paths.entry.statuses_by_path.clone();
                let mut cursor = statuses.cursor::<PathProgress>(&());

                for (repo_path, status) in &*status.entries {
                    paths.remove_repo_path(repo_path);
                    // Skip paths whose recorded status already matches.
                    if cursor.seek_forward(&PathTarget::Path(repo_path), Bias::Left, &()) {
                        if &cursor.item().unwrap().status == status {
                            continue;
                        }
                    }

                    changed_path_statuses.push(Edit::Insert(StatusEntry {
                        repo_path: repo_path.clone(),
                        status: *status,
                    }));
                }

                // Any requested paths not present in the fresh status output
                // no longer have a status entry; remove their stale records.
                let mut cursor = statuses.cursor::<PathProgress>(&());
                for path in paths.repo_paths {
                    if cursor.seek_forward(&PathTarget::Path(&path), Bias::Left, &()) {
                        changed_path_statuses.push(Edit::Remove(PathKey(path.0)));
                    }
                }

                if !changed_path_statuses.is_empty() {
                    let work_directory_id = state.snapshot.repositories.update(
                        &work_directory.path_key(),
                        &(),
                        move |repository_entry| {
                            repository_entry
                                .statuses_by_path
                                .edit(changed_path_statuses, &());

                            repository_entry.work_directory_id
                        },
                    );

                    if let Some(work_directory_id) = work_directory_id {
                        let scan_id = state.snapshot.scan_id;
                        state.snapshot.git_repositories.update(
                            &work_directory_id,
                            |local_repository_entry| {
                                local_repository_entry.status_scan_id = scan_id;
                            },
                        );
                    }
                }
            }
        }

        // Rebuild entries for paths that still exist on disk.
        for (path, metadata) in relative_paths.iter().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.as_path().join(path).into();
            match metadata {
                Ok(Some((metadata, canonical_path))) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let is_external = !canonical_path.starts_with(&root_canonical_path);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                        if metadata.is_symlink {
                            Some(canonical_path.as_path().to_path_buf().into())
                        } else {
                            None
                        },
                    );

                    let is_dir = fs_entry.is_dir();
                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
                    fs_entry.is_external = is_external;
                    fs_entry.is_private = self.is_path_private(path);
                    fs_entry.is_always_included = self.settings.is_path_always_included(path);

                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, is_dir) {
                        if state.should_scan_directory(&fs_entry)
                            || (fs_entry.path.as_os_str().is_empty()
                                && abs_path.file_name() == Some(*DOT_GIT))
                        {
                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
                        } else {
                            fs_entry.kind = EntryKind::UnloadedDir;
                        }
                    }

                    state.insert_entry(fs_entry.clone(), self.fs.as_ref(), self.watcher.as_ref());
                }
                Ok(None) => {
                    self.remove_repo_path(path, &mut state.snapshot);
                }
                Err(err) => {
                    log::error!("error reading file {abs_path:?} on event: {err:#}");
                }
            }
        }

        util::extend_sorted(
            &mut state.changed_paths,
            relative_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );
    }
5330
5331 fn remove_repo_path(&self, path: &Arc<Path>, snapshot: &mut LocalSnapshot) -> Option<()> {
5332 if !path
5333 .components()
5334 .any(|component| component.as_os_str() == *DOT_GIT)
5335 {
5336 if let Some(repository) = snapshot.repository(PathKey(path.clone())) {
5337 snapshot
5338 .git_repositories
5339 .remove(&repository.work_directory_id);
5340 snapshot
5341 .snapshot
5342 .repositories
5343 .remove(&repository.work_directory.path_key(), &());
5344 return Some(());
5345 }
5346 }
5347
5348 Some(())
5349 }
5350
    /// Recomputes ignore status for all directories whose `.gitignore` files
    /// changed, fanning the work out across one worker task per CPU. Stale
    /// gitignore records (whose file no longer exists in the snapshot) are
    /// dropped along the way.
    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
        let mut ignores_to_update = Vec::new();
        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        let prev_snapshot;
        {
            let snapshot = &mut self.state.lock().snapshot;
            let abs_path = snapshot.abs_path.clone();
            snapshot
                .ignores_by_parent_abs_path
                .retain(|parent_abs_path, (_, needs_update)| {
                    if let Ok(parent_path) = parent_abs_path.strip_prefix(abs_path.as_path()) {
                        if *needs_update {
                            *needs_update = false;
                            if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                                ignores_to_update.push(parent_abs_path.clone());
                            }
                        }

                        // Drop records whose .gitignore no longer exists.
                        let ignore_path = parent_path.join(*GITIGNORE);
                        if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                            return false;
                        }
                    }
                    true
                });

            // Since updates apply recursively, skip any directory whose
            // ancestor is already queued for an update.
            ignores_to_update.sort_unstable();
            let mut ignores_to_update = ignores_to_update.into_iter().peekable();
            while let Some(parent_abs_path) = ignores_to_update.next() {
                while ignores_to_update
                    .peek()
                    .map_or(false, |p| p.starts_with(&parent_abs_path))
                {
                    ignores_to_update.next().unwrap();
                }

                let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
                ignore_queue_tx
                    .send_blocking(UpdateIgnoreStatusJob {
                        abs_path: parent_abs_path,
                        ignore_stack,
                        ignore_queue: ignore_queue_tx.clone(),
                        scan_queue: scan_job_tx.clone(),
                    })
                    .unwrap();
            }

            prev_snapshot = snapshot.clone();
        }
        // Close our end of the queue so the workers below terminate once all
        // (recursively enqueued) jobs are processed.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.next_scan_request().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &prev_snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
5429
    /// Re-evaluates the `is_ignored` flag for the direct children of
    /// `job.abs_path`, records any entries whose flag flipped, and enqueues a
    /// recursive job for every child directory.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        log::trace!("update ignore status {:?}", job.abs_path);

        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own .gitignore when evaluating its children.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job
            .abs_path
            .strip_prefix(snapshot.abs_path.as_path())
            .unwrap();

        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());

            if entry.is_dir() {
                // An ignored directory short-circuits all descendants to ignored.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };

                // Scan any directories that were previously ignored and weren't previously scanned.
                if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
                    let state = self.state.lock();
                    if state.should_scan_directory(&entry) {
                        state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
                    }
                }

                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                        scan_queue: job.scan_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only entries whose ignored state actually changed produce edits.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Apply the accumulated edits and record the affected paths so the
        // next status update reports them as changed.
        let state = &mut self.state.lock();
        for edit in &entries_by_path_edits {
            if let Edit::Insert(entry) = edit {
                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
                    state.changed_paths.insert(ix, entry.path.clone());
                }
            }
        }

        state
            .snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
5500
    /// Reloads git state for the repositories owning the given `.git` paths:
    /// inserts newly-discovered repositories, schedules asynchronous status
    /// updates for changed ones, and prunes repositories whose `.git` entry no
    /// longer exists. Returns a task that resolves once all scheduled status
    /// updates have completed (or been canceled).
    fn update_git_repositories(&self, dot_git_paths: Vec<PathBuf>) -> Task<()> {
        log::info!("reloading repositories: {dot_git_paths:?}");

        let mut status_updates = Vec::new();
        {
            let mut state = self.state.lock();
            let scan_id = state.snapshot.scan_id;
            for dot_git_dir in dot_git_paths {
                // Match against either the repo's true git dir or its worktree
                // `.git` location, since events can arrive for either path.
                let existing_repository_entry =
                    state
                        .snapshot
                        .git_repositories
                        .iter()
                        .find_map(|(_, repo)| {
                            if repo.dot_git_dir_abs_path.as_ref() == &dot_git_dir
                                || repo.dot_git_worktree_abs_path.as_deref() == Some(&dot_git_dir)
                            {
                                Some(repo.clone())
                            } else {
                                None
                            }
                        });

                let local_repository = match existing_repository_entry {
                    None => {
                        // NOTE(review): a `.git` path that isn't under the
                        // worktree root aborts the whole batch here, dropping
                        // any status updates already pushed in this loop while
                        // their `inc_scans_running` increments remain undone —
                        // confirm whether `continue` was intended instead.
                        let Ok(relative) = dot_git_dir.strip_prefix(state.snapshot.abs_path())
                        else {
                            return Task::ready(());
                        };
                        match state.insert_git_repository(
                            relative.into(),
                            self.fs.as_ref(),
                            self.watcher.as_ref(),
                        ) {
                            Some(output) => output,
                            None => continue,
                        }
                    }
                    Some(local_repository) => {
                        // Already refreshed during this scan; skip duplicates.
                        if local_repository.git_dir_scan_id == scan_id {
                            continue;
                        }
                        local_repository.repo_ptr.reload_index();

                        state.snapshot.git_repositories.update(
                            &local_repository.work_directory_id,
                            |entry| {
                                entry.git_dir_scan_id = scan_id;
                                entry.status_scan_id = scan_id;
                            },
                        );

                        local_repository
                    }
                };

                // Balanced by `dec_scans_running` in the task spawned below.
                inc_scans_running(&self.scans_running);
                status_updates
                    .push(self.schedule_git_statuses_update(&mut state, local_repository));
            }

            // Remove any git repositories whose .git entry no longer exists.
            let snapshot = &mut state.snapshot;
            let mut ids_to_preserve = HashSet::default();
            for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
                let exists_in_snapshot = snapshot
                    .entry_for_id(work_directory_id)
                    .map_or(false, |entry| {
                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                    });

                // Also keep repositories whose git dir still exists on disk
                // (e.g. ones living outside the worktree).
                if exists_in_snapshot
                    || matches!(
                        smol::block_on(self.fs.metadata(&entry.dot_git_dir_abs_path)),
                        Ok(Some(_))
                    )
                {
                    ids_to_preserve.insert(work_directory_id);
                }
            }

            snapshot
                .git_repositories
                .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
            snapshot.repositories.retain(&(), |entry| {
                ids_to_preserve.contains(&entry.work_directory_id)
            });
        }

        let scans_running = self.scans_running.clone();
        self.executor.spawn(async move {
            // Canceled updates resolve as `Err(oneshot::Canceled)`; either way
            // each scheduled update accounts for one running scan.
            let updates_finished: Vec<Result<(), oneshot::Canceled>> =
                join_all(status_updates).await;
            let n = updates_finished.len();
            dec_scans_running(&scans_running, n as i32);
        })
    }
5598
5599 /// Update the git statuses for a given batch of entries.
5600 fn schedule_git_statuses_update(
5601 &self,
5602 state: &mut BackgroundScannerState,
5603 local_repository: LocalRepositoryEntry,
5604 ) -> oneshot::Receiver<()> {
5605 let job_state = self.state.clone();
5606 let (tx, rx) = oneshot::channel();
5607
5608 state.repository_scans.insert(
5609 local_repository.work_directory.path_key(),
5610 self.executor
5611 .spawn(do_git_status_update(job_state, local_repository, tx)),
5612 );
5613 rx
5614 }
5615
5616 async fn progress_timer(&self, running: bool) {
5617 if !running {
5618 return futures::future::pending().await;
5619 }
5620
5621 #[cfg(any(test, feature = "test-support"))]
5622 if self.fs.is_fake() {
5623 return self.executor.simulate_random_delay().await;
5624 }
5625
5626 smol::Timer::after(FS_WATCH_LATENCY).await;
5627 }
5628
5629 fn is_path_private(&self, path: &Path) -> bool {
5630 !self.share_private_files && self.settings.is_path_private(path)
5631 }
5632
5633 async fn next_scan_request(&self) -> Result<ScanRequest> {
5634 let mut request = self.scan_requests_rx.recv().await?;
5635 while let Ok(next_request) = self.scan_requests_rx.try_recv() {
5636 request.relative_paths.extend(next_request.relative_paths);
5637 request.done.extend(next_request.done);
5638 }
5639 Ok(request)
5640 }
5641}
5642
/// Increments the count of in-flight scans by one.
/// Balanced by `dec_scans_running` once the corresponding work finishes.
fn inc_scans_running(scans_running: &AtomicI32) {
    let _previous = scans_running.fetch_add(1, atomic::Ordering::Release);
}
5646
/// Decrements the count of in-flight scans by `by`.
/// Debug builds assert the counter never goes negative.
fn dec_scans_running(scans_running: &AtomicI32, by: i32) {
    let previous = scans_running.fetch_sub(by, atomic::Ordering::Release);
    debug_assert!(previous >= by);
}
5651
5652fn send_status_update_inner(
5653 phase: BackgroundScannerPhase,
5654 state: Arc<Mutex<BackgroundScannerState>>,
5655 status_updates_tx: UnboundedSender<ScanState>,
5656 scanning: bool,
5657 barrier: SmallVec<[barrier::Sender; 1]>,
5658) -> bool {
5659 let mut state = state.lock();
5660 if state.changed_paths.is_empty() && scanning {
5661 return true;
5662 }
5663
5664 let new_snapshot = state.snapshot.clone();
5665 let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
5666 let changes = build_diff(phase, &old_snapshot, &new_snapshot, &state.changed_paths);
5667 state.changed_paths.clear();
5668
5669 status_updates_tx
5670 .unbounded_send(ScanState::Updated {
5671 snapshot: new_snapshot,
5672 changes,
5673 scanning,
5674 barrier,
5675 })
5676 .is_ok()
5677}
5678
5679async fn update_branches(
5680 state: &Mutex<BackgroundScannerState>,
5681 repository: &mut LocalRepositoryEntry,
5682) -> Result<()> {
5683 let branches = repository.repo().branches().await?;
5684 let snapshot = state.lock().snapshot.snapshot.clone();
5685 let mut repository = snapshot
5686 .repository(repository.work_directory.path_key())
5687 .context("Missing repository")?;
5688 repository.current_branch = branches.into_iter().find(|branch| branch.is_head);
5689
5690 let mut state = state.lock();
5691 state
5692 .snapshot
5693 .repositories
5694 .insert_or_replace(repository, &());
5695
5696 Ok(())
5697}
5698
/// Refreshes branch information and per-path git statuses for a single
/// repository, merging the results into the shared scanner state. Signals `tx`
/// on completion; bails out early (without signaling) if the status query
/// fails or the repository is no longer present in the snapshot.
async fn do_git_status_update(
    job_state: Arc<Mutex<BackgroundScannerState>>,
    mut local_repository: LocalRepositoryEntry,
    tx: oneshot::Sender<()>,
) {
    let repository_name = local_repository.work_directory.display_name();
    log::trace!("updating git branches for repo {repository_name}");
    // A branch-update failure is logged but doesn't abort the status refresh.
    update_branches(&job_state, &mut local_repository)
        .await
        .log_err();
    let t0 = Instant::now();

    log::trace!("updating git statuses for repo {repository_name}");
    let Some(statuses) = local_repository
        .repo()
        .status(&[git::WORK_DIRECTORY_REPO_PATH.clone()])
        .log_err()
    else {
        return;
    };
    log::trace!(
        "computed git statuses for repo {repository_name} in {:?}",
        t0.elapsed()
    );

    let t0 = Instant::now();
    let mut changed_paths = Vec::new();
    let snapshot = job_state.lock().snapshot.snapshot.clone();

    let Some(mut repository) = snapshot
        .repository(local_repository.work_directory.path_key())
        .context("Tried to update git statuses for a repository that isn't in the snapshot")
        .log_err()
    else {
        return;
    };

    // If the merge heads changed (e.g. a merge started or concluded), the old
    // conflict set is stale; clear it before recomputing from statuses below.
    let merge_head_shas = local_repository.repo().merge_head_shas();
    if merge_head_shas != local_repository.current_merge_head_shas {
        mem::take(&mut repository.current_merge_conflicts);
    }

    // Rebuild the per-path status tree from scratch, collecting conflicted
    // repo paths and the project-relative paths that changed.
    let mut new_entries_by_path = SumTree::new(&());
    for (repo_path, status) in statuses.entries.iter() {
        let project_path = repository.work_directory.try_unrelativize(repo_path);

        new_entries_by_path.insert_or_replace(
            StatusEntry {
                repo_path: repo_path.clone(),
                status: *status,
            },
            &(),
        );
        if status.is_conflicted() {
            repository.current_merge_conflicts.insert(repo_path.clone());
        }

        if let Some(path) = project_path {
            changed_paths.push(path);
        }
    }

    // Publish the refreshed repository entry and its bookkeeping fields.
    repository.statuses_by_path = new_entries_by_path;
    let mut state = job_state.lock();
    state
        .snapshot
        .repositories
        .insert_or_replace(repository, &());
    state
        .snapshot
        .git_repositories
        .update(&local_repository.work_directory_id, |entry| {
            entry.current_merge_head_shas = merge_head_shas;
            // First line of .git/MERGE_MSG, if the file exists and is readable.
            entry.merge_message =
                std::fs::read_to_string(local_repository.dot_git_dir_abs_path.join("MERGE_MSG"))
                    .ok()
                    .and_then(|merge_msg| Some(merge_msg.lines().next()?.to_owned()));
            entry.status_scan_id += 1;
        });

    util::extend_sorted(
        &mut state.changed_paths,
        changed_paths,
        usize::MAX,
        Ord::cmp,
    );

    log::trace!(
        "applied git status updates for repo {repository_name} in {:?}",
        t0.elapsed(),
    );
    tx.send(()).ok();
}
5792
/// Computes the set of entry changes between two snapshots, restricted to the
/// given sorted `event_paths` and everything beneath them.
///
/// Both entry trees are walked in lockstep with a pair of cursors: entries
/// present only in the old snapshot are reported as `Removed`, entries present
/// only in the new one as `Added` or `Loaded`, and entries present in both as
/// `Updated`/`Loaded` (same id) or `Removed`+`Added` (id changed), with the
/// scanner phase determining whether `AddedOrUpdated` must be used instead.
fn build_diff(
    phase: BackgroundScannerPhase,
    old_snapshot: &Snapshot,
    new_snapshot: &Snapshot,
    event_paths: &[Arc<Path>],
) -> UpdatedEntriesSet {
    use BackgroundScannerPhase::*;
    use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

    // Identify which paths have changed. Use the known set of changed
    // parent paths to optimize the search.
    let mut changes = Vec::new();
    let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>(&());
    let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>(&());
    // The most recent directory reported as newly loaded; its descendants are
    // reported as `Loaded` rather than `Added`.
    let mut last_newly_loaded_dir_path = None;
    old_paths.next(&());
    new_paths.next(&());
    for path in event_paths {
        let path = PathKey(path.clone());
        // Advance each cursor forward to the changed path, if it's behind.
        if old_paths.item().map_or(false, |e| e.path < path.0) {
            old_paths.seek_forward(&path, Bias::Left, &());
        }
        if new_paths.item().map_or(false, |e| e.path < path.0) {
            new_paths.seek_forward(&path, Bias::Left, &());
        }
        // Walk both trees over this path and its whole subtree.
        loop {
            match (old_paths.item(), new_paths.item()) {
                (Some(old_entry), Some(new_entry)) => {
                    // Both cursors have moved past the changed subtree: done
                    // with this event path.
                    if old_entry.path > path.0
                        && new_entry.path > path.0
                        && !old_entry.path.starts_with(&path.0)
                        && !new_entry.path.starts_with(&path.0)
                    {
                        break;
                    }

                    match Ord::cmp(&old_entry.path, &new_entry.path) {
                        Ordering::Less => {
                            // Present only in the old snapshot.
                            changes.push((old_entry.path.clone(), old_entry.id, Removed));
                            old_paths.next(&());
                        }
                        Ordering::Equal => {
                            if phase == EventsReceivedDuringInitialScan {
                                if old_entry.id != new_entry.id {
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                }
                                // If the worktree was not fully initialized when this event was generated,
                                // we can't know whether this entry was added during the scan or whether
                                // it was merely updated.
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    AddedOrUpdated,
                                ));
                            } else if old_entry.id != new_entry.id {
                                // Same path but a different entry: replaced.
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                changes.push((new_entry.path.clone(), new_entry.id, Added));
                            } else if old_entry != new_entry {
                                if old_entry.kind.is_unloaded() {
                                    // A previously-unloaded directory got loaded.
                                    last_newly_loaded_dir_path = Some(&new_entry.path);
                                    changes.push((new_entry.path.clone(), new_entry.id, Loaded));
                                } else {
                                    changes.push((new_entry.path.clone(), new_entry.id, Updated));
                                }
                            }
                            old_paths.next(&());
                            new_paths.next(&());
                        }
                        Ordering::Greater => {
                            // Present only in the new snapshot.
                            let is_newly_loaded = phase == InitialScan
                                || last_newly_loaded_dir_path
                                    .as_ref()
                                    .map_or(false, |dir| new_entry.path.starts_with(dir));
                            changes.push((
                                new_entry.path.clone(),
                                new_entry.id,
                                if is_newly_loaded { Loaded } else { Added },
                            ));
                            new_paths.next(&());
                        }
                    }
                }
                (Some(old_entry), None) => {
                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                    old_paths.next(&());
                }
                (None, Some(new_entry)) => {
                    let is_newly_loaded = phase == InitialScan
                        || last_newly_loaded_dir_path
                            .as_ref()
                            .map_or(false, |dir| new_entry.path.starts_with(dir));
                    changes.push((
                        new_entry.path.clone(),
                        new_entry.id,
                        if is_newly_loaded { Loaded } else { Added },
                    ));
                    new_paths.next(&());
                }
                (None, None) => break,
            }
        }
    }

    changes.into()
}
5898
/// Moves the path whose final component equals `file` (if any) to the front of
/// `child_paths`, preserving the relative order of the remaining entries.
///
/// # Panics
/// Never. Paths with no final component (e.g. `/` or `a/..`) are treated as
/// non-matching; the original implementation unwrapped `file_name()` and
/// would panic on them.
fn swap_to_front(child_paths: &mut Vec<PathBuf>, file: &OsStr) {
    let position = child_paths
        .iter()
        // `file_name()` is `None` for paths like `/` — treat as no match.
        .position(|path| path.file_name() == Some(file));
    if let Some(position) = position {
        // One in-place shift instead of remove + insert (no reallocation,
        // same resulting order).
        child_paths[..=position].rotate_right(1);
    }
}
5908
5909fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
5910 let mut result = root_char_bag;
5911 result.extend(
5912 path.to_string_lossy()
5913 .chars()
5914 .map(|c| c.to_ascii_lowercase()),
5915 );
5916 result
5917}
5918
/// A repository handle and its snapshot entry, paired with a set of paths
/// (relative to the repository root) within that repository.
#[derive(Debug)]
struct RepoPaths {
    repo: Arc<dyn GitRepository>,
    entry: RepositoryEntry,
    // Kept sorted so membership can be maintained via binary search.
    repo_paths: Vec<RepoPath>,
}
5926
5927impl RepoPaths {
5928 fn add_path(&mut self, repo_path: RepoPath) {
5929 match self.repo_paths.binary_search(&repo_path) {
5930 Ok(_) => {}
5931 Err(ix) => self.repo_paths.insert(ix, repo_path),
5932 }
5933 }
5934
5935 fn remove_repo_path(&mut self, repo_path: &RepoPath) {
5936 match self.repo_paths.binary_search(&repo_path) {
5937 Ok(ix) => {
5938 self.repo_paths.remove(ix);
5939 }
5940 Err(_) => {}
5941 }
5942 }
5943}
5944
/// A unit of work for the background scanner: load the directory at
/// `abs_path` and enqueue further jobs for its subdirectories.
#[derive(Debug)]
struct ScanJob {
    abs_path: Arc<Path>,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for child directories are enqueued.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories — presumably used to detect filesystem
    // cycles through symlinks; the check lives outside this chunk (confirm).
    ancestor_inodes: TreeSet<u64>,
    is_external: bool,
}
5954
/// A unit of work for `update_ignore_status`: re-evaluate the ignore state of
/// the children of `abs_path`, recursing into subdirectories via
/// `ignore_queue`.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    // Gitignore rules in effect above `abs_path`.
    ignore_stack: Arc<IgnoreStack>,
    // Queue for the recursive per-subdirectory jobs.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
    // Queue for scheduling full scans of directories that become unignored.
    scan_queue: Sender<ScanJob>,
}
5961
/// Test-support helpers for worktree entities.
pub trait WorktreeModelHandle {
    /// Flushes pending filesystem events by mutating the worktree's directory
    /// and waiting for the mutations to be observed (see impl for details).
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;

    /// Like `flush_fs_events`, but flushes events in the root repository's
    /// `.git` folder, which may live outside the worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events_in_root_git_repository<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
5975
impl WorktreeModelHandle for Entity<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it.
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            let mut events = cx.events(&tree);
            while events.next().await.is_some() {
                if tree.update(cx, |tree, _| tree.entry_for_path(file_name).is_some()) {
                    break;
                }
            }

            // Then delete the sentinel and wait for the deletion to land too.
            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();
            while events.next().await.is_some() {
                if tree.update(cx, |tree, _| tree.entry_for_path(file_name).is_none()) {
                    break;
                }
            }

            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }

    // This function is similar to flush_fs_events, except that it waits for events to be flushed in
    // the .git folder of the root repository.
    // The reason for its existence is that a repository's .git folder might live *outside* of the
    // worktree and thus its FS events might go through a different path.
    // In order to flush those, we need to create artificial events in the .git folder and wait
    // for the repository to be reloaded.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events_in_root_git_repository<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path, mut git_dir_scan_id) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            let root_entry = tree.root_git_entry().unwrap();
            let local_repo_entry = tree.get_local_repo(&root_entry).unwrap();
            (
                tree.fs.clone(),
                local_repo_entry.dot_git_dir_abs_path.clone(),
                local_repo_entry.git_dir_scan_id,
            )
        });

        // Reports whether the repository's scan id has advanced past the
        // captured value, updating the captured value when it has.
        let scan_id_increased = |tree: &mut Worktree, git_dir_scan_id: &mut usize| {
            let root_entry = tree.root_git_entry().unwrap();
            let local_repo_entry = tree
                .as_local()
                .unwrap()
                .get_local_repo(&root_entry)
                .unwrap();

            if local_repo_entry.git_dir_scan_id > *git_dir_scan_id {
                *git_dir_scan_id = local_repo_entry.git_dir_scan_id;
                true
            } else {
                false
            }
        };

        async move {
            // Create, then delete, a sentinel inside the .git folder, waiting
            // for the repository to be rescanned after each mutation.
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            let mut events = cx.events(&tree);
            while events.next().await.is_some() {
                if tree.update(cx, |tree, _| scan_id_increased(tree, &mut git_dir_scan_id)) {
                    break;
                }
            }

            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            while events.next().await.is_some() {
                if tree.update(cx, |tree, _| scan_id_increased(tree, &mut git_dir_scan_id)) {
                    break;
                }
            }

            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
6092
/// Sum-tree dimension accumulating entry counts (total, non-ignored, files)
/// up to `max_path`; used both to seek and to compute traversal offsets.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    non_ignored_count: usize,
    file_count: usize,
    non_ignored_file_count: usize,
}
6101
6102impl TraversalProgress<'_> {
6103 fn count(&self, include_files: bool, include_dirs: bool, include_ignored: bool) -> usize {
6104 match (include_files, include_dirs, include_ignored) {
6105 (true, true, true) => self.count,
6106 (true, true, false) => self.non_ignored_count,
6107 (true, false, true) => self.file_count,
6108 (true, false, false) => self.non_ignored_file_count,
6109 (false, true, true) => self.count - self.file_count,
6110 (false, true, false) => self.non_ignored_count - self.non_ignored_file_count,
6111 (false, false, _) => 0,
6112 }
6113 }
6114}
6115
6116impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
6117 fn zero(_cx: &()) -> Self {
6118 Default::default()
6119 }
6120
6121 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
6122 self.max_path = summary.max_path.as_ref();
6123 self.count += summary.count;
6124 self.non_ignored_count += summary.non_ignored_count;
6125 self.file_count += summary.file_count;
6126 self.non_ignored_file_count += summary.non_ignored_file_count;
6127 }
6128}
6129
impl Default for TraversalProgress<'_> {
    // Zero progress: empty max path, all counts zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}
6141
6142#[derive(Debug, Clone, Copy)]
6143pub struct GitEntryRef<'a> {
6144 pub entry: &'a Entry,
6145 pub git_summary: GitSummary,
6146}
6147
6148impl GitEntryRef<'_> {
6149 pub fn to_owned(&self) -> GitEntry {
6150 GitEntry {
6151 entry: self.entry.clone(),
6152 git_summary: self.git_summary,
6153 }
6154 }
6155}
6156
6157impl Deref for GitEntryRef<'_> {
6158 type Target = Entry;
6159
6160 fn deref(&self) -> &Self::Target {
6161 &self.entry
6162 }
6163}
6164
6165impl AsRef<Entry> for GitEntryRef<'_> {
6166 fn as_ref(&self) -> &Entry {
6167 self.entry
6168 }
6169}
6170
6171#[derive(Debug, Clone, PartialEq, Eq)]
6172pub struct GitEntry {
6173 pub entry: Entry,
6174 pub git_summary: GitSummary,
6175}
6176
6177impl GitEntry {
6178 pub fn to_ref(&self) -> GitEntryRef {
6179 GitEntryRef {
6180 entry: &self.entry,
6181 git_summary: self.git_summary,
6182 }
6183 }
6184}
6185
6186impl Deref for GitEntry {
6187 type Target = Entry;
6188
6189 fn deref(&self) -> &Self::Target {
6190 &self.entry
6191 }
6192}
6193
6194impl AsRef<Entry> for GitEntry {
6195 fn as_ref(&self) -> &Entry {
6196 &self.entry
6197 }
6198}
6199
/// Walks the worktree entries and their associated git statuses.
pub struct GitTraversal<'a> {
    traversal: Traversal<'a>,
    // Git summary for the entry the traversal currently points at, if known.
    current_entry_summary: Option<GitSummary>,
    // The repository containing the current entry, plus a forward-seeking
    // cursor over that repository's per-path statuses.
    repo_location: Option<(
        &'a RepositoryEntry,
        Cursor<'a, StatusEntry, PathProgress<'a>>,
    )>,
}
6209
impl<'a> GitTraversal<'a> {
    /// Recomputes `current_entry_summary` (and, when the repository changed,
    /// `repo_location`) for the entry the underlying traversal points at.
    /// Pass `reset = true` after any movement that may go backwards (e.g.
    /// `back_to_parent`), since the status cursor only seeks forward and must
    /// then be recreated.
    fn synchronize_statuses(&mut self, reset: bool) {
        self.current_entry_summary = None;

        let Some(entry) = self.traversal.cursor.item() else {
            return;
        };

        let Some(repo) = self.traversal.snapshot.repository_for_path(&entry.path) else {
            self.repo_location = None;
            return;
        };

        // Update our state if we changed repositories.
        if reset
            || self
                .repo_location
                .as_ref()
                .map(|(prev_repo, _)| &prev_repo.work_directory)
                != Some(&repo.work_directory)
        {
            self.repo_location = Some((repo, repo.statuses_by_path.cursor::<PathProgress>(&())));
        }

        let Some((repo, statuses)) = &mut self.repo_location else {
            return;
        };

        let repo_path = repo.relativize(&entry.path).unwrap();

        if entry.is_dir() {
            // For a directory, sum the statuses of everything beneath it
            // (from the directory's path up to its successor).
            let mut statuses = statuses.clone();
            statuses.seek_forward(&PathTarget::Path(repo_path.as_ref()), Bias::Left, &());
            let summary =
                statuses.summary(&PathTarget::Successor(repo_path.as_ref()), Bias::Left, &());

            self.current_entry_summary = Some(summary);
        } else if entry.is_file() {
            // For a file entry, park the cursor on the corresponding status
            if statuses.seek_forward(&PathTarget::Path(repo_path.as_ref()), Bias::Left, &()) {
                // TODO: Investigate statuses.item() being None here.
                self.current_entry_summary = statuses.item().map(|item| item.status.into());
            } else {
                self.current_entry_summary = Some(GitSummary::UNCHANGED);
            }
        }
    }

    /// Advances to the next entry.
    pub fn advance(&mut self) -> bool {
        self.advance_by(1)
    }

    /// Advances by `count` entries, then refreshes the git summary.
    pub fn advance_by(&mut self, count: usize) -> bool {
        let found = self.traversal.advance_by(count);
        self.synchronize_statuses(false);
        found
    }

    /// Skips over the current entry's descendants to its next sibling.
    pub fn advance_to_sibling(&mut self) -> bool {
        let found = self.traversal.advance_to_sibling();
        self.synchronize_statuses(false);
        found
    }

    /// Moves to the current entry's parent. This can move backwards, so the
    /// statuses are resynchronized with `reset = true`.
    pub fn back_to_parent(&mut self) -> bool {
        let found = self.traversal.back_to_parent();
        self.synchronize_statuses(true);
        found
    }

    /// See [`Traversal::start_offset`].
    pub fn start_offset(&self) -> usize {
        self.traversal.start_offset()
    }

    /// See [`Traversal::end_offset`].
    pub fn end_offset(&self) -> usize {
        self.traversal.end_offset()
    }

    /// The current entry together with its git summary, or `None` at the end.
    /// Entries without a computed summary report `GitSummary::UNCHANGED`.
    pub fn entry(&self) -> Option<GitEntryRef<'a>> {
        let entry = self.traversal.cursor.item()?;
        let git_summary = self.current_entry_summary.unwrap_or(GitSummary::UNCHANGED);
        Some(GitEntryRef { entry, git_summary })
    }
}
6294
6295impl<'a> Iterator for GitTraversal<'a> {
6296 type Item = GitEntryRef<'a>;
6297 fn next(&mut self) -> Option<Self::Item> {
6298 if let Some(item) = self.entry() {
6299 self.advance();
6300 Some(item)
6301 } else {
6302 None
6303 }
6304 }
6305}
6306
/// An in-order walk over a snapshot's entries, filtered by entry kind and
/// ignored state.
#[derive(Debug)]
pub struct Traversal<'a> {
    snapshot: &'a Snapshot,
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_files: bool,
    include_dirs: bool,
}
6315
impl<'a> Traversal<'a> {
    /// Creates a traversal over `snapshot`'s entries starting at `start_path`,
    /// filtered by the `include_*` flags.
    fn new(
        snapshot: &'a Snapshot,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        start_path: &Path,
    ) -> Self {
        let mut cursor = snapshot.entries_by_path.cursor(&());
        cursor.seek(&TraversalTarget::path(start_path), Bias::Left, &());
        let mut traversal = Self {
            snapshot,
            cursor,
            include_files,
            include_dirs,
            include_ignored,
        };
        // If the starting entry is excluded by the filters (its start and end
        // offsets coincide), advance to the first included entry.
        if traversal.end_offset() == traversal.start_offset() {
            traversal.next();
        }
        traversal
    }

    /// Upgrades this traversal into one that also yields git status summaries.
    pub fn with_git_statuses(self) -> GitTraversal<'a> {
        let mut this = GitTraversal {
            traversal: self,
            current_entry_summary: None,
            repo_location: None,
        };
        this.synchronize_statuses(true);
        this
    }

    /// Advances to the next included entry.
    pub fn advance(&mut self) -> bool {
        self.advance_by(1)
    }

    /// Advances by `count` included entries (counted under the filters).
    pub fn advance_by(&mut self, count: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + count,
                include_dirs: self.include_dirs,
                include_files: self.include_files,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skips the current entry's descendants, stopping at the next entry that
    /// passes the filters.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor
                .seek_forward(&TraversalTarget::successor(&entry.path), Bias::Left, &());
            if let Some(entry) = self.cursor.item() {
                if (self.include_files || !entry.is_file())
                    && (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored || entry.is_always_included)
                {
                    return true;
                }
            }
        }
        false
    }

    /// Seeks backwards to the current entry's parent directory, if any.
    pub fn back_to_parent(&mut self) -> bool {
        let Some(parent_path) = self.cursor.item().and_then(|entry| entry.path.parent()) else {
            return false;
        };
        self.cursor
            .seek(&TraversalTarget::path(parent_path), Bias::Left, &())
    }

    /// The entry the traversal currently points at, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Filtered offset of the current entry.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_files, self.include_dirs, self.include_ignored)
    }

    /// Filtered offset just past the current entry.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_files, self.include_dirs, self.include_ignored)
    }
}
6406
6407impl<'a> Iterator for Traversal<'a> {
6408 type Item = &'a Entry;
6409
6410 fn next(&mut self) -> Option<Self::Item> {
6411 if let Some(item) = self.entry() {
6412 self.advance();
6413 Some(item)
6414 } else {
6415 None
6416 }
6417 }
6418}
6419
/// A path-based seek position: either the entry at `Path` itself, or
/// (`Successor`) the first position past a path and all of its descendants.
#[derive(Debug, Clone, Copy)]
enum PathTarget<'a> {
    Path(&'a Path),
    Successor(&'a Path),
}

impl PathTarget<'_> {
    /// Orders this target relative to `other`. `Successor(p)` compares greater
    /// than every path under `p` and equal to everything else.
    fn cmp_path(&self, other: &Path) -> Ordering {
        match self {
            Self::Path(path) => Ord::cmp(*path, other),
            Self::Successor(path) => {
                if other.starts_with(path) {
                    Ordering::Greater
                } else {
                    Ordering::Equal
                }
            }
        }
    }
}
6440
// Allow `PathTarget` to be used as a seek target over every cursor dimension
// that tracks a `max_path`.
impl<'a, S: Summary> SeekTarget<'a, PathSummary<S>, PathProgress<'a>> for PathTarget<'_> {
    fn cmp(&self, cursor_location: &PathProgress<'a>, _: &S::Context) -> Ordering {
        self.cmp_path(&cursor_location.max_path)
    }
}

impl<'a, S: Summary> SeekTarget<'a, PathSummary<S>, TraversalProgress<'a>> for PathTarget<'_> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &S::Context) -> Ordering {
        self.cmp_path(&cursor_location.max_path)
    }
}

// Variant for cursors that also accumulate a `GitSummary` alongside progress.
impl<'a> SeekTarget<'a, PathSummary<GitSummary>, (TraversalProgress<'a>, GitSummary)>
    for PathTarget<'_>
{
    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitSummary), _: &()) -> Ordering {
        self.cmp_path(&cursor_location.0.max_path)
    }
}
6460
/// A seek target for [`Traversal`]: either a path-based position, or an
/// absolute entry count under a particular combination of filters.
#[derive(Debug)]
enum TraversalTarget<'a> {
    Path(PathTarget<'a>),
    Count {
        count: usize,
        include_files: bool,
        include_ignored: bool,
        include_dirs: bool,
    },
}
6471
6472impl<'a> TraversalTarget<'a> {
6473 fn path(path: &'a Path) -> Self {
6474 Self::Path(PathTarget::Path(path))
6475 }
6476
6477 fn successor(path: &'a Path) -> Self {
6478 Self::Path(PathTarget::Successor(path))
6479 }
6480
6481 fn cmp_progress(&self, progress: &TraversalProgress) -> Ordering {
6482 match self {
6483 TraversalTarget::Path(path) => path.cmp_path(&progress.max_path),
6484 TraversalTarget::Count {
6485 count,
6486 include_files,
6487 include_dirs,
6488 include_ignored,
6489 } => Ord::cmp(
6490 count,
6491 &progress.count(*include_files, *include_dirs, *include_ignored),
6492 ),
6493 }
6494 }
6495}
6496
/// Lets a traversal target drive seeks over entry-summary cursors.
impl<'a> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'_> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        self.cmp_progress(cursor_location)
    }
}
6502
/// Lets a traversal target drive seeks over plain path-summary cursors.
impl<'a> SeekTarget<'a, PathSummary<Unit>, TraversalProgress<'a>> for TraversalTarget<'_> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        self.cmp_progress(cursor_location)
    }
}
6508
/// Filtering options for enumerating a directory's child entries.
pub struct ChildEntriesOptions {
    /// Include regular file entries.
    pub include_files: bool,
    /// Include directory entries.
    pub include_dirs: bool,
    /// Include entries that are ignored.
    pub include_ignored: bool,
}
6514
/// Iterator over the entries directly beneath `parent_path`, skipping each
/// yielded entry's descendants.
pub struct ChildEntriesIter<'a> {
    /// The directory whose children are being enumerated.
    parent_path: &'a Path,
    /// Underlying traversal, advanced sibling-by-sibling.
    traversal: Traversal<'a>,
}
6519
6520impl<'a> ChildEntriesIter<'a> {
6521 pub fn with_git_statuses(self) -> ChildEntriesGitIter<'a> {
6522 ChildEntriesGitIter {
6523 parent_path: self.parent_path,
6524 traversal: self.traversal.with_git_statuses(),
6525 }
6526 }
6527}
6528
/// Like [`ChildEntriesIter`], but each yielded item also carries its git
/// status information.
pub struct ChildEntriesGitIter<'a> {
    /// The directory whose children are being enumerated.
    parent_path: &'a Path,
    /// Underlying git-aware traversal, advanced sibling-by-sibling.
    traversal: GitTraversal<'a>,
}
6533
6534impl<'a> Iterator for ChildEntriesIter<'a> {
6535 type Item = &'a Entry;
6536
6537 fn next(&mut self) -> Option<Self::Item> {
6538 if let Some(item) = self.traversal.entry() {
6539 if item.path.starts_with(self.parent_path) {
6540 self.traversal.advance_to_sibling();
6541 return Some(item);
6542 }
6543 }
6544 None
6545 }
6546}
6547
6548impl<'a> Iterator for ChildEntriesGitIter<'a> {
6549 type Item = GitEntryRef<'a>;
6550
6551 fn next(&mut self) -> Option<Self::Item> {
6552 if let Some(item) = self.traversal.entry() {
6553 if item.path.starts_with(self.parent_path) {
6554 self.traversal.advance_to_sibling();
6555 return Some(item);
6556 }
6557 }
6558 None
6559 }
6560}
6561
6562impl<'a> From<&'a Entry> for proto::Entry {
6563 fn from(entry: &'a Entry) -> Self {
6564 Self {
6565 id: entry.id.to_proto(),
6566 is_dir: entry.is_dir(),
6567 path: entry.path.as_ref().to_proto(),
6568 inode: entry.inode,
6569 mtime: entry.mtime.map(|time| time.into()),
6570 is_ignored: entry.is_ignored,
6571 is_external: entry.is_external,
6572 is_fifo: entry.is_fifo,
6573 size: Some(entry.size),
6574 canonical_path: entry
6575 .canonical_path
6576 .as_ref()
6577 .map(|path| path.as_ref().to_proto()),
6578 }
6579 }
6580}
6581
6582impl<'a> TryFrom<(&'a CharBag, &PathMatcher, proto::Entry)> for Entry {
6583 type Error = anyhow::Error;
6584
6585 fn try_from(
6586 (root_char_bag, always_included, entry): (&'a CharBag, &PathMatcher, proto::Entry),
6587 ) -> Result<Self> {
6588 let kind = if entry.is_dir {
6589 EntryKind::Dir
6590 } else {
6591 EntryKind::File
6592 };
6593
6594 let path = Arc::<Path>::from_proto(entry.path);
6595 let char_bag = char_bag_for_path(*root_char_bag, &path);
6596 let is_always_included = always_included.is_match(path.as_ref());
6597 Ok(Entry {
6598 id: ProjectEntryId::from_proto(entry.id),
6599 kind,
6600 path,
6601 inode: entry.inode,
6602 mtime: entry.mtime.map(|time| time.into()),
6603 size: entry.size.unwrap_or(0),
6604 canonical_path: entry
6605 .canonical_path
6606 .map(|path_string| Arc::from(PathBuf::from_proto(path_string))),
6607 is_ignored: entry.is_ignored,
6608 is_always_included,
6609 is_external: entry.is_external,
6610 is_private: false,
6611 char_bag,
6612 is_fifo: entry.is_fifo,
6613 })
6614 }
6615}
6616
/// Decodes a git [`FileStatus`] from its protobuf representation.
///
/// When the structured `variant` is present it takes precedence; otherwise
/// the flat `simple_status` enum value is decoded by the fallback branch
/// (presumably sent by peers that predate the structured form — TODO
/// confirm against the protocol history). Returns an error for enum values
/// that are not valid in the given position.
fn status_from_proto(
    simple_status: i32,
    status: Option<proto::GitFileStatus>,
) -> anyhow::Result<FileStatus> {
    use proto::git_file_status::Variant;

    // Fallback: no structured variant — interpret the flat status code.
    let Some(variant) = status.and_then(|status| status.variant) else {
        let code = proto::GitStatus::from_i32(simple_status)
            .ok_or_else(|| anyhow!("Invalid git status code: {simple_status}"))?;
        // Each simple code maps to a worktree-side status with an
        // unmodified index, except `Conflict`, which becomes an
        // updated/updated unmerged status.
        let result = match code {
            proto::GitStatus::Added => TrackedStatus {
                worktree_status: StatusCode::Added,
                index_status: StatusCode::Unmodified,
            }
            .into(),
            proto::GitStatus::Modified => TrackedStatus {
                worktree_status: StatusCode::Modified,
                index_status: StatusCode::Unmodified,
            }
            .into(),
            proto::GitStatus::Conflict => UnmergedStatus {
                first_head: UnmergedStatusCode::Updated,
                second_head: UnmergedStatusCode::Updated,
            }
            .into(),
            proto::GitStatus::Deleted => TrackedStatus {
                worktree_status: StatusCode::Deleted,
                index_status: StatusCode::Unmodified,
            }
            .into(),
            _ => return Err(anyhow!("Invalid code for simple status: {simple_status}")),
        };
        return Ok(result);
    };

    let result = match variant {
        Variant::Untracked(_) => FileStatus::Untracked,
        Variant::Ignored(_) => FileStatus::Ignored,
        Variant::Unmerged(unmerged) => {
            // Decode both heads with the same mapping; only Added, Updated,
            // and Deleted are valid unmerged codes.
            let [first_head, second_head] =
                [unmerged.first_head, unmerged.second_head].map(|head| {
                    let code = proto::GitStatus::from_i32(head)
                        .ok_or_else(|| anyhow!("Invalid git status code: {head}"))?;
                    let result = match code {
                        proto::GitStatus::Added => UnmergedStatusCode::Added,
                        proto::GitStatus::Updated => UnmergedStatusCode::Updated,
                        proto::GitStatus::Deleted => UnmergedStatusCode::Deleted,
                        _ => return Err(anyhow!("Invalid code for unmerged status: {code:?}")),
                    };
                    Ok(result)
                });
            // Surface the first decoding error, if any.
            let [first_head, second_head] = [first_head?, second_head?];
            UnmergedStatus {
                first_head,
                second_head,
            }
            .into()
        }
        Variant::Tracked(tracked) => {
            // Decode index and worktree sides with the same mapping over
            // the full set of tracked status codes.
            let [index_status, worktree_status] = [tracked.index_status, tracked.worktree_status]
                .map(|status| {
                    let code = proto::GitStatus::from_i32(status)
                        .ok_or_else(|| anyhow!("Invalid git status code: {status}"))?;
                    let result = match code {
                        proto::GitStatus::Modified => StatusCode::Modified,
                        proto::GitStatus::TypeChanged => StatusCode::TypeChanged,
                        proto::GitStatus::Added => StatusCode::Added,
                        proto::GitStatus::Deleted => StatusCode::Deleted,
                        proto::GitStatus::Renamed => StatusCode::Renamed,
                        proto::GitStatus::Copied => StatusCode::Copied,
                        proto::GitStatus::Unmodified => StatusCode::Unmodified,
                        _ => return Err(anyhow!("Invalid code for tracked status: {code:?}")),
                    };
                    Ok(result)
                });
            // Surface the first decoding error, if any.
            let [index_status, worktree_status] = [index_status?, worktree_status?];
            TrackedStatus {
                index_status,
                worktree_status,
            }
            .into()
        }
    };
    Ok(result)
}
6702
6703fn status_to_proto(status: FileStatus) -> proto::GitFileStatus {
6704 use proto::git_file_status::{Tracked, Unmerged, Variant};
6705
6706 let variant = match status {
6707 FileStatus::Untracked => Variant::Untracked(Default::default()),
6708 FileStatus::Ignored => Variant::Ignored(Default::default()),
6709 FileStatus::Unmerged(UnmergedStatus {
6710 first_head,
6711 second_head,
6712 }) => Variant::Unmerged(Unmerged {
6713 first_head: unmerged_status_to_proto(first_head),
6714 second_head: unmerged_status_to_proto(second_head),
6715 }),
6716 FileStatus::Tracked(TrackedStatus {
6717 index_status,
6718 worktree_status,
6719 }) => Variant::Tracked(Tracked {
6720 index_status: tracked_status_to_proto(index_status),
6721 worktree_status: tracked_status_to_proto(worktree_status),
6722 }),
6723 };
6724 proto::GitFileStatus {
6725 variant: Some(variant),
6726 }
6727}
6728
6729fn unmerged_status_to_proto(code: UnmergedStatusCode) -> i32 {
6730 match code {
6731 UnmergedStatusCode::Added => proto::GitStatus::Added as _,
6732 UnmergedStatusCode::Deleted => proto::GitStatus::Deleted as _,
6733 UnmergedStatusCode::Updated => proto::GitStatus::Updated as _,
6734 }
6735}
6736
6737fn tracked_status_to_proto(code: StatusCode) -> i32 {
6738 match code {
6739 StatusCode::Added => proto::GitStatus::Added as _,
6740 StatusCode::Deleted => proto::GitStatus::Deleted as _,
6741 StatusCode::Modified => proto::GitStatus::Modified as _,
6742 StatusCode::Renamed => proto::GitStatus::Renamed as _,
6743 StatusCode::TypeChanged => proto::GitStatus::TypeChanged as _,
6744 StatusCode::Copied => proto::GitStatus::Copied as _,
6745 StatusCode::Unmodified => proto::GitStatus::Unmodified as _,
6746 }
6747}
6748
/// An identifier for a project [`Entry`], allocated from an atomic counter
/// (see `ProjectEntryId::new`).
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProjectEntryId(usize);
6751
6752impl ProjectEntryId {
6753 pub const MAX: Self = Self(usize::MAX);
6754 pub const MIN: Self = Self(usize::MIN);
6755
6756 pub fn new(counter: &AtomicUsize) -> Self {
6757 Self(counter.fetch_add(1, SeqCst))
6758 }
6759
6760 pub fn from_proto(id: u64) -> Self {
6761 Self(id as usize)
6762 }
6763
6764 pub fn to_proto(&self) -> u64 {
6765 self.0 as u64
6766 }
6767
6768 pub fn to_usize(&self) -> usize {
6769 self.0
6770 }
6771}
6772
6773#[cfg(any(test, feature = "test-support"))]
6774impl CreatedEntry {
6775 pub fn to_included(self) -> Option<Entry> {
6776 match self {
6777 CreatedEntry::Included(entry) => Some(entry),
6778 CreatedEntry::Excluded { .. } => None,
6779 }
6780 }
6781}