1mod ignore;
2mod worktree_settings;
3#[cfg(test)]
4mod worktree_tests;
5
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context as _, Result};
8use client::{proto, Client};
9use clock::ReplicaId;
10use collections::{HashMap, HashSet, VecDeque};
11use fs::{copy_recursive, RemoveOptions};
12use fs::{
13 repository::{GitFileStatus, GitRepository, RepoPath},
14 Fs,
15};
16use futures::{
17 channel::{
18 mpsc::{self, UnboundedSender},
19 oneshot,
20 },
21 select_biased,
22 task::Poll,
23 FutureExt as _, Stream, StreamExt,
24};
25use fuzzy::CharBag;
26use git::{DOT_GIT, GITIGNORE};
27use gpui::{
28 AppContext, AsyncAppContext, BackgroundExecutor, Context, EventEmitter, Model, ModelContext,
29 Task,
30};
31use ignore::IgnoreStack;
32use itertools::Itertools;
33use language::{
34 proto::{deserialize_version, serialize_fingerprint, serialize_line_ending, serialize_version},
35 Buffer, Capability, DiagnosticEntry, File as _, LineEnding, PointUtf16, Rope, RopeFingerprint,
36 Unclipped,
37};
38use lsp::{DiagnosticSeverity, LanguageServerId};
39use parking_lot::Mutex;
40use postage::{
41 barrier,
42 prelude::{Sink as _, Stream as _},
43 watch,
44};
45use serde::Serialize;
46use settings::{Settings, SettingsLocation, SettingsStore};
47use smol::channel::{self, Sender};
48use std::{
49 any::Any,
50 cmp::{self, Ordering},
51 convert::TryFrom,
52 ffi::OsStr,
53 fmt,
54 future::Future,
55 mem,
56 ops::{AddAssign, Deref, DerefMut, Sub},
57 path::{Path, PathBuf},
58 pin::Pin,
59 sync::{
60 atomic::{AtomicUsize, Ordering::SeqCst},
61 Arc,
62 },
63 time::{Duration, SystemTime},
64};
65use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
66use text::BufferId;
67use util::{
68 paths::{PathMatcher, HOME},
69 ResultExt,
70};
71
72pub use worktree_settings::WorktreeSettings;
73
/// Latency with which file-system events are coalesced before the worktree's
/// background scanner reacts to them (passed to `Fs::watch`).
///
/// Previously this was declared twice behind `#[cfg(feature = "test-support")]`
/// and `#[cfg(not(feature = "test-support"))]` with identical values; the
/// redundant conditional compilation is collapsed into a single definition,
/// which is behaviorally identical for every build configuration.
pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
78
/// Identifier of a worktree within a project, wrapping a plain `usize`.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
81
/// A set of local or remote files that are being opened as part of a project.
/// Responsible for tracking related FS (for local)/collab (for remote) events and corresponding updates.
/// Stores git repositories data and the diagnostics for the file(s).
///
/// Has an absolute path, and may be set to be visible in Zed UI or not.
/// May correspond to a directory or a single file.
/// Possible examples:
/// * a drag and dropped file — may be added as an invisible, "ephemeral" entry to the current worktree
/// * a directory opened in Zed — may be added as a visible entry to the current worktree
///
/// Uses [`Entry`] to track the state of each file/directory, can look up absolute paths for entries.
pub enum Worktree {
    /// A worktree backed by the local file system, kept current by a background scanner.
    Local(LocalWorktree),
    /// A worktree mirrored from a collaborator, kept current by applying
    /// `proto::UpdateWorktree` messages.
    Remote(RemoteWorktree),
}
97
/// A worktree rooted on the local file system. Owns the latest snapshot,
/// channels for requesting rescans, and per-path diagnostics state.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Requests for the background scanner to rescan specific relative paths.
    scan_requests_tx: channel::Sender<ScanRequest>,
    /// Path prefixes the background scanner should include in future scans.
    path_prefixes_to_scan_tx: channel::Sender<Arc<Path>>,
    /// Watch pair tracking whether a background scan is currently in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    /// Holds the scanner tasks so they stay alive for the worktree's lifetime.
    _background_scanner_tasks: Vec<Task<()>>,
    /// Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    /// Full diagnostic entries per path, grouped by language server
    /// (the inner `Vec` is kept sorted by server id).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Error/warning counts per path, grouped by language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    /// Whether the underlying file system is case sensitive
    /// (assumed `true` when detection fails).
    fs_case_sensitive: bool,
    /// Whether this worktree is shown in the UI.
    visible: bool,
}
118
/// A request for the background scanner to rescan a specific set of paths.
struct ScanRequest {
    /// Paths to rescan, relative to the worktree root.
    relative_paths: Vec<Arc<Path>>,
    /// Barrier used to signal the requester once the scan has been processed.
    done: barrier::Sender,
}
123
/// A mirror of a worktree living on another machine, kept up to date by
/// applying `proto::UpdateWorktree` messages from the collab server.
pub struct RemoteWorktree {
    /// The snapshot as last observed on the foreground thread.
    snapshot: Snapshot,
    /// The snapshot that remote updates are applied to on a background task;
    /// it is copied into `snapshot` after each update.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Sender for incoming worktree updates; `None` disables further updates.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Channels resolved once the associated scan id has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    /// Error/warning counts per path, grouped by language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    /// Whether this worktree is shown in the UI.
    visible: bool,
    disconnected: bool,
}
136
/// A point-in-time view of a worktree's entries and git repositories, shared
/// by both local and remote worktrees.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root.
    abs_path: Arc<Path>,
    /// File name of the root, used for display and fuzzy matching.
    root_name: String,
    /// Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    /// Git repositories in the worktree, keyed by working-directory path.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
159
/// Metadata about a git repository discovered in the worktree.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    /// Entry id of the repository's working directory
    /// (the folder that contains `.git`).
    pub(crate) work_directory: WorkDirectoryEntry,
    /// Name of the checked-out branch, if known.
    pub(crate) branch: Option<Arc<str>>,
}
165
166impl RepositoryEntry {
167 pub fn branch(&self) -> Option<Arc<str>> {
168 self.branch.clone()
169 }
170
171 pub fn work_directory_id(&self) -> ProjectEntryId {
172 *self.work_directory
173 }
174
175 pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
176 snapshot
177 .entry_for_id(self.work_directory_id())
178 .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
179 }
180
181 pub fn build_update(&self, _: &Self) -> proto::RepositoryEntry {
182 proto::RepositoryEntry {
183 work_directory_id: self.work_directory_id().to_proto(),
184 branch: self.branch.as_ref().map(|str| str.to_string()),
185 }
186 }
187}
188
189impl From<&RepositoryEntry> for proto::RepositoryEntry {
190 fn from(value: &RepositoryEntry) -> Self {
191 proto::RepositoryEntry {
192 work_directory_id: value.work_directory.to_proto(),
193 branch: value.branch.as_ref().map(|str| str.to_string()),
194 }
195 }
196}
197
/// This path corresponds to the 'content path' (the folder that contains the .git),
/// stored relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(pub(crate) Arc<Path>);
201
202impl Default for RepositoryWorkDirectory {
203 fn default() -> Self {
204 RepositoryWorkDirectory(Arc::from(Path::new("")))
205 }
206}
207
208impl AsRef<Path> for RepositoryWorkDirectory {
209 fn as_ref(&self) -> &Path {
210 self.0.as_ref()
211 }
212}
213
/// Newtype over the project entry id of a git repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
216
217impl WorkDirectoryEntry {
218 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Result<RepoPath> {
219 let entry = worktree
220 .entry_for_id(self.0)
221 .ok_or_else(|| anyhow!("entry not found"))?;
222 let path = path
223 .strip_prefix(&entry.path)
224 .map_err(|_| anyhow!("could not relativize {:?} against {:?}", path, entry.path))?;
225 Ok(path.into())
226 }
227}
228
/// Lets a `WorkDirectoryEntry` be used anywhere a `ProjectEntryId` is expected.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
236
237impl From<ProjectEntryId> for WorkDirectoryEntry {
238 fn from(value: ProjectEntryId) -> Self {
239 WorkDirectoryEntry(value)
240 }
241}
242
/// A [`Snapshot`] augmented with state that only exists for local worktrees,
/// such as gitignore contents and handles to on-disk git repositories.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    /// Patterns parsed from the `file_scan_exclusions` setting.
    file_scan_exclusions: Vec<PathMatcher>,
    /// Patterns parsed from the `private_files` setting.
    private_files: Vec<PathMatcher>,
}
255
/// Mutable state owned by the background scanner while it traverses the file
/// system and incrementally updates the snapshot.
struct BackgroundScannerState {
    snapshot: LocalSnapshot,
    // NOTE(review): appears to track entries whose directories have already
    // been scanned — confirm against the scanner implementation.
    scanned_dirs: HashSet<ProjectEntryId>,
    path_prefixes_to_scan: HashSet<Arc<Path>>,
    paths_to_scan: HashSet<Arc<Path>>,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    changed_paths: Vec<Arc<Path>>,
    prev_snapshot: Snapshot,
}
269
/// A handle to a git repository that is open on the local file system.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    /// Scan id recorded when the `.git` directory last changed; compared in
    /// `changed_repos` to detect repository updates between snapshots.
    pub(crate) git_dir_scan_id: usize,
    /// Shared, lock-protected handle to the underlying repository.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
278
impl LocalRepositoryEntry {
    /// Shared handle to the underlying git repository.
    pub fn repo(&self) -> &Arc<Mutex<dyn GitRepository>> {
        &self.repo_ptr
    }
}
284
/// `LocalSnapshot` transparently exposes the plain [`Snapshot`] API.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
292
/// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
298
/// Messages sent from the background scanner to the foreground worktree.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced an updated snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// Entries that changed relative to the previous snapshot.
        changes: UpdatedEntriesSet,
        /// When present, dropped after the update is applied so the requester
        /// of the scan is unblocked.
        barrier: Option<barrier::Sender>,
        /// Whether scanning is still in progress after this update.
        scanning: bool,
    },
}
308
/// State that exists while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    /// Sends each new snapshot (plus its entry and git-repository changes) to
    /// the task that forwards updates to collaborators.
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    resume_updates: watch::Sender<()>,
    /// Keeps the snapshot-forwarding task alive while shared.
    _maintain_remote_snapshot: Task<Option<()>>,
}
316
/// Events emitted by a [`Worktree`] when its contents change.
#[derive(Clone)]
pub enum Event {
    /// One or more entries were created, modified, or removed.
    UpdatedEntries(UpdatedEntriesSet),
    /// One or more git repositories changed.
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
}

impl EventEmitter<Event> for Worktree {}
324
impl Worktree {
    /// Creates a worktree rooted at `path` on the local file system, builds an
    /// initial snapshot, and starts the background scanner tasks.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<Model<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();

        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        // Assume a case-sensitive file system when detection fails.
        let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });

        let closure_fs = Arc::clone(&fs);
        let closure_next_entry_id = Arc::clone(&next_entry_id);
        let closure_abs_path = abs_path.to_path_buf();
        cx.new_model(move |cx: &mut ModelContext<Worktree>| {
            // When the `file_scan_exclusions` or `private_files` settings
            // change, restart the background scanner with the new filters.
            cx.observe_global::<SettingsStore>(move |this, cx| {
                if let Self::Local(this) = this {
                    let new_file_scan_exclusions = path_matchers(
                        WorktreeSettings::get_global(cx)
                            .file_scan_exclusions
                            .as_deref(),
                        "file_scan_exclusions",
                    );
                    let new_private_files = path_matchers(
                        WorktreeSettings::get(Some(settings::SettingsLocation {
                            worktree_id: cx.handle().entity_id().as_u64() as usize,
                            path: Path::new("")
                        }), cx).private_files.as_deref(),
                        "private_files",
                    );

                    if new_file_scan_exclusions != this.snapshot.file_scan_exclusions
                        || new_private_files != this.snapshot.private_files
                    {
                        this.snapshot.file_scan_exclusions = new_file_scan_exclusions;
                        this.snapshot.private_files = new_private_files;

                        log::info!(
                            "Re-scanning directories, new scan exclude files: {:?}, new dotenv files: {:?}",
                            this.snapshot
                                .file_scan_exclusions
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>(),
                            this.snapshot
                                .private_files
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>()
                        );

                        // Swap in fresh scan channels and restart the scanner
                        // tasks with the updated snapshot.
                        let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
                        let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) =
                            channel::unbounded();
                        this.scan_requests_tx = scan_requests_tx;
                        this.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;
                        this._background_scanner_tasks = start_background_scan_tasks(
                            &closure_abs_path,
                            this.snapshot(),
                            scan_requests_rx,
                            path_prefixes_to_scan_rx,
                            Arc::clone(&closure_next_entry_id),
                            Arc::clone(&closure_fs),
                            cx,
                        );
                        this.is_scanning = watch::channel_with(true);
                    }
                }
            })
            .detach();

            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            // Initial snapshot: empty entries, scan id 1, no completed scan yet.
            let mut snapshot = LocalSnapshot {
                file_scan_exclusions: path_matchers(
                    WorktreeSettings::get_global(cx)
                        .file_scan_exclusions
                        .as_deref(),
                    "file_scan_exclusions",
                ),
                private_files: path_matchers(
                    WorktreeSettings::get(Some(SettingsLocation {
                        worktree_id: cx.handle().entity_id().as_u64() as usize,
                        path: Path::new(""),
                    }), cx).private_files.as_deref(),
                    "private_files",
                ),
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.entity_id().as_u64() as usize),
                    abs_path: abs_path.to_path_buf().into(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Insert the root entry when the path could be statted.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
            let task_snapshot = snapshot.clone();
            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                scan_requests_tx,
                path_prefixes_to_scan_tx,
                _background_scanner_tasks: start_background_scan_tasks(
                    &abs_path,
                    task_snapshot,
                    scan_requests_rx,
                    path_prefixes_to_scan_rx,
                    Arc::clone(&next_entry_id),
                    Arc::clone(&fs),
                    cx,
                ),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                fs_case_sensitive,
                visible,
            })
        })
    }
482
    /// Creates a worktree mirroring `worktree` on a collaborator's machine,
    /// applying incoming `proto::UpdateWorktree` messages as they arrive.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> Model<Self> {
        cx.new_model(|cx: &mut ModelContext<Self>| {
            // Start from an empty snapshot built from the shared metadata.
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming updates to the background snapshot off the main
            // thread, signalling `snapshot_updated_tx` after each one.
            cx.background_executor()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // On the foreground, copy the background snapshot into the model,
            // emit events, and resolve subscriptions waiting on a scan id.
            cx.spawn(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_remote_mut().unwrap();
                        this.snapshot = this.background_snapshot.lock().clone();
                        cx.emit(Event::UpdatedEntries(Arc::from([])));
                        cx.notify();
                        while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                            if this.observed_snapshot(*scan_id) {
                                let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                let _ = tx.send(());
                            } else {
                                break;
                            }
                        }
                    })?;
                }
                anyhow::Ok(())
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
562
563 pub fn as_local(&self) -> Option<&LocalWorktree> {
564 if let Worktree::Local(worktree) = self {
565 Some(worktree)
566 } else {
567 None
568 }
569 }
570
571 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
572 if let Worktree::Remote(worktree) = self {
573 Some(worktree)
574 } else {
575 None
576 }
577 }
578
579 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
580 if let Worktree::Local(worktree) = self {
581 Some(worktree)
582 } else {
583 None
584 }
585 }
586
587 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
588 if let Worktree::Remote(worktree) = self {
589 Some(worktree)
590 } else {
591 None
592 }
593 }
594
595 pub fn is_local(&self) -> bool {
596 matches!(self, Worktree::Local(_))
597 }
598
599 pub fn is_remote(&self) -> bool {
600 !self.is_local()
601 }
602
603 pub fn snapshot(&self) -> Snapshot {
604 match self {
605 Worktree::Local(worktree) => worktree.snapshot().snapshot,
606 Worktree::Remote(worktree) => worktree.snapshot(),
607 }
608 }
609
610 pub fn scan_id(&self) -> usize {
611 match self {
612 Worktree::Local(worktree) => worktree.snapshot.scan_id,
613 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
614 }
615 }
616
617 pub fn completed_scan_id(&self) -> usize {
618 match self {
619 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
620 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
621 }
622 }
623
624 pub fn is_visible(&self) -> bool {
625 match self {
626 Worktree::Local(worktree) => worktree.visible,
627 Worktree::Remote(worktree) => worktree.visible,
628 }
629 }
630
631 pub fn replica_id(&self) -> ReplicaId {
632 match self {
633 Worktree::Local(_) => 0,
634 Worktree::Remote(worktree) => worktree.replica_id,
635 }
636 }
637
638 pub fn diagnostic_summaries(
639 &self,
640 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
641 match self {
642 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
643 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
644 }
645 .iter()
646 .flat_map(|(path, summaries)| {
647 summaries
648 .iter()
649 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
650 })
651 }
652
653 pub fn abs_path(&self) -> Arc<Path> {
654 match self {
655 Worktree::Local(worktree) => worktree.abs_path.clone(),
656 Worktree::Remote(worktree) => worktree.abs_path.clone(),
657 }
658 }
659
660 pub fn root_file(&self, cx: &mut ModelContext<Self>) -> Option<Arc<File>> {
661 let entry = self.root_entry()?;
662 Some(File::for_entry(entry.clone(), cx.handle()))
663 }
664}
665
/// Spawns the two tasks that keep a local worktree current: a background
/// scanner that watches the file system, and a foreground task that applies
/// the scanner's state updates to the worktree model.
fn start_background_scan_tasks(
    abs_path: &Path,
    snapshot: LocalSnapshot,
    scan_requests_rx: channel::Receiver<ScanRequest>,
    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    next_entry_id: Arc<AtomicUsize>,
    fs: Arc<dyn Fs>,
    cx: &mut ModelContext<'_, Worktree>,
) -> Vec<Task<()>> {
    let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
    let background_scanner = cx.background_executor().spawn({
        let abs_path = abs_path.to_path_buf();
        let background = cx.background_executor().clone();
        async move {
            let events = fs.watch(&abs_path, FS_WATCH_LATENCY).await;
            // Assume a case-sensitive file system when detection fails.
            let case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
                log::error!(
                    "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
                );
                true
            });

            BackgroundScanner::new(
                snapshot,
                next_entry_id,
                fs,
                case_sensitive,
                scan_states_tx,
                background,
                scan_requests_rx,
                path_prefixes_to_scan_rx,
            )
            .run(events)
            .await;
        }
    });
    // Forward scan states into the model until either side is dropped.
    let scan_state_updater = cx.spawn(|this, mut cx| async move {
        while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                match state {
                    ScanState::Started => {
                        *this.is_scanning.0.borrow_mut() = true;
                    }
                    ScanState::Updated {
                        snapshot,
                        changes,
                        barrier,
                        scanning,
                    } => {
                        *this.is_scanning.0.borrow_mut() = scanning;
                        this.set_snapshot(snapshot, changes, cx);
                        // Dropping the barrier unblocks the scan's requester.
                        drop(barrier);
                    }
                }
                cx.notify();
            })
            .ok();
        }
    });
    vec![background_scanner, scan_state_updater]
}
728
729fn path_matchers(values: Option<&[String]>, context: &'static str) -> Vec<PathMatcher> {
730 values
731 .unwrap_or(&[])
732 .iter()
733 .sorted()
734 .filter_map(|pattern| {
735 PathMatcher::new(pattern)
736 .map(Some)
737 .unwrap_or_else(|e| {
738 log::error!(
739 "Skipping pattern {pattern} in `{}` project settings due to parsing error: {e:#}", context
740 );
741 None
742 })
743 })
744 .collect()
745}
746
impl LocalWorktree {
    /// Whether the absolute path `path` falls within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(self.abs_path.as_ref())
    }
751
    /// Loads the file at `path` into a new [`Buffer`] model, reading its
    /// contents and git diff base off the main thread.
    pub fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Model<Buffer>>> {
        let path = Arc::from(path);
        // Reserve the model slot up front so the buffer id can be derived
        // from its entity id before the asynchronous load completes.
        let reservation = cx.reserve_model();
        let buffer_id = BufferId::from(reservation.entity_id().as_non_zero_u64());
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))?
                .await?;
            // Build the text buffer on the background executor.
            let text_buffer = cx
                .background_executor()
                .spawn(async move { text::Buffer::new(0, buffer_id, contents) })
                .await;
            cx.insert_model(reservation, |_| {
                Buffer::build(
                    text_buffer,
                    diff_base,
                    Some(Arc::new(file)),
                    Capability::ReadWrite,
                )
            })
        })
    }
778
    /// Creates an empty in-memory buffer associated with `path`, without
    /// touching the file system (the `File` has no mtime or entry id yet).
    pub fn new_buffer(
        &mut self,
        path: Arc<Path>,
        cx: &mut ModelContext<Worktree>,
    ) -> Model<Buffer> {
        let worktree = cx.handle();
        cx.new_model(|cx| {
            let buffer_id = BufferId::from(cx.entity_id().as_non_zero_u64());
            let text_buffer = text::Buffer::new(0, buffer_id, "".into());
            Buffer::build(
                text_buffer,
                None,
                Some(Arc::new(File {
                    worktree,
                    path,
                    mtime: None,
                    entry_id: None,
                    is_local: true,
                    is_deleted: false,
                    is_private: false,
                })),
                Capability::ReadWrite,
            )
        })
    }
804
805 pub fn diagnostics_for_path(
806 &self,
807 path: &Path,
808 ) -> Vec<(
809 LanguageServerId,
810 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
811 )> {
812 self.diagnostics.get(path).cloned().unwrap_or_default()
813 }
814
    /// Removes all diagnostics produced by `server_id` and, if the worktree is
    /// shared, notifies collaborators with zeroed summaries.
    pub fn clear_diagnostics_for_language_server(
        &mut self,
        server_id: LanguageServerId,
        _: &mut ModelContext<Worktree>,
    ) {
        let worktree_id = self.id().to_proto();
        // Drop this server's summary for each path, sending a zeroed summary
        // to collaborators; paths with no summaries left are discarded.
        self.diagnostic_summaries
            .retain(|path, summaries_by_server_id| {
                if summaries_by_server_id.remove(&server_id).is_some() {
                    if let Some(share) = self.share.as_ref() {
                        self.client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id: share.project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path: path.to_string_lossy().to_string(),
                                    language_server_id: server_id.0 as u64,
                                    error_count: 0,
                                    warning_count: 0,
                                }),
                            })
                            .log_err();
                    }
                    !summaries_by_server_id.is_empty()
                } else {
                    true
                }
            });

        // Entries are kept sorted by server id, so a binary search locates the
        // diagnostics to remove; paths whose lists become empty are dropped.
        self.diagnostics.retain(|_, diagnostics_by_server_id| {
            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                diagnostics_by_server_id.remove(ix);
                !diagnostics_by_server_id.is_empty()
            } else {
                true
            }
        });
    }
853
    /// Replaces the diagnostics produced by `server_id` for `worktree_path`,
    /// updating the summaries and notifying collaborators when shared.
    /// Returns whether anything changed (old or new summary non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain: remove this server's entry (the list is
            // kept sorted by server id) and drop the path if it becomes empty.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            // Insert or replace this server's diagnostics, keeping the list
            // sorted by server id.
            summaries_by_server_id.insert(server_id, new_summary);
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        // Only notify collaborators when the summary actually changed state.
        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
913
    /// Installs a new snapshot, forwards it (with the entry and repository
    /// changes) to collaborators if shared, and emits change events.
    fn set_snapshot(
        &mut self,
        new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut ModelContext<Worktree>,
    ) {
        // Diff the git repositories before replacing the snapshot.
        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);

        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((
                    self.snapshot.clone(),
                    entry_changes.clone(),
                    repo_changes.clone(),
                ))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }
942
    /// Computes which git repositories differ between two snapshots by walking
    /// both repository lists (ordered by entry id) in lockstep.
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        Ordering::Less => {
                            // Repository present only in the new snapshot: added.
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        Ordering::Equal => {
                            // Present in both: changed only if its `.git` dir
                            // was touched by a different scan.
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repository_entries
                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
                                        .cloned();
                                    changes.push((
                                        entry.path.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        Ordering::Greater => {
                            // Repository present only in the old snapshot: removed.
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repository_entries
                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
                                    .cloned();
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                // Only new repositories remain: all additions.
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                // Only old repositories remain: all removals.
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repository_entries
                            .get(&RepositoryWorkDirectory(entry.path.clone()))
                            .cloned();
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Helper to copy owned key/value pairs out of the peeked references.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
1037
1038 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1039 let mut is_scanning_rx = self.is_scanning.1.clone();
1040 async move {
1041 let mut is_scanning = *is_scanning_rx.borrow();
1042 while is_scanning {
1043 if let Some(value) = is_scanning_rx.recv().await {
1044 is_scanning = value;
1045 } else {
1046 break;
1047 }
1048 }
1049 }
1050 }
1051
    /// A clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
1055
1056 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
1057 proto::WorktreeMetadata {
1058 id: self.id().to_proto(),
1059 root_name: self.root_name().to_string(),
1060 visible: self.visible,
1061 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
1062 }
1063 }
1064
    /// Reads the file at `path` from disk, returning its `File` handle, its
    /// text, and — when the file is inside a git repository — the text of the
    /// corresponding entry in the git index (used as the diff base).
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Refresh the worktree entry for this path concurrently with the load.
        let entry = self.refresh_entry(path.clone(), None, cx);

        cx.spawn(|this, mut cx| async move {
            let abs_path = abs_path?;
            let text = fs.load(&abs_path).await?;
            let mut index_task = None;
            let snapshot = this.update(&mut cx, |this, _| this.as_local().unwrap().snapshot())?;
            if let Some(repo) = snapshot.repository_for_path(&path) {
                if let Some(repo_path) = repo.work_directory.relativize(&snapshot, &path).log_err()
                {
                    if let Some(git_repo) = snapshot.git_repositories.get(&*repo.work_directory) {
                        let git_repo = git_repo.repo_ptr.clone();
                        // Load the git index text in the background; yields
                        // `None` for directories and symlinks.
                        index_task = Some(cx.background_executor().spawn({
                            let fs = fs.clone();
                            let abs_path = abs_path.clone();
                            async move {
                                let abs_path_metadata = fs
                                    .metadata(&abs_path)
                                    .await
                                    .with_context(|| {
                                        format!("loading file and FS metadata for {abs_path:?}")
                                    })
                                    .log_err()
                                    .flatten()?;
                                if abs_path_metadata.is_dir || abs_path_metadata.is_symlink {
                                    None
                                } else {
                                    git_repo.lock().load_index_text(&repo_path)
                                }
                            }
                        }));
                    }
                }
            }

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            let worktree = this
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            match entry.await? {
                Some(entry) => Ok((
                    File {
                        entry_id: Some(entry.id),
                        worktree,
                        path: entry.path,
                        mtime: entry.mtime,
                        is_local: true,
                        is_deleted: false,
                        is_private: entry.is_private,
                    },
                    text,
                    diff_base,
                )),
                None => {
                    // The path is excluded from scanning, so no entry exists;
                    // stat the file directly to build a detached `File`.
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    let is_private = snapshot.is_path_private(path.as_ref());
                    Ok((
                        File {
                            entry_id: None,
                            worktree,
                            path,
                            mtime: Some(metadata.mtime),
                            is_local: true,
                            is_deleted: false,
                            is_private,
                        },
                        text,
                        diff_base,
                    ))
                }
            }
        })
    }
1159
1160 pub fn save_buffer(
1161 &self,
1162 buffer_handle: Model<Buffer>,
1163 path: Arc<Path>,
1164 mut has_changed_file: bool,
1165 cx: &mut ModelContext<Worktree>,
1166 ) -> Task<Result<()>> {
1167 let buffer = buffer_handle.read(cx);
1168
1169 let rpc = self.client.clone();
1170 let buffer_id: u64 = buffer.remote_id().into();
1171 let project_id = self.share.as_ref().map(|share| share.project_id);
1172
1173 if buffer.file().is_some_and(|file| !file.is_created()) {
1174 has_changed_file = true;
1175 }
1176
1177 let text = buffer.as_rope().clone();
1178 let fingerprint = text.fingerprint();
1179 let version = buffer.version();
1180 let save = self.write_file(path.as_ref(), text, buffer.line_ending(), cx);
1181 let fs = Arc::clone(&self.fs);
1182 let abs_path = self.absolutize(&path);
1183 let is_private = self.snapshot.is_path_private(&path);
1184
1185 cx.spawn(move |this, mut cx| async move {
1186 let entry = save.await?;
1187 let abs_path = abs_path?;
1188 let this = this.upgrade().context("worktree dropped")?;
1189
1190 let (entry_id, mtime, path, is_dotenv) = match entry {
1191 Some(entry) => (Some(entry.id), entry.mtime, entry.path, entry.is_private),
1192 None => {
1193 let metadata = fs
1194 .metadata(&abs_path)
1195 .await
1196 .with_context(|| {
1197 format!(
1198 "Fetching metadata after saving the excluded buffer {abs_path:?}"
1199 )
1200 })?
1201 .with_context(|| {
1202 format!("Excluded buffer {path:?} got removed during saving")
1203 })?;
1204 (None, Some(metadata.mtime), path, is_private)
1205 }
1206 };
1207
1208 if has_changed_file {
1209 let new_file = Arc::new(File {
1210 entry_id,
1211 worktree: this,
1212 path,
1213 mtime,
1214 is_local: true,
1215 is_deleted: false,
1216 is_private: is_dotenv,
1217 });
1218
1219 if let Some(project_id) = project_id {
1220 rpc.send(proto::UpdateBufferFile {
1221 project_id,
1222 buffer_id,
1223 file: Some(new_file.to_proto()),
1224 })
1225 .log_err();
1226 }
1227
1228 buffer_handle.update(&mut cx, |buffer, cx| {
1229 if has_changed_file {
1230 buffer.file_updated(new_file, cx);
1231 }
1232 })?;
1233 }
1234
1235 if let Some(project_id) = project_id {
1236 rpc.send(proto::BufferSaved {
1237 project_id,
1238 buffer_id,
1239 version: serialize_version(&version),
1240 mtime: mtime.map(|time| time.into()),
1241 fingerprint: serialize_fingerprint(fingerprint),
1242 })?;
1243 }
1244
1245 buffer_handle.update(&mut cx, |buffer, cx| {
1246 buffer.did_save(version.clone(), fingerprint, mtime, cx);
1247 })?;
1248
1249 Ok(())
1250 })
1251 }
1252
1253 /// Find the lowest path in the worktree's datastructures that is an ancestor
1254 fn lowest_ancestor(&self, path: &Path) -> PathBuf {
1255 let mut lowest_ancestor = None;
1256 for path in path.ancestors() {
1257 if self.entry_for_path(path).is_some() {
1258 lowest_ancestor = Some(path.to_path_buf());
1259 break;
1260 }
1261 }
1262
1263 lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
1264 }
1265
    /// Creates a file or directory at `path` (worktree-relative), then
    /// refreshes the new entry along with every intermediate directory that
    /// was created implicitly along the way.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let path = path.into();
        let lowest_ancestor = self.lowest_ancestor(&path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background_executor().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path?).await
            } else {
                // Save an empty file (default contents and line ending).
                fs.save(&abs_path?, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            let (result, refreshes) = this.update(&mut cx, |this, cx| {
                let mut refreshes = Vec::new();
                // `lowest_ancestor` is an ancestor of `path` by construction,
                // so `strip_prefix` cannot fail here.
                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
                // Refresh each directory between the lowest pre-existing
                // ancestor and the new entry itself.
                for refresh_path in refresh_paths.ancestors() {
                    if refresh_path == Path::new("") {
                        continue;
                    }
                    let refresh_full_path = lowest_ancestor.join(refresh_path);

                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
                        refresh_full_path.into(),
                        None,
                        cx,
                    ));
                }
                (
                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
                    refreshes,
                )
            })?;
            // Intermediate-directory refreshes are best-effort; only the
            // target entry's refresh determines the returned result.
            for refresh in refreshes {
                refresh.await.log_err();
            }

            result.await
        })
    }
1314
1315 pub(crate) fn write_file(
1316 &self,
1317 path: impl Into<Arc<Path>>,
1318 text: Rope,
1319 line_ending: LineEnding,
1320 cx: &mut ModelContext<Worktree>,
1321 ) -> Task<Result<Option<Entry>>> {
1322 let path: Arc<Path> = path.into();
1323 let abs_path = self.absolutize(&path);
1324 let fs = self.fs.clone();
1325 let write = cx
1326 .background_executor()
1327 .spawn(async move { fs.save(&abs_path?, &text, line_ending).await });
1328
1329 cx.spawn(|this, mut cx| async move {
1330 write.await?;
1331 this.update(&mut cx, |this, cx| {
1332 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1333 })?
1334 .await
1335 })
1336 }
1337
    /// Removes the file or directory for the given entry from disk, then
    /// rescans the deleted path so the worktree reflects the removal.
    /// Returns `None` when no entry with that id exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let fs = self.fs.clone();

        let delete = cx.background_executor().spawn(async move {
            if entry.is_file() {
                fs.remove_file(&abs_path?, Default::default()).await?;
            } else {
                // Directories are removed recursively; a missing directory
                // is an error (`ignore_if_not_exists: false`).
                fs.remove_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(entry.path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let path = delete.await?;
            // Wait for the scanner to observe the deletion before resolving.
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entries_for_paths(vec![path])
            })?
            .recv()
            .await;
            Ok(())
        }))
    }
1375
    /// Moves the entry with `entry_id` to `new_path` on disk, then refreshes
    /// and returns the entry at its new location. Returns `Ok(None)` when no
    /// entry with that id exists.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Ok(None)),
        };
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let case_sensitive = self.fs_case_sensitive;
        let rename = cx.background_executor().spawn(async move {
            let abs_old_path = abs_old_path?;
            let abs_new_path = abs_new_path?;

            // Lowercased forms are `None` for non-UTF-8 paths, in which case
            // the comparison below simply never triggers the overwrite.
            let abs_old_path_lower = abs_old_path.to_str().map(|p| p.to_lowercase());
            let abs_new_path_lower = abs_new_path.to_str().map(|p| p.to_lowercase());

            // If we're on a case-insensitive FS and we're doing a case-only rename (i.e. `foobar` to `FOOBAR`)
            // we want to overwrite, because otherwise we run into a file-already-exists error.
            let overwrite = !case_sensitive
                && abs_old_path != abs_new_path
                && abs_old_path_lower == abs_new_path_lower;

            fs.rename(
                &abs_old_path,
                &abs_new_path,
                fs::RenameOptions {
                    overwrite,
                    ..Default::default()
                },
            )
            .await
        });

        cx.spawn(|this, mut cx| async move {
            rename.await?;
            // Refresh both the old and the new location so the move is fully
            // reflected in the worktree snapshot.
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })?
            .await
        })
    }
1425
1426 pub fn copy_entry(
1427 &self,
1428 entry_id: ProjectEntryId,
1429 new_path: impl Into<Arc<Path>>,
1430 cx: &mut ModelContext<Worktree>,
1431 ) -> Task<Result<Option<Entry>>> {
1432 let old_path = match self.entry_for_id(entry_id) {
1433 Some(entry) => entry.path.clone(),
1434 None => return Task::ready(Ok(None)),
1435 };
1436 let new_path = new_path.into();
1437 let abs_old_path = self.absolutize(&old_path);
1438 let abs_new_path = self.absolutize(&new_path);
1439 let fs = self.fs.clone();
1440 let copy = cx.background_executor().spawn(async move {
1441 copy_recursive(
1442 fs.as_ref(),
1443 &abs_old_path?,
1444 &abs_new_path?,
1445 Default::default(),
1446 )
1447 .await
1448 });
1449
1450 cx.spawn(|this, mut cx| async move {
1451 copy.await?;
1452 this.update(&mut cx, |this, cx| {
1453 this.as_local_mut()
1454 .unwrap()
1455 .refresh_entry(new_path.clone(), None, cx)
1456 })?
1457 .await
1458 })
1459 }
1460
1461 pub fn expand_entry(
1462 &mut self,
1463 entry_id: ProjectEntryId,
1464 cx: &mut ModelContext<Worktree>,
1465 ) -> Option<Task<Result<()>>> {
1466 let path = self.entry_for_id(entry_id)?.path.clone();
1467 let mut refresh = self.refresh_entries_for_paths(vec![path]);
1468 Some(cx.background_executor().spawn(async move {
1469 refresh.next().await;
1470 Ok(())
1471 }))
1472 }
1473
1474 pub fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
1475 let (tx, rx) = barrier::channel();
1476 self.scan_requests_tx
1477 .try_send(ScanRequest {
1478 relative_paths: paths,
1479 done: tx,
1480 })
1481 .ok();
1482 rx
1483 }
1484
1485 pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) {
1486 self.path_prefixes_to_scan_tx.try_send(path_prefix).ok();
1487 }
1488
    /// Rescans `path` (and `old_path`, when the entry was moved from there)
    /// and returns the refreshed entry. Returns `Ok(None)` when the path is
    /// excluded from the worktree.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        if self.is_path_excluded(path.to_path_buf()) {
            return Task::ready(Ok(None));
        }
        let paths = if let Some(old_path) = old_path.as_ref() {
            vec![old_path.clone(), path.clone()]
        } else {
            vec![path.clone()]
        };
        let mut refresh = self.refresh_entries_for_paths(paths);
        cx.spawn(move |this, mut cx| async move {
            // Wait until the scanner has processed the requested paths.
            refresh.recv().await;
            let new_entry = this.update(&mut cx, |this, _| {
                this.entry_for_path(path)
                    .cloned()
                    .ok_or_else(|| anyhow!("failed to read path after update"))
            })??;
            Ok(Some(new_entry))
        })
    }
1514
    /// Starts streaming worktree updates to `callback`, returning a receiver
    /// that resolves when the update stream terminates — or immediately, if
    /// updates were already being observed (in which case any paused update
    /// loop is also resumed).
    pub fn observe_updates<F, Fut>(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
        callback: F,
    ) -> oneshot::Receiver<()>
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: Send + Future<Output = bool>,
    {
        // Use a tiny chunk size in tests to exercise the splitting logic.
        #[cfg(any(test, feature = "test-support"))]
        const MAX_CHUNK_SIZE: usize = 2;
        #[cfg(not(any(test, feature = "test-support")))]
        const MAX_CHUNK_SIZE: usize = 256;

        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            share_tx.send(()).ok();
            // Nudge the existing update loop in case it's paused.
            *share.resume_updates.borrow_mut() = ();
            return share_rx;
        }

        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
        let (snapshots_tx, mut snapshots_rx) =
            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
        // Seed the channel with the current snapshot and empty change sets;
        // it becomes the initial full update below.
        snapshots_tx
            .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
            .ok();

        let worktree_id = cx.entity_id().as_u64();
        let _maintain_remote_snapshot = cx.background_executor().spawn(async move {
            let mut is_first = true;
            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
                let update;
                if is_first {
                    // First message carries the entire snapshot.
                    update = snapshot.build_initial_update(project_id, worktree_id);
                    is_first = false;
                } else {
                    update =
                        snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes);
                }

                // Large updates are split into bounded chunks. If the callback
                // reports failure, pause until a resume signal arrives, then
                // retry the same chunk.
                for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                    // Discard any stale resume signal before sending.
                    let _ = resume_updates_rx.try_recv();
                    loop {
                        let result = callback(update.clone());
                        if result.await {
                            break;
                        } else {
                            log::info!("waiting to resume updates");
                            if resume_updates_rx.next().await.is_none() {
                                return Some(());
                            }
                        }
                    }
                }
            }
            share_tx.send(()).ok();
            Some(())
        });

        self.share = Some(ShareState {
            project_id,
            snapshots_tx,
            resume_updates: resume_updates_tx,
            _maintain_remote_snapshot,
        });
        share_rx
    }
1585
    /// Shares this worktree under the given project id: pushes all current
    /// diagnostic summaries to the server up front, then streams worktree
    /// updates. The returned task fails with "share ended" if sharing is
    /// dropped before the update stream completes.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let client = self.client.clone();

        // Send existing diagnostics first; bail out on the first send error.
        for (path, summaries) in &self.diagnostic_summaries {
            for (&server_id, summary) in summaries {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id: cx.entity_id().as_u64(),
                    summary: Some(summary.to_proto(server_id, path)),
                }) {
                    return Task::ready(Err(e));
                }
            }
        }

        // Forward each update to the server; a failed request pauses the
        // update loop inside `observe_updates`.
        let rx = self.observe_updates(project_id, cx, move |update| {
            client.request(update).map(|result| result.is_ok())
        });
        cx.background_executor()
            .spawn(async move { rx.await.map_err(|_| anyhow!("share ended")) })
    }
1607
1608 pub fn unshare(&mut self) {
1609 self.share.take();
1610 }
1611
    /// Whether this worktree is currently being shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1615}
1616
impl RemoteWorktree {
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks this worktree as disconnected: stops accepting remote updates
    /// and drops every pending snapshot subscription.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Saves the buffer by asking the host to write it, then applies the
    /// version and mtime reported back by the host.
    pub fn save_buffer(
        &self,
        buffer_handle: Model<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id().into();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.spawn(move |_, mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            // The save response carries no fingerprint; use the default.
            let fingerprint = RopeFingerprint::default();
            let mtime = response.mtime.map(|mtime| mtime.into());

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            })?;

            Ok(())
        })
    }

    /// Enqueues a worktree update received from the host. Silently dropped
    /// after disconnection (when `updates_tx` has been taken).
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot with at least the given scan id has been observed.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once this worktree has observed a
    /// snapshot with at least the given scan id, or errors if the worktree
    /// disconnects first.
    pub fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes the receiver below resolve with an error.
            drop(tx);
        } else {
            // Keep the subscription list sorted by scan id.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Applies a diagnostic summary received from the host for `path`,
    /// removing the server's entry (and the path's map) when it is empty.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry sent by the host, first waiting for the snapshot
    /// with `scan_id` so the insertion is applied in order.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                // Keep the foreground snapshot in sync with the background one.
                worktree.snapshot = snapshot.clone();
                entry
            })?
        })
    }

    /// Deletes an entry at the host's request, first waiting for the
    /// snapshot with `scan_id` so the deletion is applied in order.
    pub fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(move |this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                // Keep the foreground snapshot in sync with the background one.
                worktree.snapshot = snapshot.clone();
            })?;
            Ok(())
        })
    }
}
1755
1756impl Snapshot {
1757 pub fn id(&self) -> WorktreeId {
1758 self.id
1759 }
1760
1761 pub fn abs_path(&self) -> &Arc<Path> {
1762 &self.abs_path
1763 }
1764
1765 pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
1766 if path
1767 .components()
1768 .any(|component| !matches!(component, std::path::Component::Normal(_)))
1769 {
1770 return Err(anyhow!("invalid path"));
1771 }
1772 if path.file_name().is_some() {
1773 Ok(self.abs_path.join(path))
1774 } else {
1775 Ok(self.abs_path.to_path_buf())
1776 }
1777 }
1778
1779 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1780 self.entries_by_id.get(&entry_id, &()).is_some()
1781 }
1782
1783 fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1784 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1785 let old_entry = self.entries_by_id.insert_or_replace(
1786 PathEntry {
1787 id: entry.id,
1788 path: entry.path.clone(),
1789 is_ignored: entry.is_ignored,
1790 scan_id: 0,
1791 },
1792 &(),
1793 );
1794 if let Some(old_entry) = old_entry {
1795 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1796 }
1797 self.entries_by_path.insert_or_replace(entry.clone(), &());
1798 Ok(entry)
1799 }
1800
1801 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1802 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1803 self.entries_by_path = {
1804 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1805 let mut new_entries_by_path =
1806 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1807 while let Some(entry) = cursor.item() {
1808 if entry.path.starts_with(&removed_entry.path) {
1809 self.entries_by_id.remove(&entry.id, &());
1810 cursor.next(&());
1811 } else {
1812 break;
1813 }
1814 }
1815 new_entries_by_path.append(cursor.suffix(&()), &());
1816 new_entries_by_path
1817 };
1818
1819 Some(removed_entry.path)
1820 }
1821
1822 #[cfg(any(test, feature = "test-support"))]
1823 pub fn status_for_file(&self, path: impl Into<PathBuf>) -> Option<GitFileStatus> {
1824 let path = path.into();
1825 self.entries_by_path
1826 .get(&PathKey(Arc::from(path)), &())
1827 .and_then(|entry| entry.git_status)
1828 }
1829
1830 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1831 let mut entries_by_path_edits = Vec::new();
1832 let mut entries_by_id_edits = Vec::new();
1833
1834 for entry_id in update.removed_entries {
1835 let entry_id = ProjectEntryId::from_proto(entry_id);
1836 entries_by_id_edits.push(Edit::Remove(entry_id));
1837 if let Some(entry) = self.entry_for_id(entry_id) {
1838 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1839 }
1840 }
1841
1842 for entry in update.updated_entries {
1843 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1844 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1845 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1846 }
1847 if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
1848 if old_entry.id != entry.id {
1849 entries_by_id_edits.push(Edit::Remove(old_entry.id));
1850 }
1851 }
1852 entries_by_id_edits.push(Edit::Insert(PathEntry {
1853 id: entry.id,
1854 path: entry.path.clone(),
1855 is_ignored: entry.is_ignored,
1856 scan_id: 0,
1857 }));
1858 entries_by_path_edits.push(Edit::Insert(entry));
1859 }
1860
1861 self.entries_by_path.edit(entries_by_path_edits, &());
1862 self.entries_by_id.edit(entries_by_id_edits, &());
1863
1864 update.removed_repositories.sort_unstable();
1865 self.repository_entries.retain(|_, entry| {
1866 if let Ok(_) = update
1867 .removed_repositories
1868 .binary_search(&entry.work_directory.to_proto())
1869 {
1870 false
1871 } else {
1872 true
1873 }
1874 });
1875
1876 for repository in update.updated_repositories {
1877 let work_directory_entry: WorkDirectoryEntry =
1878 ProjectEntryId::from_proto(repository.work_directory_id).into();
1879
1880 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1881 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1882 if self.repository_entries.get(&work_directory).is_some() {
1883 self.repository_entries.update(&work_directory, |repo| {
1884 repo.branch = repository.branch.map(Into::into);
1885 });
1886 } else {
1887 self.repository_entries.insert(
1888 work_directory,
1889 RepositoryEntry {
1890 work_directory: work_directory_entry,
1891 branch: repository.branch.map(Into::into),
1892 },
1893 )
1894 }
1895 } else {
1896 log::error!("no work directory entry for repository {:?}", repository)
1897 }
1898 }
1899
1900 self.scan_id = update.scan_id as usize;
1901 if update.is_last_update {
1902 self.completed_scan_id = update.scan_id as usize;
1903 }
1904
1905 Ok(())
1906 }
1907
1908 pub fn file_count(&self) -> usize {
1909 self.entries_by_path.summary().file_count
1910 }
1911
1912 pub fn visible_file_count(&self) -> usize {
1913 self.entries_by_path.summary().non_ignored_file_count
1914 }
1915
1916 fn traverse_from_offset(
1917 &self,
1918 include_dirs: bool,
1919 include_ignored: bool,
1920 start_offset: usize,
1921 ) -> Traversal {
1922 let mut cursor = self.entries_by_path.cursor();
1923 cursor.seek(
1924 &TraversalTarget::Count {
1925 count: start_offset,
1926 include_dirs,
1927 include_ignored,
1928 },
1929 Bias::Right,
1930 &(),
1931 );
1932 Traversal {
1933 cursor,
1934 include_dirs,
1935 include_ignored,
1936 }
1937 }
1938
1939 fn traverse_from_path(
1940 &self,
1941 include_dirs: bool,
1942 include_ignored: bool,
1943 path: &Path,
1944 ) -> Traversal {
1945 let mut cursor = self.entries_by_path.cursor();
1946 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1947 Traversal {
1948 cursor,
1949 include_dirs,
1950 include_ignored,
1951 }
1952 }
1953
1954 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1955 self.traverse_from_offset(false, include_ignored, start)
1956 }
1957
1958 pub fn entries(&self, include_ignored: bool) -> Traversal {
1959 self.traverse_from_offset(true, include_ignored, 0)
1960 }
1961
1962 pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
1963 self.repository_entries
1964 .iter()
1965 .map(|(path, entry)| (&path.0, entry))
1966 }
1967
1968 /// Get the repository whose work directory contains the given path.
1969 pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
1970 self.repository_entries
1971 .get(&RepositoryWorkDirectory(path.into()))
1972 .cloned()
1973 }
1974
1975 /// Get the repository whose work directory contains the given path.
1976 pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1977 self.repository_and_work_directory_for_path(path)
1978 .map(|e| e.1)
1979 }
1980
1981 pub fn repository_and_work_directory_for_path(
1982 &self,
1983 path: &Path,
1984 ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
1985 self.repository_entries
1986 .iter()
1987 .filter(|(workdir_path, _)| path.starts_with(workdir_path))
1988 .last()
1989 .map(|(path, repo)| (path.clone(), repo.clone()))
1990 }
1991
1992 /// Given an ordered iterator of entries, returns an iterator of those entries,
1993 /// along with their containing git repository.
1994 pub fn entries_with_repositories<'a>(
1995 &'a self,
1996 entries: impl 'a + Iterator<Item = &'a Entry>,
1997 ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
1998 let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
1999 let mut repositories = self.repositories().peekable();
2000 entries.map(move |entry| {
2001 while let Some((repo_path, _)) = containing_repos.last() {
2002 if !entry.path.starts_with(repo_path) {
2003 containing_repos.pop();
2004 } else {
2005 break;
2006 }
2007 }
2008 while let Some((repo_path, _)) = repositories.peek() {
2009 if entry.path.starts_with(repo_path) {
2010 containing_repos.push(repositories.next().unwrap());
2011 } else {
2012 break;
2013 }
2014 }
2015 let repo = containing_repos.last().map(|(_, repo)| *repo);
2016 (entry, repo)
2017 })
2018 }
2019
2020 /// Updates the `git_status` of the given entries such that files'
2021 /// statuses bubble up to their ancestor directories.
2022 pub fn propagate_git_statuses(&self, result: &mut [Entry]) {
2023 let mut cursor = self
2024 .entries_by_path
2025 .cursor::<(TraversalProgress, GitStatuses)>();
2026 let mut entry_stack = Vec::<(usize, GitStatuses)>::new();
2027
2028 let mut result_ix = 0;
2029 loop {
2030 let next_entry = result.get(result_ix);
2031 let containing_entry = entry_stack.last().map(|(ix, _)| &result[*ix]);
2032
2033 let entry_to_finish = match (containing_entry, next_entry) {
2034 (Some(_), None) => entry_stack.pop(),
2035 (Some(containing_entry), Some(next_path)) => {
2036 if !next_path.path.starts_with(&containing_entry.path) {
2037 entry_stack.pop()
2038 } else {
2039 None
2040 }
2041 }
2042 (None, Some(_)) => None,
2043 (None, None) => break,
2044 };
2045
2046 if let Some((entry_ix, prev_statuses)) = entry_to_finish {
2047 cursor.seek_forward(
2048 &TraversalTarget::PathSuccessor(&result[entry_ix].path),
2049 Bias::Left,
2050 &(),
2051 );
2052
2053 let statuses = cursor.start().1 - prev_statuses;
2054
2055 result[entry_ix].git_status = if statuses.conflict > 0 {
2056 Some(GitFileStatus::Conflict)
2057 } else if statuses.modified > 0 {
2058 Some(GitFileStatus::Modified)
2059 } else if statuses.added > 0 {
2060 Some(GitFileStatus::Added)
2061 } else {
2062 None
2063 };
2064 } else {
2065 if result[result_ix].is_dir() {
2066 cursor.seek_forward(
2067 &TraversalTarget::Path(&result[result_ix].path),
2068 Bias::Left,
2069 &(),
2070 );
2071 entry_stack.push((result_ix, cursor.start().1));
2072 }
2073 result_ix += 1;
2074 }
2075 }
2076 }
2077
2078 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
2079 let empty_path = Path::new("");
2080 self.entries_by_path
2081 .cursor::<()>()
2082 .filter(move |entry| entry.path.as_ref() != empty_path)
2083 .map(|entry| &entry.path)
2084 }
2085
2086 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
2087 let mut cursor = self.entries_by_path.cursor();
2088 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
2089 let traversal = Traversal {
2090 cursor,
2091 include_dirs: true,
2092 include_ignored: true,
2093 };
2094 ChildEntriesIter {
2095 traversal,
2096 parent_path,
2097 }
2098 }
2099
2100 pub fn descendent_entries<'a>(
2101 &'a self,
2102 include_dirs: bool,
2103 include_ignored: bool,
2104 parent_path: &'a Path,
2105 ) -> DescendentEntriesIter<'a> {
2106 let mut cursor = self.entries_by_path.cursor();
2107 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
2108 let mut traversal = Traversal {
2109 cursor,
2110 include_dirs,
2111 include_ignored,
2112 };
2113
2114 if traversal.end_offset() == traversal.start_offset() {
2115 traversal.advance();
2116 }
2117
2118 DescendentEntriesIter {
2119 traversal,
2120 parent_path,
2121 }
2122 }
2123
2124 pub fn root_entry(&self) -> Option<&Entry> {
2125 self.entry_for_path("")
2126 }
2127
2128 pub fn root_name(&self) -> &str {
2129 &self.root_name
2130 }
2131
2132 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
2133 self.repository_entries
2134 .get(&RepositoryWorkDirectory(Path::new("").into()))
2135 .map(|entry| entry.to_owned())
2136 }
2137
2138 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
2139 self.repository_entries.values()
2140 }
2141
2142 pub fn scan_id(&self) -> usize {
2143 self.scan_id
2144 }
2145
2146 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
2147 let path = path.as_ref();
2148 self.traverse_from_path(true, true, path)
2149 .entry()
2150 .and_then(|entry| {
2151 if entry.path.as_ref() == path {
2152 Some(entry)
2153 } else {
2154 None
2155 }
2156 })
2157 }
2158
2159 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
2160 let entry = self.entries_by_id.get(&id, &())?;
2161 self.entry_for_path(&entry.path)
2162 }
2163
2164 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
2165 self.entry_for_path(path.as_ref()).map(|e| e.inode)
2166 }
2167}
2168
2169impl LocalSnapshot {
    /// Returns the local on-disk state for the given repository entry, if
    /// its work directory is being tracked in this snapshot.
    pub fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }
2173
    /// Finds the repository whose work directory contains `path`, returning
    /// that work directory together with its local repository state.
    pub(crate) fn local_repo_for_path(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, &LocalRepositoryEntry)> {
        let (path, repo) = self.repository_and_work_directory_for_path(path)?;
        Some((path, self.git_repositories.get(&repo.work_directory_id())?))
    }
2181
2182 pub fn local_git_repo(&self, path: &Path) -> Option<Arc<Mutex<dyn GitRepository>>> {
2183 self.local_repo_for_path(path)
2184 .map(|(_, entry)| entry.repo_ptr.clone())
2185 }
2186
    /// Builds an incremental `UpdateWorktree` message describing the given
    /// entry and repository changes relative to the previously-sent state.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updated_repositories = Vec::new();
        let mut removed_repositories = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        for (work_dir_path, change) in repo_changes.iter() {
            let new_repo = self
                .repository_entries
                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
            match (&change.old_repository, new_repo) {
                // Repository still exists: send only what changed.
                (Some(old_repo), Some(new_repo)) => {
                    updated_repositories.push(new_repo.build_update(old_repo));
                }
                // Newly discovered repository: send it in full.
                (None, Some(new_repo)) => {
                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
                }
                // Repository disappeared.
                (Some(old_repo), None) => {
                    removed_repositories.push(old_repo.work_directory.0.to_proto());
                }
                _ => {}
            }
        }

        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);
        removed_repositories.sort_unstable();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        // An entry that was both removed and re-added is reported as updated
        // only.
        // TODO - optimize, knowing that removed_entries are sorted.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }
2246
2247 fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
2248 let mut updated_entries = self
2249 .entries_by_path
2250 .iter()
2251 .map(proto::Entry::from)
2252 .collect::<Vec<_>>();
2253 updated_entries.sort_unstable_by_key(|e| e.id);
2254
2255 let mut updated_repositories = self
2256 .repository_entries
2257 .values()
2258 .map(proto::RepositoryEntry::from)
2259 .collect::<Vec<_>>();
2260 updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
2261
2262 proto::UpdateWorktree {
2263 project_id,
2264 worktree_id,
2265 abs_path: self.abs_path().to_string_lossy().into(),
2266 root_name: self.root_name().to_string(),
2267 updated_entries,
2268 removed_entries: Vec::new(),
2269 scan_id: self.scan_id as u64,
2270 is_last_update: self.completed_scan_id == self.scan_id,
2271 updated_repositories,
2272 removed_repositories: Vec::new(),
2273 }
2274 }
2275
    /// Inserts (or replaces) `entry` in both entry indices, keeping them in
    /// sync, and reloads the corresponding gitignore if the entry is a
    /// `.gitignore` file. Returns the entry as inserted.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    // The `true` flag marks this ignore as needing a rescan of
                    // the paths it governs.
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Don't downgrade a directory's load state: if it was already loaded
        // (or unloaded), keep that kind instead of `PendingDir`.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            // A different entry previously occupied this path; drop its id
            // from the id index so the two indices stay consistent.
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
2321
2322 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2323 let mut inodes = TreeSet::default();
2324 for ancestor in path.ancestors().skip(1) {
2325 if let Some(entry) = self.entry_for_path(ancestor) {
2326 inodes.insert(entry.inode);
2327 }
2328 }
2329 inodes
2330 }
2331
    /// Builds the stack of gitignores governing `abs_path` by walking up its
    /// ancestors (stopping at the first directory containing `.git`) and then
    /// layering the collected ignores from outermost to innermost.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for (index, ancestor) in abs_path.ancestors().enumerate() {
            // Skip index 0 (`abs_path` itself): only ancestors can contribute
            // ignores that apply to it.
            if index > 0 {
                if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                    new_ignores.push((ancestor, Some(ignore.clone())));
                } else {
                    new_ignores.push((ancestor, None));
                }
            }
            // A `.git` directory marks the repository root; gitignores above
            // it don't apply.
            if ancestor.join(&*DOT_GIT).is_dir() {
                break;
            }
        }

        // Apply ignores outermost-first. If any ancestor directory is itself
        // ignored, everything beneath it is ignored and we can stop early.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
2363
    #[cfg(test)]
    /// Test helper: iterates over directory entries that have been loaded
    /// despite being external or ignored (i.e. explicitly expanded).
    pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
        self.entries_by_path
            .cursor::<()>()
            .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
    }
2370
    #[cfg(test)]
    /// Test helper: asserts the snapshot's internal invariants — index
    /// consistency, traversal ordering, and (optionally) gitignore bookkeeping.
    pub fn check_invariants(&self, git_state: bool) {
        use pretty_assertions::assert_eq;

        // Both entry indices must describe the same set of (path, id) pairs.
        assert_eq!(
            self.entries_by_path
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        // The file traversals must yield exactly the file entries, in path
        // order, with the visible traversal skipping ignored/external files.
        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>() {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if !entry.is_ignored && !entry.is_external {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // A depth-first walk via `child_entries` must visit the same paths in
        // the same order as iterating `entries_by_path` directly.
        let mut bfs_paths = Vec::new();
        let mut stack = self
            .root_entry()
            .map(|e| e.path.as_ref())
            .into_iter()
            .collect::<Vec<_>>();
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>()
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        // The public `entries` traversal must agree as well.
        let dfs_paths_via_traversal = self
            .entries(true)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        if git_state {
            // Every tracked gitignore must correspond to a loaded directory
            // entry containing a `.gitignore` file.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }
    }
2441
2442 #[cfg(test)]
2443 pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
2444 let mut paths = Vec::new();
2445 for entry in self.entries_by_path.cursor::<()>() {
2446 if include_ignored || !entry.is_ignored {
2447 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
2448 }
2449 }
2450 paths.sort_by(|a, b| a.0.cmp(b.0));
2451 paths
2452 }
2453
2454 pub fn is_path_private(&self, path: &Path) -> bool {
2455 path.ancestors().any(|ancestor| {
2456 self.private_files
2457 .iter()
2458 .any(|exclude_matcher| exclude_matcher.is_match(&ancestor))
2459 })
2460 }
2461
2462 pub fn is_path_excluded(&self, mut path: PathBuf) -> bool {
2463 loop {
2464 if self
2465 .file_scan_exclusions
2466 .iter()
2467 .any(|exclude_matcher| exclude_matcher.is_match(&path))
2468 {
2469 return true;
2470 }
2471 if !path.pop() {
2472 return false;
2473 }
2474 }
2475 }
2476}
2477
impl BackgroundScannerState {
    /// Whether the scanner should descend into the directory `entry`.
    /// Ignored and external directories are skipped unless they contain
    /// `.git`, were scanned before, or were explicitly requested.
    fn should_scan_directory(&self, entry: &Entry) -> bool {
        (!entry.is_external && !entry.is_ignored)
            || entry.path.file_name() == Some(*DOT_GIT)
            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
            || self
                .paths_to_scan
                .iter()
                .any(|p| p.starts_with(&entry.path))
            || self
                .path_prefixes_to_scan
                .iter()
                .any(|p| entry.path.starts_with(p))
    }

    /// Queues a `ScanJob` for the directory `entry`, unless the entry's inode
    /// already appears among its ancestors' inodes (which would make the
    /// traversal revisit the same directory).
    fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
        let path = entry.path.clone();
        let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
        let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);
        let mut containing_repository = None;
        // Only non-ignored directories carry repository context into the job.
        if !ignore_stack.is_abs_path_ignored(&abs_path, true) {
            if let Some((workdir_path, repo)) = self.snapshot.local_repo_for_path(&path) {
                if let Ok(repo_path) = path.strip_prefix(&workdir_path.0) {
                    containing_repository = Some((
                        workdir_path,
                        repo.repo_ptr.clone(),
                        repo.repo_ptr.lock().staged_statuses(repo_path),
                    ));
                }
            }
        }
        if !ancestor_inodes.contains(&entry.inode) {
            ancestor_inodes.insert(entry.inode);
            scan_job_tx
                .try_send(ScanJob {
                    abs_path,
                    path,
                    ignore_stack,
                    scan_queue: scan_job_tx.clone(),
                    ancestor_inodes,
                    is_external: entry.is_external,
                    containing_repository,
                })
                .unwrap();
        }
    }

    /// Reassigns `entry`'s id to keep it stable across rescans: prefer the id
    /// of a removed entry with the same inode, otherwise the id of the
    /// existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Inserts `entry` into the snapshot (reusing a stable id) and, if it is
    /// a `.git` directory, registers the corresponding repository.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        self.reuse_entry_id(&mut entry);
        let entry = self.snapshot.insert_entry(entry, fs);
        if entry.path.file_name() == Some(&DOT_GIT) {
            self.build_git_repository(entry.path.clone(), fs);
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);

        entry
    }

    /// Records the scanned children of `parent_path`, marking the parent as
    /// a loaded directory and registering its gitignore, if any.
    fn populate_dir(
        &mut self,
        parent_path: &Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = if let Some(parent_entry) = self
            .snapshot
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
            EntryKind::Dir => {}
            // The parent is no longer a directory; nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
            self.snapshot
                .ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        let parent_entry_id = parent_entry.id;
        self.scanned_dirs.insert(parent_entry_id);
        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        // Mirror every child into both entry indices.
        for entry in entries {
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.snapshot.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // Track the parent as changed, keeping `changed_paths` sorted.
        if let Err(ix) = self.changed_paths.binary_search(parent_path) {
            self.changed_paths.insert(ix, parent_path.clone());
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }

    /// Removes `path` and everything beneath it from the snapshot, recording
    /// removed ids by inode so they can be reused if the paths reappear.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into [before `path`] + [subtree at `path`] +
            // [after], keeping the outer two halves.
            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.append(cursor.suffix(&()), &());
        }
        self.snapshot.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the highest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a `.gitignore` means its parent's ignore state is stale.
        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
            if let Some((_, needs_update)) = self
                .snapshot
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *needs_update = true;
            }
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }

    /// Reloads the repositories whose `.git` directories changed, and prunes
    /// repositories whose `.git` entries no longer exist.
    fn reload_repositories(&mut self, dot_git_dirs_to_reload: &HashSet<PathBuf>, fs: &dyn Fs) {
        let scan_id = self.snapshot.scan_id;

        for dot_git_dir in dot_git_dirs_to_reload {
            // If there is already a repository for this .git directory, reload
            // the status for all of its files.
            let repository = self
                .snapshot
                .git_repositories
                .iter()
                .find_map(|(entry_id, repo)| {
                    (repo.git_dir_path.as_ref() == dot_git_dir).then(|| (*entry_id, repo.clone()))
                });
            match repository {
                None => {
                    self.build_git_repository(Arc::from(dot_git_dir.as_path()), fs);
                }
                Some((entry_id, repository)) => {
                    // Already reloaded during this scan.
                    if repository.git_dir_scan_id == scan_id {
                        continue;
                    }
                    let Some(work_dir) = self
                        .snapshot
                        .entry_for_id(entry_id)
                        .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
                    else {
                        continue;
                    };

                    log::info!("reload git repository {dot_git_dir:?}");
                    let repository = repository.repo_ptr.lock();
                    let branch = repository.branch_name();
                    repository.reload_index();

                    self.snapshot
                        .git_repositories
                        .update(&entry_id, |entry| entry.git_dir_scan_id = scan_id);
                    self.snapshot
                        .snapshot
                        .repository_entries
                        .update(&work_dir, |entry| entry.branch = branch.map(Into::into));

                    self.update_git_statuses(&work_dir, &*repository);
                }
            }
        }

        // Remove any git repositories whose .git entry no longer exists.
        let snapshot = &mut self.snapshot;
        let mut ids_to_preserve = HashSet::default();
        for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
            let exists_in_snapshot = snapshot
                .entry_for_id(work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                });
            if exists_in_snapshot {
                ids_to_preserve.insert(work_directory_id);
            } else {
                // A `.git` dir that is excluded from scanning won't have a
                // snapshot entry; keep its repository as long as the directory
                // still exists on disk.
                let git_dir_abs_path = snapshot.abs_path().join(&entry.git_dir_path);
                let git_dir_excluded = snapshot.is_path_excluded(entry.git_dir_path.to_path_buf());
                if git_dir_excluded
                    && !matches!(smol::block_on(fs.metadata(&git_dir_abs_path)), Ok(None))
                {
                    ids_to_preserve.insert(work_directory_id);
                }
            }
        }
        snapshot
            .git_repositories
            .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
        snapshot
            .repository_entries
            .retain(|_, entry| ids_to_preserve.contains(&entry.work_directory.0));
    }

    /// Opens and registers the git repository for the `.git` directory at
    /// `dot_git_path`, computing initial git statuses. Returns `None` when
    /// no repository should be built (nested `.git`, worktree root itself,
    /// missing work-directory entry, already registered, or open failure).
    fn build_git_repository(
        &mut self,
        dot_git_path: Arc<Path>,
        fs: &dyn Fs,
    ) -> Option<(
        RepositoryWorkDirectory,
        Arc<Mutex<dyn GitRepository>>,
        TreeMap<RepoPath, GitFileStatus>,
    )> {
        let work_dir_path: Arc<Path> = match dot_git_path.parent() {
            Some(parent_dir) => {
                // Guard against repositories inside the repository metadata
                if parent_dir.iter().any(|component| component == *DOT_GIT) {
                    log::info!(
                        "not building git repository for nested `.git` directory, `.git` path in the worktree: {dot_git_path:?}"
                    );
                    return None;
                };
                log::info!(
                    "building git repository, `.git` path in the worktree: {dot_git_path:?}"
                );
                parent_dir.into()
            }
            None => {
                // `dot_git_path.parent().is_none()` means `.git` directory is the opened worktree itself,
                // no files inside that directory are tracked by git, so no need to build the repo around it
                log::info!(
                    "not building git repository for the worktree itself, `.git` path in the worktree: {dot_git_path:?}"
                );
                return None;
            }
        };

        let work_dir_id = self
            .snapshot
            .entry_for_path(work_dir_path.clone())
            .map(|entry| entry.id)?;

        // Already registered for this work directory.
        if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
            return None;
        }

        let abs_path = self.snapshot.abs_path.join(&dot_git_path);
        let repository = fs.open_repo(abs_path.as_path())?;
        let work_directory = RepositoryWorkDirectory(work_dir_path.clone());

        let repo_lock = repository.lock();
        self.snapshot.repository_entries.insert(
            work_directory.clone(),
            RepositoryEntry {
                work_directory: work_dir_id.into(),
                branch: repo_lock.branch_name().map(Into::into),
            },
        );

        let staged_statuses = self.update_git_statuses(&work_directory, &*repo_lock);
        drop(repo_lock);

        self.snapshot.git_repositories.insert(
            work_dir_id,
            LocalRepositoryEntry {
                git_dir_scan_id: 0,
                repo_ptr: repository.clone(),
                git_dir_path: dot_git_path.clone(),
            },
        );

        Some((work_directory, repository, staged_statuses))
    }

    /// Recomputes the git status of every entry under `work_directory`,
    /// recording which paths changed, and returns the repository's staged
    /// statuses keyed by repo-relative path.
    fn update_git_statuses(
        &mut self,
        work_directory: &RepositoryWorkDirectory,
        repo: &dyn GitRepository,
    ) -> TreeMap<RepoPath, GitFileStatus> {
        let staged_statuses = repo.staged_statuses(Path::new(""));

        let mut changes = vec![];
        let mut edits = vec![];

        for mut entry in self
            .snapshot
            .descendent_entries(false, false, &work_directory.0)
            .cloned()
        {
            let Ok(repo_path) = entry.path.strip_prefix(&work_directory.0) else {
                continue;
            };
            // Entries without an mtime are skipped; the unstaged-status check
            // below requires one.
            let Some(mtime) = entry.mtime else {
                continue;
            };
            let repo_path = RepoPath(repo_path.to_path_buf());
            let git_file_status = combine_git_statuses(
                staged_statuses.get(&repo_path).copied(),
                repo.unstaged_status(&repo_path, mtime),
            );
            // Only touch entries whose status actually changed.
            if entry.git_status != git_file_status {
                entry.git_status = git_file_status;
                changes.push(entry.path.clone());
                edits.push(Edit::Insert(entry));
            }
        }

        self.snapshot.entries_by_path.edit(edits, &());
        util::extend_sorted(&mut self.changed_paths, changes, usize::MAX, Ord::cmp);
        staged_statuses
    }
}
2828
2829async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2830 let contents = fs.load(abs_path).await?;
2831 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2832 let mut builder = GitignoreBuilder::new(parent);
2833 for line in contents.lines() {
2834 builder.add_line(Some(abs_path.into()), line)?;
2835 }
2836 Ok(builder.build()?)
2837}
2838
2839impl WorktreeId {
2840 pub fn from_usize(handle_id: usize) -> Self {
2841 Self(handle_id)
2842 }
2843
2844 pub fn from_proto(id: u64) -> Self {
2845 Self(id as usize)
2846 }
2847
2848 pub fn to_proto(&self) -> u64 {
2849 self.0 as u64
2850 }
2851
2852 pub fn to_usize(&self) -> usize {
2853 self.0
2854 }
2855}
2856
impl fmt::Display for WorktreeId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the inner usize so formatter flags (width, etc.) apply.
        self.0.fmt(f)
    }
}
2862
/// Allows a `Worktree` of either variant to be used as its shared `Snapshot`.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
2873
/// Allows a `LocalWorktree` to be used as its `LocalSnapshot`.
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2881
/// Allows a `RemoteWorktree` to be used as its `Snapshot`.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2889
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Debug output is delegated to the snapshot's representation.
        self.snapshot.fmt(f)
    }
}
2895
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Wrappers that render the two entry trees compactly: the path index
        // as a path→id map, the id index as a list.
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl<'a> fmt::Debug for EntriesByPath<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl<'a> fmt::Debug for EntriesById<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}
2923
/// A file (or the worktree root) that belongs to a worktree, as seen by a
/// buffer.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: Model<Worktree>,
    /// The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time; `None` if unknown.
    pub mtime: Option<SystemTime>,
    /// The id of the corresponding worktree entry, if any.
    pub entry_id: Option<ProjectEntryId>,
    /// Whether the file lives on the local filesystem (vs. a remote worktree).
    pub is_local: bool,
    /// Whether the file has been deleted from the worktree.
    pub is_deleted: bool,
    /// Whether the file matches the worktree's private-file patterns.
    pub is_private: bool,
}
2934
impl language::File for File {
    /// Returns `self` as a local file only when it is backed by the local
    /// filesystem.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> Option<SystemTime> {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Returns a display path that includes the worktree: the worktree's root
    /// name for visible worktrees, otherwise its absolute path (with the home
    /// directory abbreviated to `~` for local worktrees).
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means this file *is* the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn worktree_id(&self) -> usize {
        self.worktree.entity_id().as_u64() as usize
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file for transmission to collaborators.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.entity_id().as_u64(),
            entry_id: self.entry_id.map(|id| id.to_proto()),
            path: self.path.to_string_lossy().into(),
            mtime: self.mtime.map(|time| time.into()),
            is_deleted: self.is_deleted,
        }
    }

    fn is_private(&self) -> bool {
        self.is_private
    }
}
3010
impl language::LocalFile for File {
    /// Returns the file's absolute path by joining its relative path onto the
    /// worktree's absolute path.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
        if self.path.as_ref() == Path::new("") {
            worktree_path.to_path_buf()
        } else {
            worktree_path.join(&self.path)
        }
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background_executor()
            .spawn(async move { fs.load(&abs_path?).await })
    }

    /// Notifies collaborators that the buffer for this file was reloaded from
    /// disk. Only sends when the worktree is currently shared.
    fn buffer_reloaded(
        &self,
        buffer_id: BufferId,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: Option<SystemTime>,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id: buffer_id.into(),
                    version: serialize_version(version),
                    mtime: mtime.map(|time| time.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
3054
impl File {
    /// Creates a local file handle for an existing worktree entry.
    pub fn for_entry(entry: Entry, worktree: Model<Worktree>) -> Arc<Self> {
        Arc::new(Self {
            worktree,
            path: entry.path.clone(),
            mtime: entry.mtime,
            entry_id: Some(entry.id),
            is_local: true,
            is_deleted: false,
            is_private: entry.is_private,
        })
    }

    /// Deserializes a file received from a collaborator, validating that it
    /// belongs to the given remote worktree.
    ///
    /// # Errors
    /// Fails if `worktree` is not remote or if the message's worktree id does
    /// not match it.
    pub fn from_proto(
        proto: rpc::proto::File,
        worktree: Model<Worktree>,
        cx: &AppContext,
    ) -> Result<Self> {
        let worktree_id = worktree
            .read(cx)
            .as_remote()
            .ok_or_else(|| anyhow!("not remote"))?
            .id();

        if worktree_id.to_proto() != proto.worktree_id {
            return Err(anyhow!("worktree id does not match file"));
        }

        Ok(Self {
            worktree,
            path: Path::new(&proto.path).into(),
            mtime: proto.mtime.map(|time| time.into()),
            entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
            is_local: false,
            is_deleted: proto.is_deleted,
            is_private: false,
        })
    }

    /// Downcasts a generic `language::File` to this concrete type, if it is one.
    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
        file.and_then(|f| f.as_any().downcast_ref())
    }

    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
        self.worktree.read(cx).id()
    }

    /// Returns the file's worktree entry id, or `None` once it is deleted.
    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
        if self.is_deleted {
            None
        } else {
            self.entry_id
        }
    }
}
3110
/// A single file or directory in a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable identifier for this entry, preserved across rescans.
    pub id: ProjectEntryId,
    /// Whether this is a file or a directory (and the directory's load state).
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode number.
    pub inode: u64,
    /// Last-known modification time; `None` if unknown.
    pub mtime: Option<SystemTime>,
    /// Whether the entry is a symbolic link.
    pub is_symlink: bool,

    /// Whether this entry is ignored by Git.
    ///
    /// We only scan ignored entries once the directory is expanded and
    /// exclude them from searches.
    pub is_ignored: bool,

    /// Whether this entry's canonical path is outside of the worktree.
    /// This means the entry is only accessible from the worktree root via a
    /// symlink.
    ///
    /// We only scan entries outside of the worktree once the symlinked
    /// directory is expanded. External entries are treated like gitignored
    /// entries in that they are not included in searches.
    pub is_external: bool,
    /// The entry's git status, if it is inside a repository and has one.
    pub git_status: Option<GitFileStatus>,
    /// Whether this entry is considered to be a `.env` file.
    pub is_private: bool,
}
3138
/// The kind of a worktree entry, including a directory's load state.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose contents have not been scanned.
    UnloadedDir,
    /// A directory whose scan has been requested but not completed.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, with a character bag used for fuzzy matching.
    File(CharBag),
}
3146
/// How a path changed between two snapshots of a worktree.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
3162
/// A change to a git repository entry between two snapshots.
pub struct GitRepositoryChange {
    /// The previous state of the repository, if it already existed.
    pub old_repository: Option<RepositoryEntry>,
}
3167
/// The set of entry changes produced by a scan, as (path, id, change) tuples.
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// The set of repository changes produced by a scan, keyed by work directory.
pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
3170
impl Entry {
    /// Creates an entry from filesystem metadata, allocating a fresh id.
    /// Directories start as `PendingDir` until their contents are scanned.
    fn new(
        path: Arc<Path>,
        metadata: &fs::Metadata,
        next_entry_id: &AtomicUsize,
        root_char_bag: CharBag,
    ) -> Self {
        Self {
            id: ProjectEntryId::new(next_entry_id),
            kind: if metadata.is_dir {
                EntryKind::PendingDir
            } else {
                EntryKind::File(char_bag_for_path(root_char_bag, &path))
            },
            path,
            inode: metadata.inode,
            mtime: Some(metadata.mtime),
            is_symlink: metadata.is_symlink,
            is_ignored: false,
            is_external: false,
            is_private: false,
            git_status: None,
        }
    }

    /// Whether the entry exists on disk (a known mtime is used as the marker).
    pub fn is_created(&self) -> bool {
        self.mtime.is_some()
    }

    pub fn is_dir(&self) -> bool {
        self.kind.is_dir()
    }

    pub fn is_file(&self) -> bool {
        self.kind.is_file()
    }

    pub fn git_status(&self) -> Option<GitFileStatus> {
        self.git_status
    }
}
3212
3213impl EntryKind {
3214 pub fn is_dir(&self) -> bool {
3215 matches!(
3216 self,
3217 EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
3218 )
3219 }
3220
3221 pub fn is_unloaded(&self) -> bool {
3222 matches!(self, EntryKind::UnloadedDir)
3223 }
3224
3225 pub fn is_file(&self) -> bool {
3226 matches!(self, EntryKind::File(_))
3227 }
3228}
3229
3230impl sum_tree::Item for Entry {
3231 type Summary = EntrySummary;
3232
3233 fn summary(&self) -> Self::Summary {
3234 let non_ignored_count = if self.is_ignored || self.is_external {
3235 0
3236 } else {
3237 1
3238 };
3239 let file_count;
3240 let non_ignored_file_count;
3241 if self.is_file() {
3242 file_count = 1;
3243 non_ignored_file_count = non_ignored_count;
3244 } else {
3245 file_count = 0;
3246 non_ignored_file_count = 0;
3247 }
3248
3249 let mut statuses = GitStatuses::default();
3250 match self.git_status {
3251 Some(status) => match status {
3252 GitFileStatus::Added => statuses.added = 1,
3253 GitFileStatus::Modified => statuses.modified = 1,
3254 GitFileStatus::Conflict => statuses.conflict = 1,
3255 },
3256 None => {}
3257 }
3258
3259 EntrySummary {
3260 max_path: self.path.clone(),
3261 count: 1,
3262 non_ignored_count,
3263 file_count,
3264 non_ignored_file_count,
3265 statuses,
3266 }
3267 }
3268}
3269
/// Entries are keyed by path in the `entries_by_path` tree.
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
3277
/// Aggregated statistics over a subtree of entries in `entries_by_path`.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (last) path in the summarized subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries that are neither ignored nor external.
    non_ignored_count: usize,
    /// Total number of file entries.
    file_count: usize,
    /// File entries that are neither ignored nor external.
    non_ignored_file_count: usize,
    /// Aggregated git status counts.
    statuses: GitStatuses,
}
3287
impl Default for EntrySummary {
    /// The empty summary: zero counts and the empty path.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
            statuses: Default::default(),
        }
    }
}
3300
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines two adjacent summaries: counts add, and the right-hand
    /// summary's `max_path` wins since it covers later entries.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.non_ignored_count += rhs.non_ignored_count;
        self.file_count += rhs.file_count;
        self.non_ignored_file_count += rhs.non_ignored_file_count;
        self.statuses += rhs.statuses;
    }
}
3313
/// A lightweight record of an entry, stored in the id-keyed index
/// (`entries_by_id`).
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan during which this entry was last updated.
    scan_id: usize,
}
3321
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
3329
/// Path entries are keyed by entry id in the `entries_by_id` tree.
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    fn key(&self) -> Self::Key {
        self.id
    }
}
3337
/// Summary for the id-keyed entry tree: tracks the greatest id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
3342
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // The right-hand summary covers later (greater) ids.
        self.max_id = summary.max_id;
    }
}
3350
/// Lets a cursor over the id-keyed tree seek by `ProjectEntryId`.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
3356
/// Ordering key for the path-keyed entry tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
3359
impl Default for PathKey {
    /// The empty path, which sorts before all other paths.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
3365
3366impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
3367 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3368 self.0 = summary.max_path.clone();
3369 }
3370}
3371
/// Scans the worktree's directory contents on a background task, mutating
/// the shared `BackgroundScannerState` and reporting progress through
/// `status_updates_tx`.
struct BackgroundScanner {
    // Lock-protected snapshot state mutated while scanning.
    state: Mutex<BackgroundScannerState>,
    fs: Arc<dyn Fs>,
    // Whether the underlying filesystem treats paths case-sensitively.
    fs_case_sensitive: bool,
    // Channel used to publish `ScanState` progress/update messages.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: BackgroundExecutor,
    // Incoming requests to rescan specific paths.
    scan_requests_rx: channel::Receiver<ScanRequest>,
    // Incoming requests to eagerly load everything under a path prefix.
    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    // Source of fresh entry ids (shared via `Arc`).
    next_entry_id: Arc<AtomicUsize>,
    // Current scanner phase; affects how changes are classified.
    phase: BackgroundScannerPhase,
}
3383
/// Lifecycle phase of the background scanner; consulted by
/// `build_change_set` when classifying changed entries.
#[derive(PartialEq)]
enum BackgroundScannerPhase {
    /// Performing the initial recursive scan of the worktree.
    InitialScan,
    /// Processing FS events that arrived while the initial scan ran.
    EventsReceivedDuringInitialScan,
    /// Steady state: processing FS events as they arrive.
    Events,
}
3390
3391impl BackgroundScanner {
3392 #[allow(clippy::too_many_arguments)]
3393 fn new(
3394 snapshot: LocalSnapshot,
3395 next_entry_id: Arc<AtomicUsize>,
3396 fs: Arc<dyn Fs>,
3397 fs_case_sensitive: bool,
3398 status_updates_tx: UnboundedSender<ScanState>,
3399 executor: BackgroundExecutor,
3400 scan_requests_rx: channel::Receiver<ScanRequest>,
3401 path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
3402 ) -> Self {
3403 Self {
3404 fs,
3405 fs_case_sensitive,
3406 status_updates_tx,
3407 executor,
3408 scan_requests_rx,
3409 path_prefixes_to_scan_rx,
3410 next_entry_id,
3411 state: Mutex::new(BackgroundScannerState {
3412 prev_snapshot: snapshot.snapshot.clone(),
3413 snapshot,
3414 scanned_dirs: Default::default(),
3415 path_prefixes_to_scan: Default::default(),
3416 paths_to_scan: Default::default(),
3417 removed_entry_ids: Default::default(),
3418 changed_paths: Default::default(),
3419 }),
3420 phase: BackgroundScannerPhase::InitialScan,
3421 }
3422 }
3423
    /// Main loop of the background scanner: performs the initial recursive
    /// scan, then processes scan requests, path-prefix requests, and
    /// filesystem events until the input channels close.
    async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathBuf>>>>) {
        use futures::FutureExt as _;

        // Populate ignores above the root.
        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
        for (index, ancestor) in root_abs_path.ancestors().enumerate() {
            // Skip index 0 (the root itself); its ignores are handled by the scan.
            if index != 0 {
                if let Ok(ignore) =
                    build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
                {
                    self.state
                        .lock()
                        .snapshot
                        .ignores_by_parent_abs_path
                        .insert(ancestor.into(), (ignore.into(), false));
                }
            }
            if ancestor.join(&*DOT_GIT).is_dir() {
                // Reached root of git repository.
                break;
            }
        }

        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        {
            let mut state = self.state.lock();
            state.snapshot.scan_id += 1;
            if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
                let ignore_stack = state
                    .snapshot
                    .ignore_stack_for_abs_path(&root_abs_path, true);
                if ignore_stack.is_abs_path_ignored(&root_abs_path, true) {
                    root_entry.is_ignored = true;
                    state.insert_entry(root_entry.clone(), self.fs.as_ref());
                }
                state.enqueue_scan_dir(root_abs_path, &root_entry, &scan_job_tx);
            }
        };

        // Perform an initial scan of the directory.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut state = self.state.lock();
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
        }

        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
        if let Poll::Ready(Some(mut paths)) = futures::poll!(fs_events_rx.next()) {
            // Coalesce all immediately-available event batches into one.
            while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
                paths.extend(more_paths);
            }
            self.process_events(paths).await;
        }

        // Continue processing events until the worktree is dropped.
        self.phase = BackgroundScannerPhase::Events;
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.scan_requests_rx.recv().fuse() => {
                    let Ok(request) = request else { break };
                    if !self.process_scan_request(request, false).await {
                        return;
                    }
                }

                path_prefix = self.path_prefixes_to_scan_rx.recv().fuse() => {
                    let Ok(path_prefix) = path_prefix else { break };
                    log::trace!("adding path prefix {:?}", path_prefix);

                    let did_scan = self.forcibly_load_paths(&[path_prefix.clone()]).await;
                    if did_scan {
                        let abs_path =
                        {
                            let mut state = self.state.lock();
                            state.path_prefixes_to_scan.insert(path_prefix.clone());
                            state.snapshot.abs_path.join(&path_prefix)
                        };

                        if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
                            self.process_events(vec![abs_path]).await;
                        }
                    }
                }

                paths = fs_events_rx.next().fuse() => {
                    let Some(mut paths) = paths else { break };
                    // Coalesce with any other event batches that are already ready.
                    while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
                        paths.extend(more_paths);
                    }
                    self.process_events(paths.clone()).await;
                }
            }
        }
    }
3526
    /// Handle a request to rescan a set of relative paths. Returns `false`
    /// when the status-updates channel has closed and the scanner should
    /// shut down.
    async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
        log::debug!("rescanning paths {:?}", request.relative_paths);

        request.relative_paths.sort_unstable();
        self.forcibly_load_paths(&request.relative_paths).await;

        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
            Ok(path) => path,
            Err(err) => {
                // Keep running; the request simply cannot be fulfilled now.
                log::error!("failed to canonicalize root path: {}", err);
                return true;
            }
        };
        // Convert relative paths to absolute ones; a path without a file
        // name denotes the worktree root itself.
        let abs_paths = request
            .relative_paths
            .iter()
            .map(|path| {
                if path.file_name().is_some() {
                    root_canonical_path.join(path)
                } else {
                    root_canonical_path.clone()
                }
            })
            .collect::<Vec<_>>();

        self.reload_entries_for_paths(
            root_path,
            root_canonical_path,
            &request.relative_paths,
            abs_paths,
            None,
        )
        .await;
        self.send_status_update(scanning, Some(request.done))
    }
3563
    /// Handle a batch of raw FS event paths: translate them to
    /// worktree-relative paths, reload the affected entries, and reload any
    /// git repositories whose `.git` directories were touched.
    async fn process_events(&mut self, mut abs_paths: Vec<PathBuf>) {
        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
            Ok(path) => path,
            Err(err) => {
                log::error!("failed to canonicalize root path: {}", err);
                return;
            }
        };

        let mut relative_paths = Vec::with_capacity(abs_paths.len());
        let mut dot_git_paths_to_reload = HashSet::default();
        abs_paths.sort_unstable();
        // Drop any path whose parent is also in the batch; reloading the
        // parent covers it.
        abs_paths.dedup_by(|a, b| a.starts_with(&b));
        abs_paths.retain(|abs_path| {
            let snapshot = &self.state.lock().snapshot;
            {
                let mut is_git_related = false;
                // Record any event inside a `.git` directory so the
                // containing repository gets reloaded below.
                if let Some(dot_git_dir) = abs_path
                    .ancestors()
                    .find(|ancestor| ancestor.file_name() == Some(*DOT_GIT))
                {
                    let dot_git_path = dot_git_dir
                        .strip_prefix(&root_canonical_path)
                        .ok()
                        .map(|path| path.to_path_buf())
                        .unwrap_or_else(|| dot_git_dir.to_path_buf());
                    dot_git_paths_to_reload.insert(dot_git_path.to_path_buf());
                    is_git_related = true;
                }

                let relative_path: Arc<Path> =
                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                        path.into()
                    } else {
                        log::error!(
                            "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
                        );
                        return false;
                    };

                // Skip events inside directories that aren't loaded; they
                // will be observed when the directory is scanned.
                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
                    snapshot
                        .entry_for_path(parent)
                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
                });
                if !parent_dir_is_loaded {
                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
                    return false;
                }

                if snapshot.is_path_excluded(relative_path.to_path_buf()) {
                    if !is_git_related {
                        log::debug!("ignoring FS event for excluded path {relative_path:?}");
                    }
                    return false;
                }

                relative_paths.push(relative_path);
                true
            }
        });

        if dot_git_paths_to_reload.is_empty() && relative_paths.is_empty() {
            return;
        }

        if !relative_paths.is_empty() {
            log::debug!("received fs events {:?}", relative_paths);

            let (scan_job_tx, scan_job_rx) = channel::unbounded();
            self.reload_entries_for_paths(
                root_path,
                root_canonical_path,
                &relative_paths,
                abs_paths,
                Some(scan_job_tx.clone()),
            )
            .await;
            drop(scan_job_tx);
            self.scan_dirs(false, scan_job_rx).await;

            // Changed `.gitignore` files can alter ignore state elsewhere;
            // recompute ignore statuses and scan whatever that uncovers.
            let (scan_job_tx, scan_job_rx) = channel::unbounded();
            self.update_ignore_statuses(scan_job_tx).await;
            self.scan_dirs(false, scan_job_rx).await;
        }

        {
            let mut state = self.state.lock();
            if !dot_git_paths_to_reload.is_empty() {
                if relative_paths.is_empty() {
                    // No entry reload ran above, so bump the scan id here.
                    state.snapshot.scan_id += 1;
                }
                log::debug!("reloading repositories: {dot_git_paths_to_reload:?}");
                state.reload_repositories(&dot_git_paths_to_reload, self.fs.as_ref());
            }
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
            for (_, entry_id) in mem::take(&mut state.removed_entry_ids) {
                state.scanned_dirs.remove(&entry_id);
            }
        }

        self.send_status_update(false, None);
    }
3668
3669 async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
3670 let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
3671 {
3672 let mut state = self.state.lock();
3673 let root_path = state.snapshot.abs_path.clone();
3674 for path in paths {
3675 for ancestor in path.ancestors() {
3676 if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
3677 if entry.kind == EntryKind::UnloadedDir {
3678 let abs_path = root_path.join(ancestor);
3679 state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
3680 state.paths_to_scan.insert(path.clone());
3681 break;
3682 }
3683 }
3684 }
3685 }
3686 drop(scan_job_tx);
3687 }
3688 while let Some(job) = scan_job_rx.next().await {
3689 self.scan_dir(&job).await.log_err();
3690 }
3691
3692 mem::take(&mut self.state.lock().paths_to_scan).len() > 0
3693 }
3694
    /// Drain `scan_jobs_rx` using one worker task per CPU, optionally
    /// emitting periodic progress updates while scanning proceeds.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            // The receiving side is gone; no point in scanning.
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.scan_requests_rx.recv().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race; send the update.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent one; adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Suppress error logging for the worktree root.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
3767
    /// Publish the current snapshot and the change set since the previous
    /// update on the status channel. Returns `false` when the channel has
    /// closed.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut state = self.state.lock();
        // While a scan is still in progress, skip updates with no changes.
        if state.changed_paths.is_empty() && scanning {
            return true;
        }

        let new_snapshot = state.snapshot.clone();
        // Swap in the new snapshot as the baseline for the next diff.
        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
        let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
        state.changed_paths.clear();

        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot: new_snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
3788
    /// Scan a single directory: build entries for its children, track any
    /// `.gitignore`/`.git` found there, and enqueue follow-up jobs for
    /// subdirectories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let root_abs_path;
        let mut ignore_stack;
        let mut new_ignore;
        let root_char_bag;
        let next_entry_id;
        {
            // Take everything we need from the snapshot up front, then
            // release the lock before doing filesystem I/O.
            let state = self.state.lock();
            let snapshot = &state.snapshot;
            root_abs_path = snapshot.abs_path().clone();
            if snapshot.is_path_excluded(job.path.to_path_buf()) {
                log::error!("skipping excluded directory {:?}", job.path);
                return Ok(());
            }
            log::debug!("scanning directory {:?}", job.path);
            ignore_stack = job.ignore_stack.clone();
            new_ignore = None;
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = self.next_entry_id.clone();
            drop(state);
        }

        let mut dotgit_path = None;
        let mut root_canonical_path = None;
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory entry; `None` marks a dir we won't recurse into.
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // it tends to be seen early in the listing, so there should rarely be many such
                // entries. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }
            // If we find a .git, we'll need to load the repository.
            else if child_name == *DOT_GIT {
                dotgit_path = Some(child_path.clone());
            }

            {
                let relative_path = job.path.join(child_name);
                let mut state = self.state.lock();
                if state.snapshot.is_path_excluded(relative_path.clone()) {
                    log::debug!("skipping excluded child entry {relative_path:?}");
                    state.remove_path(&relative_path);
                    continue;
                }
                drop(state);
            }

            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child vanished between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {child_abs_path:?}: {err:?}");
                    continue;
                }
            };

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if job.is_external {
                // Children of an external directory are external too.
                child_entry.is_external = true;
            } else if child_metadata.is_symlink {
                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
                    Ok(path) => path,
                    Err(err) => {
                        log::error!(
                            "error reading target of symlink {:?}: {:?}",
                            child_abs_path,
                            err
                        );
                        continue;
                    }
                };

                // lazily canonicalize the root path in order to determine if
                // symlinks point outside of the worktree.
                let root_canonical_path = match &root_canonical_path {
                    Some(path) => path,
                    None => match self.fs.canonicalize(&root_abs_path).await {
                        Ok(path) => root_canonical_path.insert(path),
                        Err(err) => {
                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
                            continue;
                        }
                    },
                };

                if !canonical_path.starts_with(root_canonical_path) {
                    child_entry.is_external = true;
                }
            }

            if child_entry.is_dir() {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path.clone(),
                        path: child_path,
                        is_external: child_entry.is_external,
                        ignore_stack: if child_entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                        containing_repository: job.containing_repository.clone(),
                    }));
                } else {
                    // Keep a placeholder so jobs stay aligned with dir entries.
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
                if !child_entry.is_ignored {
                    if let Some((repository_dir, repository, staged_statuses)) =
                        &job.containing_repository
                    {
                        if let Ok(repo_path) = child_entry.path.strip_prefix(&repository_dir.0) {
                            if let Some(mtime) = child_entry.mtime {
                                let repo_path = RepoPath(repo_path.into());
                                // Combine the precomputed staged status with
                                // the unstaged status from the repository.
                                child_entry.git_status = combine_git_statuses(
                                    staged_statuses.get(&repo_path).copied(),
                                    repository.lock().unstaged_status(&repo_path, mtime),
                                );
                            }
                        }
                    }
                }
            }

            {
                let relative_path = job.path.join(child_name);
                let state = self.state.lock();
                if state.snapshot.is_path_private(&relative_path) {
                    log::debug!("detected private file: {relative_path:?}");
                    child_entry.is_private = true;
                }
                drop(state)
            }

            new_entries.push(child_entry);
        }

        let mut state = self.state.lock();

        // Identify any subdirectories that should not be scanned.
        let mut job_ix = 0;
        for entry in &mut new_entries {
            state.reuse_entry_id(entry);
            if entry.is_dir() {
                if state.should_scan_directory(entry) {
                    job_ix += 1;
                } else {
                    log::debug!("defer scanning directory {:?}", entry.path);
                    entry.kind = EntryKind::UnloadedDir;
                    new_jobs.remove(job_ix);
                }
            }
        }

        state.populate_dir(&job.path, new_entries, new_ignore);

        // If a `.git` was found, build its repository entry now so the
        // child jobs below can carry it.
        let repository =
            dotgit_path.and_then(|path| state.build_git_repository(path, self.fs.as_ref()));

        for mut new_job in new_jobs.into_iter().flatten() {
            if let Some(containing_repository) = &repository {
                new_job.containing_repository = Some(containing_repository.clone());
            }

            job.scan_queue
                .try_send(new_job)
                .expect("channel is unbounded");
        }

        Ok(())
    }
4019
    /// Re-stat the given paths and update the snapshot accordingly:
    /// removing entries whose files are gone, inserting/updating the rest,
    /// and (when `scan_queue_tx` is provided) enqueueing recursive scans of
    /// affected directories.
    async fn reload_entries_for_paths(
        &self,
        root_abs_path: Arc<Path>,
        root_canonical_path: PathBuf,
        relative_paths: &[Arc<Path>],
        abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) {
        // Stat all paths concurrently; each result is
        // `Ok(Some((metadata, canonical_path)))`, `Ok(None)` (gone), or `Err`.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| async move {
                    let metadata = self.fs.metadata(abs_path).await?;
                    if let Some(metadata) = metadata {
                        let canonical_path = self.fs.canonicalize(abs_path).await?;

                        // If we're on a case-insensitive filesystem (default on macOS), we want
                        // to only ignore metadata for non-symlink files if their absolute-path matches
                        // the canonical-path.
                        // Because if not, this might be a case-only-renaming (`mv test.txt TEST.TXT`)
                        // and we want to ignore the metadata for the old path (`test.txt`) so it's
                        // treated as removed.
                        if !self.fs_case_sensitive && !metadata.is_symlink {
                            let canonical_file_name = canonical_path.file_name();
                            let file_name = abs_path.file_name();
                            if canonical_file_name != file_name {
                                return Ok(None);
                            }
                        }

                        anyhow::Ok(Some((metadata, canonical_path)))
                    } else {
                        Ok(None)
                    }
                })
                .collect::<Vec<_>>(),
        )
        .await;

        let mut state = self.state.lock();
        let snapshot = &mut state.snapshot;
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        let doing_recursive_update = scan_queue_tx.is_some();
        snapshot.scan_id += 1;
        // Only mark the scan complete immediately if no recursive work follows.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            if matches!(metadata, Ok(None)) || doing_recursive_update {
                log::trace!("remove path {:?}", path);
                state.remove_path(path);
            }
        }

        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
            match metadata {
                Ok(Some((metadata, canonical_path))) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);

                    let mut fs_entry = Entry::new(
                        path.clone(),
                        metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                    );
                    let is_dir = fs_entry.is_dir();
                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
                    fs_entry.is_external = !canonical_path.starts_with(&root_canonical_path);
                    fs_entry.is_private = state.snapshot.is_path_private(path);

                    // Refresh the git status for regular, tracked files.
                    if !is_dir && !fs_entry.is_ignored && !fs_entry.is_external {
                        if let Some((work_dir, repo)) = state.snapshot.local_repo_for_path(path) {
                            if let Ok(repo_path) = path.strip_prefix(work_dir.0) {
                                if let Some(mtime) = fs_entry.mtime {
                                    let repo_path = RepoPath(repo_path.into());
                                    let repo = repo.repo_ptr.lock();
                                    fs_entry.git_status = repo.status(&repo_path, mtime);
                                }
                            }
                        }
                    }

                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, fs_entry.is_dir()) {
                        if state.should_scan_directory(&fs_entry) {
                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
                        } else {
                            fs_entry.kind = EntryKind::UnloadedDir;
                        }
                    }

                    state.insert_entry(fs_entry, self.fs.as_ref());
                }
                Ok(None) => {
                    self.remove_repo_path(path, &mut state.snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file {abs_path:?} on event: {err:#}");
                }
            }
        }

        // Record the refreshed paths for the next change-set computation.
        util::extend_sorted(
            &mut state.changed_paths,
            relative_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );
    }
4136
4137 fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
4138 if !path
4139 .components()
4140 .any(|component| component.as_os_str() == *DOT_GIT)
4141 {
4142 if let Some(repository) = snapshot.repository_for_work_directory(path) {
4143 let entry = repository.work_directory.0;
4144 snapshot.git_repositories.remove(&entry);
4145 snapshot
4146 .snapshot
4147 .repository_entries
4148 .remove(&RepositoryWorkDirectory(path.into()));
4149 return Some(());
4150 }
4151 }
4152
4153 // TODO statuses
4154 // Track when a .git is removed and iterate over the file system there
4155
4156 Some(())
4157 }
4158
    /// Recompute ignore state after `.gitignore` changes: drop ignores
    /// whose `.gitignore` files were deleted, then propagate updated ignore
    /// status down the tree across all CPUs, scheduling directory scans via
    /// `scan_job_tx` along the way.
    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
        use futures::FutureExt as _;

        let mut snapshot = self.state.lock().snapshot.clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        let abs_path = snapshot.abs_path.clone();
        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
                if *needs_update {
                    *needs_update = false;
                    // Only update directories that are actually in the snapshot.
                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                        ignores_to_update.push(parent_abs_path.clone());
                    }
                }

                // The `.gitignore` entry itself is gone: forget this ignore.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local clone and the shared state.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.state
                .lock()
                .snapshot
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of a directory that's already being updated;
            // the recursive jobs below will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
                scan_queue: scan_job_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.scan_requests_rx.recv().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
4240
4241 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
4242 log::trace!("update ignore status {:?}", job.abs_path);
4243
4244 let mut ignore_stack = job.ignore_stack;
4245 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
4246 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
4247 }
4248
4249 let mut entries_by_id_edits = Vec::new();
4250 let mut entries_by_path_edits = Vec::new();
4251 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
4252 let repo = snapshot
4253 .local_repo_for_path(path)
4254 .map_or(None, |local_repo| Some(local_repo.1));
4255 for mut entry in snapshot.child_entries(path).cloned() {
4256 let was_ignored = entry.is_ignored;
4257 let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
4258 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
4259 if entry.is_dir() {
4260 let child_ignore_stack = if entry.is_ignored {
4261 IgnoreStack::all()
4262 } else {
4263 ignore_stack.clone()
4264 };
4265
4266 // Scan any directories that were previously ignored and weren't previously scanned.
4267 if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
4268 let state = self.state.lock();
4269 if state.should_scan_directory(&entry) {
4270 state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
4271 }
4272 }
4273
4274 job.ignore_queue
4275 .send(UpdateIgnoreStatusJob {
4276 abs_path: abs_path.clone(),
4277 ignore_stack: child_ignore_stack,
4278 ignore_queue: job.ignore_queue.clone(),
4279 scan_queue: job.scan_queue.clone(),
4280 })
4281 .await
4282 .unwrap();
4283 }
4284
4285 if entry.is_ignored != was_ignored {
4286 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
4287 path_entry.scan_id = snapshot.scan_id;
4288 path_entry.is_ignored = entry.is_ignored;
4289 if !entry.is_dir() && !entry.is_ignored && !entry.is_external {
4290 if let Some(repo) = repo {
4291 if let Some(mtime) = &entry.mtime {
4292 let repo_path = RepoPath(entry.path.to_path_buf());
4293 let repo = repo.repo_ptr.lock();
4294 entry.git_status = repo.status(&repo_path, *mtime);
4295 }
4296 }
4297 }
4298 entries_by_id_edits.push(Edit::Insert(path_entry));
4299 entries_by_path_edits.push(Edit::Insert(entry));
4300 }
4301 }
4302
4303 let state = &mut self.state.lock();
4304 for edit in &entries_by_path_edits {
4305 if let Edit::Insert(entry) = edit {
4306 if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
4307 state.changed_paths.insert(ix, entry.path.clone());
4308 }
4309 }
4310 }
4311
4312 state
4313 .snapshot
4314 .entries_by_path
4315 .edit(entries_by_path_edits, &());
4316 state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
4317 }
4318
    /// Diff `old_snapshot` against `new_snapshot` under the given event
    /// paths, producing the set of (path, id, change-kind) tuples to report.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> UpdatedEntriesSet {
        use BackgroundScannerPhase::*;
        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

        // Identify which paths have changed. Use the known set of changed
        // parent paths to optimize the search.
        let mut changes = Vec::new();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let mut last_newly_loaded_dir_path = None;
        old_paths.next(&());
        new_paths.next(&());
        for path in event_paths {
            let path = PathKey(path.clone());
            // Advance both cursors to the event path if they're behind it.
            if old_paths.item().map_or(false, |e| e.path < path.0) {
                old_paths.seek_forward(&path, Bias::Left, &());
            }
            if new_paths.item().map_or(false, |e| e.path < path.0) {
                new_paths.seek_forward(&path, Bias::Left, &());
            }
            // Walk both trees in lockstep over this event path's subtree.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Both cursors have left this event path's subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Path only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                old_paths.next(&());
                            }
                            // Path in both snapshots: compare the entries.
                            Ordering::Equal => {
                                if self.phase == EventsReceivedDuringInitialScan {
                                    if old_entry.id != new_entry.id {
                                        changes.push((
                                            old_entry.path.clone(),
                                            old_entry.id,
                                            Removed,
                                        ));
                                    }
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.push((
                                        new_entry.path.clone(),
                                        new_entry.id,
                                        AddedOrUpdated,
                                    ));
                                } else if old_entry.id != new_entry.id {
                                    // Same path, different entry: replaced.
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
                                } else if old_entry != new_entry {
                                    if old_entry.kind.is_unloaded() {
                                        // Directory transitioned from unloaded to loaded.
                                        last_newly_loaded_dir_path = Some(&new_entry.path);
                                        changes.push((
                                            new_entry.path.clone(),
                                            new_entry.id,
                                            Loaded,
                                        ));
                                    } else {
                                        changes.push((
                                            new_entry.path.clone(),
                                            new_entry.id,
                                            Updated,
                                        ));
                                    }
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Path only in the new snapshot: loaded or added.
                            Ordering::Greater => {
                                let is_newly_loaded = self.phase == InitialScan
                                    || last_newly_loaded_dir_path
                                        .as_ref()
                                        .map_or(false, |dir| new_entry.path.starts_with(&dir));
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    if is_newly_loaded { Loaded } else { Added },
                                ));
                                new_paths.next(&());
                            }
                        }
                    }
                    // Old snapshot has trailing entries: all removed.
                    (Some(old_entry), None) => {
                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
                        old_paths.next(&());
                    }
                    // New snapshot has trailing entries: loaded or added.
                    (None, Some(new_entry)) => {
                        let is_newly_loaded = self.phase == InitialScan
                            || last_newly_loaded_dir_path
                                .as_ref()
                                .map_or(false, |dir| new_entry.path.starts_with(&dir));
                        changes.push((
                            new_entry.path.clone(),
                            new_entry.id,
                            if is_newly_loaded { Loaded } else { Added },
                        ));
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes.into()
    }
4436
4437 async fn progress_timer(&self, running: bool) {
4438 if !running {
4439 return futures::future::pending().await;
4440 }
4441
4442 #[cfg(any(test, feature = "test-support"))]
4443 if self.fs.is_fake() {
4444 return self.executor.simulate_random_delay().await;
4445 }
4446
4447 smol::Timer::after(FS_WATCH_LATENCY).await;
4448 }
4449}
4450
4451fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
4452 let mut result = root_char_bag;
4453 result.extend(
4454 path.to_string_lossy()
4455 .chars()
4456 .map(|c| c.to_ascii_lowercase()),
4457 );
4458 result
4459}
4460
/// A unit of work for the background scanner: scan one directory and enqueue
/// jobs for its subdirectories.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// The same directory, relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules accumulated from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which jobs for child directories are pushed.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories — presumably used to detect symlink
    /// cycles during descent; TODO confirm at the enqueue site.
    ancestor_inodes: TreeSet<u64>,
    /// Whether this directory lies outside the worktree root — NOTE(review):
    /// verify semantics against the code that constructs these jobs.
    is_external: bool,
    /// The git repository containing this directory, if any: its working
    /// directory, a handle to the repo, and known per-file statuses.
    containing_repository: Option<(
        RepositoryWorkDirectory,
        Arc<Mutex<dyn GitRepository>>,
        TreeMap<RepoPath, GitFileStatus>,
    )>,
}
4474
/// A unit of work for recomputing ignore status beneath a directory, e.g.
/// after a `.gitignore` file changes.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory whose entries need re-evaluation.
    abs_path: Arc<Path>,
    /// Gitignore rules in effect at this directory.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue for recursing into subdirectories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
    /// Queue for scheduling fresh scans — presumably for directories whose
    /// ignore status changed; TODO confirm at the processing site.
    scan_queue: Sender<ScanJob>,
}
4481
/// Test-support extension methods for a worktree model handle.
pub trait WorktreeModelHandle {
    /// Waits until all pending FS events have been processed, so tests
    /// observe a settled worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
4489
impl WorktreeModelHandle for Model<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        // Throwaway file used to force fresh events through the FS stream.
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create the sentinel file and wait for the worktree to see it...
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_some())
                .await;

            // ...then delete it and wait for the removal to be seen, too.
            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();
            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_none())
                .await;

            // Finally, wait for any scan triggered by the above to complete.
            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
4530
/// Cursor dimension tracking how many entries precede the current position,
/// broken down by (ignored × file/dir) category, plus the rightmost path
/// covered so far.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// Rightmost entry path summarized so far.
    max_path: &'a Path,
    /// Total entries (files and directories, including ignored).
    count: usize,
    /// Entries that are not ignored.
    non_ignored_count: usize,
    /// File entries only (including ignored files).
    file_count: usize,
    /// File entries that are not ignored.
    non_ignored_file_count: usize,
}
4539
4540impl<'a> TraversalProgress<'a> {
4541 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
4542 match (include_ignored, include_dirs) {
4543 (true, true) => self.count,
4544 (true, false) => self.file_count,
4545 (false, true) => self.non_ignored_count,
4546 (false, false) => self.non_ignored_file_count,
4547 }
4548 }
4549}
4550
/// Accumulates entry counts as a cursor dimension over the entries tree.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // `max_path` is replaced (the summary's is the rightmost); the
        // numeric counts are summed.
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.non_ignored_count += summary.non_ignored_count;
        self.file_count += summary.file_count;
        self.non_ignored_file_count += summary.non_ignored_file_count;
    }
}
4560
impl<'a> Default for TraversalProgress<'a> {
    /// Progress at the very start of the tree: empty max path, all counts zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}
4572
/// Counts of entries in each git status category, summed over a subtree.
#[derive(Clone, Debug, Default, Copy)]
struct GitStatuses {
    added: usize,
    modified: usize,
    conflict: usize,
}
4579
4580impl AddAssign for GitStatuses {
4581 fn add_assign(&mut self, rhs: Self) {
4582 self.added += rhs.added;
4583 self.modified += rhs.modified;
4584 self.conflict += rhs.conflict;
4585 }
4586}
4587
4588impl Sub for GitStatuses {
4589 type Output = GitStatuses;
4590
4591 fn sub(self, rhs: Self) -> Self::Output {
4592 GitStatuses {
4593 added: self.added - rhs.added,
4594 modified: self.modified - rhs.modified,
4595 conflict: self.conflict - rhs.conflict,
4596 }
4597 }
4598}
4599
/// Accumulates git status counts as a cursor dimension over the entries tree.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for GitStatuses {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        *self += summary.statuses
    }
}
4605
/// A filtered, cursor-based traversal over a snapshot's entries in path order.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    /// Whether ignored entries are yielded.
    include_ignored: bool,
    /// Whether directory entries are yielded.
    include_dirs: bool,
}
4611
impl<'a> Traversal<'a> {
    /// Advances to the next entry that passes this traversal's dir/ignored
    /// filters. Returns `false` once the end of the tree is reached.
    pub fn advance(&mut self) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                // Seek to the first position whose filtered count exceeds the
                // current one — i.e. the next entry passing the filters.
                count: self.end_offset() + 1,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skips the current entry's entire subtree, stopping at the next entry
    /// (passing the filters) that is not one of its descendants.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            // Only stop on an entry that passes the filters; otherwise the
            // outer loop seeks past it again.
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of filtered entries strictly before the current position.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }

    /// Number of filtered entries up to and including the current position.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_dirs, self.include_ignored)
    }
}
4659
4660impl<'a> Iterator for Traversal<'a> {
4661 type Item = &'a Entry;
4662
4663 fn next(&mut self) -> Option<Self::Item> {
4664 if let Some(item) = self.entry() {
4665 self.advance();
4666 Some(item)
4667 } else {
4668 None
4669 }
4670 }
4671}
4672
/// A seek destination for a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to this exact path.
    Path(&'a Path),
    /// Seek to the first entry that is not a descendant of this path.
    PathSuccessor(&'a Path),
    /// Seek to the position at which the filtered entry count reaches `count`.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
4683
/// Compares a `TraversalTarget` against the cursor's accumulated progress,
/// driving the `seek_forward` calls made by `Traversal`.
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Exact path: ordinary lexicographic comparison.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            // Path successor: while the cursor is still inside `path`'s
            // subtree, report Greater to keep seeking; the first position
            // outside the subtree compares Equal.
            TraversalTarget::PathSuccessor(path) => {
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            // Count: compare the requested count against the filtered count
            // accumulated so far.
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
4706
/// Lets `TraversalTarget` seek over cursors that also accumulate
/// `GitStatuses`: the comparison delegates to the `TraversalProgress`
/// component and ignores the statuses.
impl<'a, 'b> SeekTarget<'a, EntrySummary, (TraversalProgress<'a>, GitStatuses)>
    for TraversalTarget<'b>
{
    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitStatuses), _: &()) -> Ordering {
        self.cmp(&cursor_location.0, &())
    }
}
4714
/// Iterates over the immediate children of `parent_path`, skipping each
/// child's own subtree via `advance_to_sibling`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
4719
4720impl<'a> Iterator for ChildEntriesIter<'a> {
4721 type Item = &'a Entry;
4722
4723 fn next(&mut self) -> Option<Self::Item> {
4724 if let Some(item) = self.traversal.entry() {
4725 if item.path.starts_with(&self.parent_path) {
4726 self.traversal.advance_to_sibling();
4727 return Some(item);
4728 }
4729 }
4730 None
4731 }
4732}
4733
/// Iterates over all descendants of `parent_path` in depth-first (path)
/// order.
pub struct DescendentEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
4738
4739impl<'a> Iterator for DescendentEntriesIter<'a> {
4740 type Item = &'a Entry;
4741
4742 fn next(&mut self) -> Option<Self::Item> {
4743 if let Some(item) = self.traversal.entry() {
4744 if item.path.starts_with(&self.parent_path) {
4745 self.traversal.advance();
4746 return Some(item);
4747 }
4748 }
4749 None
4750 }
4751}
4752
/// Serializes an `Entry` into its wire format. Only the `is_dir` flag is
/// transmitted for the entry kind; the file `CharBag` is rebuilt on receipt.
impl<'a> From<&'a Entry> for proto::Entry {
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths travel as (lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
            is_external: entry.is_external,
            git_status: entry.git_status.map(git_status_to_proto),
        }
    }
}
4768
/// Deserializes an `Entry` received over the wire. The worktree root's
/// `CharBag` is required to rebuild the fuzzy-matching bag for file entries.
impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
    type Error = anyhow::Error;

    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
        let kind = if entry.is_dir {
            EntryKind::Dir
        } else {
            // A file's bag is the root's characters plus the ASCII-lowercased
            // characters of its own path.
            let mut char_bag = *root_char_bag;
            char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
            EntryKind::File(char_bag)
        };
        let path: Arc<Path> = PathBuf::from(entry.path).into();
        Ok(Entry {
            id: ProjectEntryId::from_proto(entry.id),
            kind,
            path,
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
            is_external: entry.is_external,
            git_status: git_status_from_proto(entry.git_status),
            // The privacy flag is not part of the wire format, so it defaults
            // to false — NOTE(review): confirm remote entries never need it.
            is_private: false,
        })
    }
}
4795
4796fn combine_git_statuses(
4797 staged: Option<GitFileStatus>,
4798 unstaged: Option<GitFileStatus>,
4799) -> Option<GitFileStatus> {
4800 if let Some(staged) = staged {
4801 if let Some(unstaged) = unstaged {
4802 if unstaged != staged {
4803 Some(GitFileStatus::Modified)
4804 } else {
4805 Some(staged)
4806 }
4807 } else {
4808 Some(staged)
4809 }
4810 } else {
4811 unstaged
4812 }
4813}
4814
/// Decodes an optional wire-format git status; absent values and unknown
/// enum discriminants both decode to `None`.
fn git_status_from_proto(git_status: Option<i32>) -> Option<GitFileStatus> {
    git_status.and_then(|status| {
        proto::GitStatus::from_i32(status).map(|status| match status {
            proto::GitStatus::Added => GitFileStatus::Added,
            proto::GitStatus::Modified => GitFileStatus::Modified,
            proto::GitStatus::Conflict => GitFileStatus::Conflict,
        })
    })
}
4824
/// Encodes a git status into its wire-format discriminant.
fn git_status_to_proto(status: GitFileStatus) -> i32 {
    match status {
        GitFileStatus::Added => proto::GitStatus::Added as i32,
        GitFileStatus::Modified => proto::GitStatus::Modified as i32,
        GitFileStatus::Conflict => proto::GitStatus::Conflict as i32,
    }
}
4832
/// Numeric identifier for a worktree entry, allocated from a shared atomic
/// counter (see [`ProjectEntryId::new`]).
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProjectEntryId(usize);
4835
impl ProjectEntryId {
    /// Sentinel id larger than any id allocated from a counter.
    pub const MAX: Self = Self(usize::MAX);

    /// Allocates the next id from the shared atomic counter.
    pub fn new(counter: &AtomicUsize) -> Self {
        Self(counter.fetch_add(1, SeqCst))
    }

    /// Reconstructs an id received over the wire.
    pub fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    /// Serializes this id for the wire.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// Raw numeric value of this id.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
4855
/// Aggregated counts of primary error and warning diagnostics for one path.
#[derive(Copy, Clone, Debug, Default, PartialEq, Serialize)]
pub struct DiagnosticSummary {
    pub error_count: usize,
    pub warning_count: usize,
}
4861
4862impl DiagnosticSummary {
4863 fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
4864 let mut this = Self {
4865 error_count: 0,
4866 warning_count: 0,
4867 };
4868
4869 for entry in diagnostics {
4870 if entry.diagnostic.is_primary {
4871 match entry.diagnostic.severity {
4872 DiagnosticSeverity::ERROR => this.error_count += 1,
4873 DiagnosticSeverity::WARNING => this.warning_count += 1,
4874 _ => {}
4875 }
4876 }
4877 }
4878
4879 this
4880 }
4881
4882 pub fn is_empty(&self) -> bool {
4883 self.error_count == 0 && self.warning_count == 0
4884 }
4885
4886 pub fn to_proto(
4887 &self,
4888 language_server_id: LanguageServerId,
4889 path: &Path,
4890 ) -> proto::DiagnosticSummary {
4891 proto::DiagnosticSummary {
4892 path: path.to_string_lossy().to_string(),
4893 language_server_id: language_server_id.0 as u64,
4894 error_count: self.error_count as u32,
4895 warning_count: self.warning_count as u32,
4896 }
4897 }
4898}