1mod ignore;
2mod worktree_settings;
3#[cfg(test)]
4mod worktree_tests;
5
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context as _, Result};
8use client::{proto, Client};
9use clock::ReplicaId;
10use collections::{HashMap, HashSet, VecDeque};
11use fs::Fs;
12use fs::{copy_recursive, RemoveOptions};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 FutureExt as _, Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{
24 repository::{GitFileStatus, GitRepository, RepoPath},
25 DOT_GIT, GITIGNORE,
26};
27use gpui::{
28 AppContext, AsyncAppContext, BackgroundExecutor, Context, EventEmitter, Model, ModelContext,
29 Task,
30};
31use ignore::IgnoreStack;
32use itertools::Itertools;
33use language::{
34 proto::{deserialize_version, serialize_line_ending, serialize_version},
35 Buffer, Capability, DiagnosticEntry, File as _, LineEnding, PointUtf16, Rope, Unclipped,
36};
37use lsp::{DiagnosticSeverity, LanguageServerId};
38use parking_lot::Mutex;
39use postage::{
40 barrier,
41 prelude::{Sink as _, Stream as _},
42 watch,
43};
44use serde::Serialize;
45use settings::{Settings, SettingsLocation, SettingsStore};
46use smol::channel::{self, Sender};
47use std::{
48 any::Any,
49 cmp::{self, Ordering},
50 convert::TryFrom,
51 ffi::OsStr,
52 fmt,
53 future::Future,
54 mem,
55 ops::{AddAssign, Deref, DerefMut, Sub},
56 path::{Path, PathBuf},
57 pin::Pin,
58 sync::{
59 atomic::{AtomicUsize, Ordering::SeqCst},
60 Arc,
61 },
62 time::{Duration, SystemTime},
63};
64use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
65use text::BufferId;
66use util::{
67 paths::{PathMatcher, HOME},
68 ResultExt,
69};
70
71pub use worktree_settings::WorktreeSettings;
72
/// Debounce latency applied when watching the filesystem for changes.
///
/// Both the `test-support` and production `#[cfg]` branches previously
/// declared the identical 100 ms value; they are collapsed into a single
/// definition so the two cannot silently drift apart. If tests ever need a
/// different latency, reintroduce the `#[cfg]` pair with distinct values.
pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
77
/// Identifier for a worktree within a project.
///
/// Wraps a `usize`; for local worktrees it is derived from the model's entity
/// id (see `Worktree::local`), and for remote worktrees it comes from the
/// collaborator's metadata (see `Worktree::remote`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
80
81/// A set of local or remote files that are being opened as part of a project.
82/// Responsible for tracking related FS (for local)/collab (for remote) events and corresponding updates.
83/// Stores git repositories data and the diagnostics for the file(s).
84///
85/// Has an absolute path, and may be set to be visible in Zed UI or not.
86/// May correspond to a directory or a single file.
87/// Possible examples:
88/// * a drag and dropped file — may be added as an invisible, "ephemeral" entry to the current worktree
89/// * a directory opened in Zed — may be added as a visible entry to the current worktree
90///
91/// Uses [`Entry`] to track the state of each file/directory, can look up absolute paths for entries.
pub enum Worktree {
    /// A worktree backed directly by this machine's filesystem.
    Local(LocalWorktree),
    /// A worktree mirrored from a collaborator over the collab protocol.
    Remote(RemoteWorktree),
}
96
/// State for a worktree backed by the local filesystem.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Requests asking the background scanner to rescan specific paths.
    scan_requests_tx: channel::Sender<ScanRequest>,
    /// Path prefixes the background scanner should additionally descend into.
    path_prefixes_to_scan_tx: channel::Sender<Arc<Path>>,
    /// Watch pair that holds `true` while a filesystem scan is in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    /// Handles to the scanner/updater tasks; dropping them stops scanning.
    _background_scanner_tasks: Vec<Task<()>>,
    /// Present while this worktree is being shared with collaborators.
    share: Option<ShareState>,
    /// Full diagnostics per worktree-relative path, grouped by language server.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Per-path error/warning counts, grouped by language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    /// Whether the underlying filesystem distinguishes path casing
    /// (defaults to `true` when detection fails; see `Worktree::local`).
    fs_case_sensitive: bool,
    /// Whether this worktree is shown in the UI (see `Worktree` type docs).
    visible: bool,
}
117
/// A request for the background scanner to rescan specific paths.
struct ScanRequest {
    /// Worktree-relative paths to rescan.
    relative_paths: Vec<Arc<Path>>,
    /// Barrier the scanner drops when the request has been processed,
    /// letting the requester await completion.
    done: barrier::Sender,
}
122
/// State for a worktree mirrored from a remote collaborator.
pub struct RemoteWorktree {
    /// Foreground copy of the snapshot, refreshed from `background_snapshot`.
    snapshot: Snapshot,
    /// Snapshot that incoming `UpdateWorktree` messages are applied to on a
    /// background task before being copied into `snapshot`.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Sender feeding the background update-applier; `None` once updates stop.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Waiters to be resolved once the snapshot has observed a given scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    /// Per-path error/warning counts, grouped by language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    /// Set once the collaboration session has been lost.
    disconnected: bool,
}
135
/// An immutable view of a worktree's entries and git repositories at a point
/// in time. Cheap to clone; shared between the foreground model and
/// background tasks.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    /// The file name of the worktree root (empty if the path has none),
    /// used for fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    /// Entries ordered by worktree-relative path.
    entries_by_path: SumTree<Entry>,
    /// The same entries, indexed by their project entry id.
    entries_by_id: SumTree<PathEntry>,
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
158
/// A git repository discovered within the worktree, identified by the entry
/// of its working directory.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    /// Name of the currently checked-out branch, if known.
    pub(crate) branch: Option<Arc<str>>,
}
164
165impl RepositoryEntry {
166 pub fn branch(&self) -> Option<Arc<str>> {
167 self.branch.clone()
168 }
169
170 pub fn work_directory_id(&self) -> ProjectEntryId {
171 *self.work_directory
172 }
173
174 pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
175 snapshot
176 .entry_for_id(self.work_directory_id())
177 .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
178 }
179
180 pub fn build_update(&self, _: &Self) -> proto::RepositoryEntry {
181 proto::RepositoryEntry {
182 work_directory_id: self.work_directory_id().to_proto(),
183 branch: self.branch.as_ref().map(|str| str.to_string()),
184 }
185 }
186}
187
188impl From<&RepositoryEntry> for proto::RepositoryEntry {
189 fn from(value: &RepositoryEntry) -> Self {
190 proto::RepositoryEntry {
191 work_directory_id: value.work_directory.to_proto(),
192 branch: value.branch.as_ref().map(|str| str.to_string()),
193 }
194 }
195}
196
/// This path corresponds to the 'content path' (the folder that contains the .git)
///
/// NOTE(review): appears to hold the work directory's *worktree-relative*
/// path (it is built from `Entry::path` in `RepositoryEntry::work_directory`)
/// — confirm before relying on it being absolute.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(pub(crate) Arc<Path>);
200
201impl Default for RepositoryWorkDirectory {
202 fn default() -> Self {
203 RepositoryWorkDirectory(Arc::from(Path::new("")))
204 }
205}
206
207impl AsRef<Path> for RepositoryWorkDirectory {
208 fn as_ref(&self) -> &Path {
209 self.0.as_ref()
210 }
211}
212
/// Newtype over the project entry id of a git repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
215
216impl WorkDirectoryEntry {
217 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Result<RepoPath> {
218 let entry = worktree
219 .entry_for_id(self.0)
220 .ok_or_else(|| anyhow!("entry not found"))?;
221 let path = path
222 .strip_prefix(&entry.path)
223 .map_err(|_| anyhow!("could not relativize {:?} against {:?}", path, entry.path))?;
224 Ok(path.into())
225 }
226}
227
228impl Deref for WorkDirectoryEntry {
229 type Target = ProjectEntryId;
230
231 fn deref(&self) -> &Self::Target {
232 &self.0
233 }
234}
235
236impl From<ProjectEntryId> for WorkDirectoryEntry {
237 fn from(value: ProjectEntryId) -> Self {
238 WorkDirectoryEntry(value)
239 }
240}
241
/// A [`Snapshot`] augmented with local-only state: gitignores, git
/// repositories, and the scan-exclusion settings in effect when it was built.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    /// Patterns for paths excluded from scanning (from settings).
    file_scan_exclusions: Vec<PathMatcher>,
    /// Patterns for paths considered private (from settings).
    private_files: Vec<PathMatcher>,
}
254
/// Mutable state owned by the background scanner while it walks the
/// filesystem and builds up the next snapshot.
struct BackgroundScannerState {
    snapshot: LocalSnapshot,
    /// Directories whose contents have already been scanned.
    scanned_dirs: HashSet<ProjectEntryId>,
    /// Path prefixes requested for scanning that haven't been processed yet.
    path_prefixes_to_scan: HashSet<Arc<Path>>,
    /// Individual paths queued for scanning.
    paths_to_scan: HashSet<Arc<Path>>,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    /// Paths that changed during the current update batch.
    changed_paths: Vec<Arc<Path>>,
    /// The snapshot as of the previous completed update, used for diffing.
    prev_snapshot: Snapshot,
}
268
/// Local-only data for a git repository in the worktree: a handle to the
/// repository and the scan bookkeeping for its `.git` directory.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    /// Scan id at which the `.git` directory was last scanned; used to detect
    /// repository-state changes between snapshots.
    pub(crate) git_dir_scan_id: usize,
    /// Shared handle to the underlying git repository.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
277
impl LocalRepositoryEntry {
    /// Shared handle to the underlying git repository.
    pub fn repo(&self) -> &Arc<Mutex<dyn GitRepository>> {
        &self.repo_ptr
    }
}
283
// `LocalSnapshot` dereferences to the plain `Snapshot` it wraps, so all
// snapshot accessors are available on it directly.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
291
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
297
/// Messages sent from the background scanner to the worktree model.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced an updated snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// Entries that changed since the previous update.
        changes: UpdatedEntriesSet,
        /// Completion signal for the `ScanRequest` that triggered this
        /// update, if any; dropped by the receiver to release the waiter.
        barrier: Option<barrier::Sender>,
        /// Whether scanning is still in progress after this update.
        scanning: bool,
    },
}
307
/// State kept while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    /// Channel over which new snapshots (plus their entry/repository diffs)
    /// are sent to the task that relays updates to collaborators.
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    /// Signaled to resume sending updates after a pause/reconnect.
    resume_updates: watch::Sender<()>,
    /// Keeps the remote snapshot in sync for as long as the share lives.
    _maintain_remote_snapshot: Task<Option<()>>,
}
315
/// Events emitted by a [`Worktree`] model.
#[derive(Clone)]
pub enum Event {
    /// File/directory entries changed.
    UpdatedEntries(UpdatedEntriesSet),
    /// Git repository state changed.
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
}
321
// Allows observers to subscribe to `Event`s emitted by worktree models.
impl EventEmitter<Event> for Worktree {}
323
impl Worktree {
    /// Creates a local worktree rooted at `path`: stats the root, builds the
    /// initial snapshot, starts the background scanner tasks, and returns the
    /// new model.
    ///
    /// Also installs a `SettingsStore` observer that restarts the scanner
    /// whenever `file_scan_exclusions` or `private_files` change.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<Model<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();

        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        // Fall back to case-sensitive semantics when detection fails — the
        // safer default, since it never conflates distinct paths.
        let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });

        // Clones captured by the settings observer below, which may need to
        // restart the background scanner long after this function returns.
        let closure_fs = Arc::clone(&fs);
        let closure_next_entry_id = Arc::clone(&next_entry_id);
        let closure_abs_path = abs_path.to_path_buf();
        cx.new_model(move |cx: &mut ModelContext<Worktree>| {
            cx.observe_global::<SettingsStore>(move |this, cx| {
                if let Self::Local(this) = this {
                    let new_file_scan_exclusions = path_matchers(
                        WorktreeSettings::get_global(cx)
                            .file_scan_exclusions
                            .as_deref(),
                        "file_scan_exclusions",
                    );
                    let new_private_files = path_matchers(
                        WorktreeSettings::get(Some(settings::SettingsLocation {
                            worktree_id: cx.handle().entity_id().as_u64() as usize,
                            path: Path::new("")
                        }), cx).private_files.as_deref(),
                        "private_files",
                    );

                    // Only restart scanning when either setting actually changed.
                    if new_file_scan_exclusions != this.snapshot.file_scan_exclusions
                        || new_private_files != this.snapshot.private_files
                    {
                        this.snapshot.file_scan_exclusions = new_file_scan_exclusions;
                        this.snapshot.private_files = new_private_files;

                        log::info!(
                            "Re-scanning directories, new scan exclude files: {:?}, new dotenv files: {:?}",
                            this.snapshot
                                .file_scan_exclusions
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>(),
                            this.snapshot
                                .private_files
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>()
                        );

                        // Replacing the request channels drops the old senders,
                        // which shuts down the previous scanner; then spawn a
                        // fresh one against the updated snapshot.
                        let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
                        let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) =
                            channel::unbounded();
                        this.scan_requests_tx = scan_requests_tx;
                        this.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;
                        this._background_scanner_tasks = start_background_scan_tasks(
                            &closure_abs_path,
                            this.snapshot(),
                            scan_requests_rx,
                            path_prefixes_to_scan_rx,
                            Arc::clone(&closure_next_entry_id),
                            Arc::clone(&closure_fs),
                            cx,
                        );
                        this.is_scanning = watch::channel_with(true);
                    }
                }
            })
            .detach();

            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                file_scan_exclusions: path_matchers(
                    WorktreeSettings::get_global(cx)
                        .file_scan_exclusions
                        .as_deref(),
                    "file_scan_exclusions",
                ),
                private_files: path_matchers(
                    WorktreeSettings::get(Some(SettingsLocation {
                        worktree_id: cx.handle().entity_id().as_u64() as usize,
                        path: Path::new(""),
                    }), cx).private_files.as_deref(),
                    "private_files",
                ),
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.entity_id().as_u64() as usize),
                    abs_path: abs_path.to_path_buf().into(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // scan_id starts ahead of completed_scan_id: the initial
                    // scan is in progress until the scanner reports it done.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry when the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
            let task_snapshot = snapshot.clone();
            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                scan_requests_tx,
                path_prefixes_to_scan_tx,
                _background_scanner_tasks: start_background_scan_tasks(
                    &abs_path,
                    task_snapshot,
                    scan_requests_rx,
                    path_prefixes_to_scan_rx,
                    Arc::clone(&next_entry_id),
                    Arc::clone(&fs),
                    cx,
                ),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                fs_case_sensitive,
                visible,
            })
        })
    }

    /// Creates a worktree that mirrors one shared by a remote collaborator.
    ///
    /// Spawns two tasks: a background task that applies incoming
    /// `UpdateWorktree` messages to a shared snapshot, and a foreground task
    /// that copies that snapshot into the model and resolves pending
    /// snapshot subscriptions.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> Model<Self> {
        cx.new_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply remote updates to the shared background snapshot off the
            // main thread, notifying the foreground task after each one.
            cx.background_executor()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // After each applied update: refresh the model's snapshot, notify
            // observers, and resolve subscriptions whose scan id has now been
            // observed (the queue is ordered, so stop at the first unmet one).
            cx.spawn(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_remote_mut().unwrap();
                        this.snapshot = this.background_snapshot.lock().clone();
                        cx.emit(Event::UpdatedEntries(Arc::from([])));
                        cx.notify();
                        while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                            if this.observed_snapshot(*scan_id) {
                                let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                let _ = tx.send(());
                            } else {
                                break;
                            }
                        }
                    })?;
                }
                anyhow::Ok(())
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// Returns the local variant, if this worktree is local.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the remote variant, if this worktree is remote.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the local variant mutably, if this worktree is local.
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the remote variant mutably, if this worktree is remote.
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Whether this worktree is backed by the local filesystem.
    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    /// Whether this worktree mirrors a remote collaborator's worktree.
    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// Returns a clone of the current (plain) snapshot of either variant.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The id of the most recently started scan (see `Snapshot::scan_id`).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The latest fully completed scan id (see `Snapshot::completed_scan_id`).
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    /// Whether this worktree is shown in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The replica id for collaboration; local worktrees are always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over all diagnostic summaries, flattened into
    /// `(path, language server id, summary)` tuples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }

    /// Returns a [`File`] for the worktree's root entry, if one exists.
    pub fn root_file(&self, cx: &mut ModelContext<Self>) -> Option<Arc<File>> {
        let entry = self.root_entry()?;
        Some(File::for_entry(entry.clone(), cx.handle()))
    }
}
664
665fn start_background_scan_tasks(
666 abs_path: &Path,
667 snapshot: LocalSnapshot,
668 scan_requests_rx: channel::Receiver<ScanRequest>,
669 path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
670 next_entry_id: Arc<AtomicUsize>,
671 fs: Arc<dyn Fs>,
672 cx: &mut ModelContext<'_, Worktree>,
673) -> Vec<Task<()>> {
674 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
675 let background_scanner = cx.background_executor().spawn({
676 let abs_path = if cfg!(target_os = "windows") {
677 abs_path.canonicalize().expect("start background scan tasks failed for canonicalize path {abs_path}")
678 } else {
679 abs_path.to_path_buf()
680 };
681 let background = cx.background_executor().clone();
682 async move {
683 let events = fs.watch(&abs_path, FS_WATCH_LATENCY).await;
684 let case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
685 log::error!(
686 "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
687 );
688 true
689 });
690
691 BackgroundScanner::new(
692 snapshot,
693 next_entry_id,
694 fs,
695 case_sensitive,
696 scan_states_tx,
697 background,
698 scan_requests_rx,
699 path_prefixes_to_scan_rx,
700 )
701 .run(events)
702 .await;
703 }
704 });
705 let scan_state_updater = cx.spawn(|this, mut cx| async move {
706 while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
707 this.update(&mut cx, |this, cx| {
708 let this = this.as_local_mut().unwrap();
709 match state {
710 ScanState::Started => {
711 *this.is_scanning.0.borrow_mut() = true;
712 }
713 ScanState::Updated {
714 snapshot,
715 changes,
716 barrier,
717 scanning,
718 } => {
719 *this.is_scanning.0.borrow_mut() = scanning;
720 this.set_snapshot(snapshot, changes, cx);
721 drop(barrier);
722 }
723 }
724 cx.notify();
725 })
726 .ok();
727 }
728 });
729 vec![background_scanner, scan_state_updater]
730}
731
732fn path_matchers(values: Option<&[String]>, context: &'static str) -> Vec<PathMatcher> {
733 values
734 .unwrap_or(&[])
735 .iter()
736 .sorted()
737 .filter_map(|pattern| {
738 PathMatcher::new(pattern)
739 .map(Some)
740 .unwrap_or_else(|e| {
741 log::error!(
742 "Skipping pattern {pattern} in `{}` project settings due to parsing error: {e:#}", context
743 );
744 None
745 })
746 })
747 .collect()
748}
749
750impl LocalWorktree {
751 pub fn contains_abs_path(&self, path: &Path) -> bool {
752 path.starts_with(&self.abs_path)
753 }
754
    /// Loads the file at `path` into a new [`Buffer`] model.
    ///
    /// The model slot (and hence the buffer id) is reserved up front so the
    /// file contents can be read and the text buffer built on the background
    /// executor before the model is inserted.
    pub fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Model<Buffer>>> {
        let path = Arc::from(path);
        let reservation = cx.reserve_model();
        let buffer_id = BufferId::from(reservation.entity_id().as_non_zero_u64());
        cx.spawn(move |this, mut cx| async move {
            // Read file text and (when applicable) the git diff base.
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))?
                .await?;
            let text_buffer = cx
                .background_executor()
                .spawn(async move { text::Buffer::new(0, buffer_id, contents) })
                .await;
            cx.insert_model(reservation, |_| {
                Buffer::build(
                    text_buffer,
                    diff_base,
                    Some(Arc::new(file)),
                    Capability::ReadWrite,
                )
            })
        })
    }
781
    /// Creates an empty in-memory buffer associated with `path` in this
    /// worktree, without touching the filesystem: the file has no entry id
    /// and no mtime until it is first saved.
    pub fn new_buffer(
        &mut self,
        path: Arc<Path>,
        cx: &mut ModelContext<Worktree>,
    ) -> Model<Buffer> {
        let worktree = cx.handle();
        cx.new_model(|cx| {
            let buffer_id = BufferId::from(cx.entity_id().as_non_zero_u64());
            let text_buffer = text::Buffer::new(0, buffer_id, "".into());
            Buffer::build(
                text_buffer,
                None,
                Some(Arc::new(File {
                    worktree,
                    path,
                    mtime: None,
                    entry_id: None,
                    is_local: true,
                    is_deleted: false,
                    is_private: false,
                })),
                Capability::ReadWrite,
            )
        })
    }
807
808 pub fn diagnostics_for_path(
809 &self,
810 path: &Path,
811 ) -> Vec<(
812 LanguageServerId,
813 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
814 )> {
815 self.diagnostics.get(path).cloned().unwrap_or_default()
816 }
817
    /// Removes all diagnostics and summaries produced by `server_id`,
    /// notifying collaborators (when sharing) with zeroed summaries so they
    /// clear their copies too.
    pub fn clear_diagnostics_for_language_server(
        &mut self,
        server_id: LanguageServerId,
        _: &mut ModelContext<Worktree>,
    ) {
        let worktree_id = self.id().to_proto();
        self.diagnostic_summaries
            .retain(|path, summaries_by_server_id| {
                if summaries_by_server_id.remove(&server_id).is_some() {
                    // Broadcast an empty summary so remote peers drop theirs;
                    // send failures are logged and otherwise ignored.
                    if let Some(share) = self.share.as_ref() {
                        self.client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id: share.project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path: path.to_string_lossy().to_string(),
                                    language_server_id: server_id.0 as u64,
                                    error_count: 0,
                                    warning_count: 0,
                                }),
                            })
                            .log_err();
                    }
                    // Drop the path entirely once no server has summaries for it.
                    !summaries_by_server_id.is_empty()
                } else {
                    true
                }
            });

        // Entries are kept sorted by server id, so binary search locates the
        // server's diagnostics for each path.
        self.diagnostics.retain(|_, diagnostics_by_server_id| {
            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                diagnostics_by_server_id.remove(ix);
                !diagnostics_by_server_id.is_empty()
            } else {
                true
            }
        });
    }
856
    /// Replaces the diagnostics produced by `server_id` for `worktree_path`,
    /// updating the per-path summary and (when sharing) broadcasting the new
    /// summary to collaborators.
    ///
    /// Returns `true` when the summary changed in a way observers care about
    /// (i.e. the old or new summary is non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics left from this server: remove its entry, and the
            // whole path entry once no server has diagnostics for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path list is kept sorted by server id; insert or replace
            // at the binary-search position.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            // Notify collaborators of the updated counts; send failures are
            // logged and otherwise ignored.
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
916
    /// Installs `new_snapshot` as the current snapshot, forwards it to
    /// collaborators when sharing, and emits change events.
    fn set_snapshot(
        &mut self,
        new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut ModelContext<Worktree>,
    ) {
        // Diff repositories *before* replacing the snapshot — the comparison
        // needs the old snapshot, which the next line overwrites.
        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);

        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((
                    self.snapshot.clone(),
                    entry_changes.clone(),
                    repo_changes.clone(),
                ))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }
945
    /// Diffs the git repositories of two snapshots, producing the set of
    /// work-directory paths whose repository state changed: repositories
    /// that were added, removed, or whose `.git` directory was rescanned.
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
        // Merge-join the two repository lists, which are both ordered by
        // project entry id.
        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        Ordering::Less => {
                            // Present only in the new snapshot: repository added.
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        Ordering::Equal => {
                            // Present in both: changed only if its `.git`
                            // directory was rescanned since the old snapshot.
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repository_entries
                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
                                        .cloned();
                                    changes.push((
                                        entry.path.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        Ordering::Greater => {
                            // Present only in the old snapshot: repository removed.
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repository_entries
                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
                                    .cloned();
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                // Remaining new-only repositories: added.
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                // Remaining old-only repositories: removed.
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repository_entries
                            .get(&RepositoryWorkDirectory(entry.path.clone()))
                            .cloned();
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Helper to turn the iterator's `(&K, &V)` peek items into owned pairs.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
1040
1041 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1042 let mut is_scanning_rx = self.is_scanning.1.clone();
1043 async move {
1044 let mut is_scanning = *is_scanning_rx.borrow();
1045 while is_scanning {
1046 if let Some(value) = is_scanning_rx.recv().await {
1047 is_scanning = value;
1048 } else {
1049 break;
1050 }
1051 }
1052 }
1053 }
1054
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
1058
1059 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
1060 proto::WorktreeMetadata {
1061 id: self.id().to_proto(),
1062 root_name: self.root_name().to_string(),
1063 visible: self.visible,
1064 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
1065 }
1066 }
1067
    /// Reads the file at `path` from disk, returning its [`File`] handle, its
    /// text, and — when the file lives in a git repository — the git index
    /// text used as the diff base.
    ///
    /// Works even for paths excluded from scanning: when no entry exists for
    /// the path, a detached `File` is built from filesystem metadata instead.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Kick off an entry refresh so the snapshot reflects the loaded file.
        let entry = self.refresh_entry(path.clone(), None, cx);

        cx.spawn(|this, mut cx| async move {
            let abs_path = abs_path?;
            let text = fs.load(&abs_path).await?;
            let mut index_task = None;
            let snapshot = this.update(&mut cx, |this, _| this.as_local().unwrap().snapshot())?;
            if let Some(repo) = snapshot.repository_for_path(&path) {
                if let Some(repo_path) = repo.work_directory.relativize(&snapshot, &path).log_err()
                {
                    if let Some(git_repo) = snapshot.git_repositories.get(&*repo.work_directory) {
                        let git_repo = git_repo.repo_ptr.clone();
                        // Load the git index text on the background executor.
                        // Directories and symlinks get no diff base.
                        index_task = Some(cx.background_executor().spawn({
                            let fs = fs.clone();
                            let abs_path = abs_path.clone();
                            async move {
                                let abs_path_metadata = fs
                                    .metadata(&abs_path)
                                    .await
                                    .with_context(|| {
                                        format!("loading file and FS metadata for {abs_path:?}")
                                    })
                                    .log_err()
                                    .flatten()?;
                                if abs_path_metadata.is_dir || abs_path_metadata.is_symlink {
                                    None
                                } else {
                                    git_repo.lock().load_index_text(&repo_path)
                                }
                            }
                        }));
                    }
                }
            }

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            let worktree = this
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            match entry.await? {
                Some(entry) => Ok((
                    File {
                        entry_id: Some(entry.id),
                        worktree,
                        path: entry.path,
                        mtime: entry.mtime,
                        is_local: true,
                        is_deleted: false,
                        is_private: entry.is_private,
                    },
                    text,
                    diff_base,
                )),
                None => {
                    // The path is excluded from scanning, so no entry exists;
                    // build a detached `File` from filesystem metadata instead.
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    let is_private = snapshot.is_path_private(path.as_ref());
                    Ok((
                        File {
                            entry_id: None,
                            worktree,
                            path,
                            mtime: Some(metadata.mtime),
                            is_local: true,
                            is_deleted: false,
                            is_private,
                        },
                        text,
                        diff_base,
                    ))
                }
            }
        })
    }
1162
1163 pub fn save_buffer(
1164 &self,
1165 buffer_handle: Model<Buffer>,
1166 path: Arc<Path>,
1167 mut has_changed_file: bool,
1168 cx: &mut ModelContext<Worktree>,
1169 ) -> Task<Result<()>> {
1170 let buffer = buffer_handle.read(cx);
1171
1172 let rpc = self.client.clone();
1173 let buffer_id: u64 = buffer.remote_id().into();
1174 let project_id = self.share.as_ref().map(|share| share.project_id);
1175
1176 if buffer.file().is_some_and(|file| !file.is_created()) {
1177 has_changed_file = true;
1178 }
1179
1180 let text = buffer.as_rope().clone();
1181 let version = buffer.version();
1182 let save = self.write_file(path.as_ref(), text, buffer.line_ending(), cx);
1183 let fs = Arc::clone(&self.fs);
1184 let abs_path = self.absolutize(&path);
1185 let is_private = self.snapshot.is_path_private(&path);
1186
1187 cx.spawn(move |this, mut cx| async move {
1188 let entry = save.await?;
1189 let abs_path = abs_path?;
1190 let this = this.upgrade().context("worktree dropped")?;
1191
1192 let (entry_id, mtime, path, is_dotenv) = match entry {
1193 Some(entry) => (Some(entry.id), entry.mtime, entry.path, entry.is_private),
1194 None => {
1195 let metadata = fs
1196 .metadata(&abs_path)
1197 .await
1198 .with_context(|| {
1199 format!(
1200 "Fetching metadata after saving the excluded buffer {abs_path:?}"
1201 )
1202 })?
1203 .with_context(|| {
1204 format!("Excluded buffer {path:?} got removed during saving")
1205 })?;
1206 (None, Some(metadata.mtime), path, is_private)
1207 }
1208 };
1209
1210 if has_changed_file {
1211 let new_file = Arc::new(File {
1212 entry_id,
1213 worktree: this,
1214 path,
1215 mtime,
1216 is_local: true,
1217 is_deleted: false,
1218 is_private: is_dotenv,
1219 });
1220
1221 if let Some(project_id) = project_id {
1222 rpc.send(proto::UpdateBufferFile {
1223 project_id,
1224 buffer_id,
1225 file: Some(new_file.to_proto()),
1226 })
1227 .log_err();
1228 }
1229
1230 buffer_handle.update(&mut cx, |buffer, cx| {
1231 if has_changed_file {
1232 buffer.file_updated(new_file, cx);
1233 }
1234 })?;
1235 }
1236
1237 if let Some(project_id) = project_id {
1238 rpc.send(proto::BufferSaved {
1239 project_id,
1240 buffer_id,
1241 version: serialize_version(&version),
1242 mtime: mtime.map(|time| time.into()),
1243 })?;
1244 }
1245
1246 buffer_handle.update(&mut cx, |buffer, cx| {
1247 buffer.did_save(version.clone(), mtime, cx);
1248 })?;
1249
1250 Ok(())
1251 })
1252 }
1253
1254 /// Find the lowest path in the worktree's datastructures that is an ancestor
1255 fn lowest_ancestor(&self, path: &Path) -> PathBuf {
1256 let mut lowest_ancestor = None;
1257 for path in path.ancestors() {
1258 if self.entry_for_path(path).is_some() {
1259 lowest_ancestor = Some(path.to_path_buf());
1260 break;
1261 }
1262 }
1263
1264 lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
1265 }
1266
    /// Creates a file (or directory when `is_dir`) at `path`, then refreshes
    /// the entry for `path` as well as entries for all ancestors between it
    /// and the lowest pre-existing ancestor. Returns the refreshed entry, or
    /// `None` if the path is excluded from the worktree.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let path = path.into();
        // Deepest ancestor that already has an entry; everything below it is
        // new and must also be refreshed.
        let lowest_ancestor = self.lowest_ancestor(&path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background_executor().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path?).await
            } else {
                fs.save(&abs_path?, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            let (result, refreshes) = this.update(&mut cx, |this, cx| {
                let mut refreshes = Vec::new();
                // Unwrap is safe: `lowest_ancestor` is an ancestor of `path`
                // by construction.
                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
                for refresh_path in refresh_paths.ancestors() {
                    if refresh_path == Path::new("") {
                        continue;
                    }
                    let refresh_full_path = lowest_ancestor.join(refresh_path);

                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
                        refresh_full_path.into(),
                        None,
                        cx,
                    ));
                }
                (
                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
                    refreshes,
                )
            })?;
            // Ancestor refresh failures are logged but do not fail the task.
            for refresh in refreshes {
                refresh.await.log_err();
            }

            result.await
        })
    }
1315
1316 pub(crate) fn write_file(
1317 &self,
1318 path: impl Into<Arc<Path>>,
1319 text: Rope,
1320 line_ending: LineEnding,
1321 cx: &mut ModelContext<Worktree>,
1322 ) -> Task<Result<Option<Entry>>> {
1323 let path: Arc<Path> = path.into();
1324 let abs_path = self.absolutize(&path);
1325 let fs = self.fs.clone();
1326 let write = cx
1327 .background_executor()
1328 .spawn(async move { fs.save(&abs_path?, &text, line_ending).await });
1329
1330 cx.spawn(|this, mut cx| async move {
1331 write.await?;
1332 this.update(&mut cx, |this, cx| {
1333 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1334 })?
1335 .await
1336 })
1337 }
1338
1339 pub fn delete_entry(
1340 &self,
1341 entry_id: ProjectEntryId,
1342 trash: bool,
1343 cx: &mut ModelContext<Worktree>,
1344 ) -> Option<Task<Result<()>>> {
1345 let entry = self.entry_for_id(entry_id)?.clone();
1346 let abs_path = self.absolutize(&entry.path);
1347 let fs = self.fs.clone();
1348
1349 let delete = cx.background_executor().spawn(async move {
1350 if entry.is_file() {
1351 if trash {
1352 fs.trash_file(&abs_path?, Default::default()).await?;
1353 } else {
1354 fs.remove_file(&abs_path?, Default::default()).await?;
1355 }
1356 } else {
1357 if trash {
1358 fs.trash_dir(
1359 &abs_path?,
1360 RemoveOptions {
1361 recursive: true,
1362 ignore_if_not_exists: false,
1363 },
1364 )
1365 .await?;
1366 } else {
1367 fs.remove_dir(
1368 &abs_path?,
1369 RemoveOptions {
1370 recursive: true,
1371 ignore_if_not_exists: false,
1372 },
1373 )
1374 .await?;
1375 }
1376 }
1377 anyhow::Ok(entry.path)
1378 });
1379
1380 Some(cx.spawn(|this, mut cx| async move {
1381 let path = delete.await?;
1382 this.update(&mut cx, |this, _| {
1383 this.as_local_mut()
1384 .unwrap()
1385 .refresh_entries_for_paths(vec![path])
1386 })?
1387 .recv()
1388 .await;
1389 Ok(())
1390 }))
1391 }
1392
    /// Moves the entry with `entry_id` to `new_path`, then rescans both the
    /// old and new locations. Returns the refreshed entry at the new path, or
    /// `None` when the entry id is unknown.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Ok(None)),
        };
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let case_sensitive = self.fs_case_sensitive;
        let rename = cx.background_executor().spawn(async move {
            let abs_old_path = abs_old_path?;
            let abs_new_path = abs_new_path?;

            let abs_old_path_lower = abs_old_path.to_str().map(|p| p.to_lowercase());
            let abs_new_path_lower = abs_new_path.to_str().map(|p| p.to_lowercase());

            // If we're on a case-insensitive FS and we're doing a case-only rename (i.e. `foobar` to `FOOBAR`)
            // we want to overwrite, because otherwise we run into a file-already-exists error.
            let overwrite = !case_sensitive
                && abs_old_path != abs_new_path
                && abs_old_path_lower == abs_new_path_lower;

            fs.rename(
                &abs_old_path,
                &abs_new_path,
                fs::RenameOptions {
                    overwrite,
                    ..Default::default()
                },
            )
            .await
        });

        cx.spawn(|this, mut cx| async move {
            rename.await?;
            // Rescan the old and new locations; the returned entry is the one
            // at the new path.
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })?
            .await
        })
    }
1442
1443 pub fn copy_entry(
1444 &self,
1445 entry_id: ProjectEntryId,
1446 new_path: impl Into<Arc<Path>>,
1447 cx: &mut ModelContext<Worktree>,
1448 ) -> Task<Result<Option<Entry>>> {
1449 let old_path = match self.entry_for_id(entry_id) {
1450 Some(entry) => entry.path.clone(),
1451 None => return Task::ready(Ok(None)),
1452 };
1453 let new_path = new_path.into();
1454 let abs_old_path = self.absolutize(&old_path);
1455 let abs_new_path = self.absolutize(&new_path);
1456 let fs = self.fs.clone();
1457 let copy = cx.background_executor().spawn(async move {
1458 copy_recursive(
1459 fs.as_ref(),
1460 &abs_old_path?,
1461 &abs_new_path?,
1462 Default::default(),
1463 )
1464 .await
1465 });
1466
1467 cx.spawn(|this, mut cx| async move {
1468 copy.await?;
1469 this.update(&mut cx, |this, cx| {
1470 this.as_local_mut()
1471 .unwrap()
1472 .refresh_entry(new_path.clone(), None, cx)
1473 })?
1474 .await
1475 })
1476 }
1477
    /// Requests a rescan of the path belonging to the entry with `entry_id`,
    /// resolving once the scan completes. Returns `None` when no entry with
    /// that id exists.
    pub fn expand_entry(
        &mut self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let path = self.entry_for_id(entry_id)?.path.clone();
        let mut refresh = self.refresh_entries_for_paths(vec![path]);
        Some(cx.background_executor().spawn(async move {
            // Wait for the barrier to complete (or for the scanner to drop it).
            refresh.next().await;
            Ok(())
        }))
    }
1490
1491 pub fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
1492 let (tx, rx) = barrier::channel();
1493 self.scan_requests_tx
1494 .try_send(ScanRequest {
1495 relative_paths: paths,
1496 done: tx,
1497 })
1498 .ok();
1499 rx
1500 }
1501
    /// Queues `path_prefix` for the background scanner to include in its
    /// scan; send failures (e.g. the scanner shut down) are ignored.
    pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) {
        self.path_prefixes_to_scan_tx.try_send(path_prefix).ok();
    }
1505
    /// Rescans `path` (and `old_path`, when provided for renames) and returns
    /// the freshly scanned entry. Returns `Ok(None)` immediately for paths
    /// that are excluded from the worktree.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        if self.is_path_excluded(path.to_path_buf()) {
            return Task::ready(Ok(None));
        }
        // For renames, rescan the old location as well as the new one.
        let paths = if let Some(old_path) = old_path.as_ref() {
            vec![old_path.clone(), path.clone()]
        } else {
            vec![path.clone()]
        };
        let mut refresh = self.refresh_entries_for_paths(paths);
        cx.spawn(move |this, mut cx| async move {
            // Wait for the scan to finish before reading the entry back.
            refresh.recv().await;
            let new_entry = this.update(&mut cx, |this, _| {
                this.entry_for_path(path)
                    .cloned()
                    .ok_or_else(|| anyhow!("failed to read path after update"))
            })??;
            Ok(Some(new_entry))
        })
    }
1531
    /// Begins observing this worktree's snapshot, invoking `callback` with a
    /// full initial `UpdateWorktree` followed by deltas as the snapshot
    /// changes. If the worktree is already being observed, the existing
    /// stream is resumed and the returned receiver resolves immediately;
    /// otherwise it resolves once the snapshot stream ends.
    pub fn observe_updates<F, Fut>(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
        callback: F,
    ) -> oneshot::Receiver<()>
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: Send + Future<Output = bool>,
    {
        // A tiny chunk size in tests exercises the update-splitting logic.
        #[cfg(any(test, feature = "test-support"))]
        const MAX_CHUNK_SIZE: usize = 2;
        #[cfg(not(any(test, feature = "test-support")))]
        const MAX_CHUNK_SIZE: usize = 256;

        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: just nudge the paused update loop to resume.
            share_tx.send(()).ok();
            *share.resume_updates.borrow_mut() = ();
            return share_rx;
        }

        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
        let (snapshots_tx, mut snapshots_rx) =
            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
        // Seed the stream with the current snapshot and no changes, so the
        // first message sent is a full initial update.
        snapshots_tx
            .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
            .ok();

        let worktree_id = cx.entity_id().as_u64();
        let _maintain_remote_snapshot = cx.background_executor().spawn(async move {
            let mut is_first = true;
            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
                let update;
                if is_first {
                    update = snapshot.build_initial_update(project_id, worktree_id);
                    is_first = false;
                } else {
                    update =
                        snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes);
                }

                // Large updates are split into bounded chunks; each chunk is
                // retried until the callback reports success, pausing on a
                // `resume_updates` signal in between attempts.
                for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                    // Clear any stale resume signal before attempting to send.
                    let _ = resume_updates_rx.try_recv();
                    loop {
                        let result = callback(update.clone());
                        if result.await {
                            break;
                        } else {
                            log::info!("waiting to resume updates");
                            if resume_updates_rx.next().await.is_none() {
                                return Some(());
                            }
                        }
                    }
                }
            }
            share_tx.send(()).ok();
            Some(())
        });

        self.share = Some(ShareState {
            project_id,
            snapshots_tx,
            resume_updates: resume_updates_tx,
            _maintain_remote_snapshot,
        });
        share_rx
    }
1602
    /// Shares this worktree with the given project's collaborators: first
    /// replays all known diagnostic summaries, then starts streaming worktree
    /// updates via RPC. The returned task resolves when the update stream
    /// terminates.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let client = self.client.clone();

        for (path, summaries) in &self.diagnostic_summaries {
            for (&server_id, summary) in summaries {
                // Bail on the first failed send; the share never starts.
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id: cx.entity_id().as_u64(),
                    summary: Some(summary.to_proto(server_id, path)),
                }) {
                    return Task::ready(Err(e));
                }
            }
        }

        // Each update is acknowledged by the peer; a failed request pauses
        // the stream (see `observe_updates`).
        let rx = self.observe_updates(project_id, cx, move |update| {
            client.request(update).map(|result| result.is_ok())
        });
        cx.background_executor()
            .spawn(async move { rx.await.map_err(|_| anyhow!("share ended")) })
    }
1624
1625 pub fn unshare(&mut self) {
1626 self.share.take();
1627 }
1628
    /// Whether this worktree is currently being shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1632}
1633
1634impl RemoteWorktree {
    /// Returns a clone of this remote worktree's current foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1638
    /// Marks this remote worktree as disconnected: stops accepting updates
    /// from the host and drops all pending snapshot subscriptions (their
    /// waiters observe the dropped senders and fail).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
1644
    /// Saves the buffer by asking the host to perform the save over RPC
    /// (optionally at a new path), then applies the host's returned version
    /// and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: Model<Buffer>,
        new_path: Option<proto::ProjectPath>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id().into();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.spawn(move |_, mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    new_path,
                    version: serialize_version(&version),
                })
                .await?;
            // Use the version/mtime the host actually saved, which may differ
            // from the version we requested.
            let version = deserialize_version(&response.version);
            let mtime = response.mtime.map(|mtime| mtime.into());

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), mtime, cx);
            })?;

            Ok(())
        })
    }
1675
    /// Forwards a worktree update received from the host to the background
    /// consumer that applies it. Updates arriving after disconnection (when
    /// `updates_tx` is `None`) are silently dropped.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }
1683
1684 fn observed_snapshot(&self, scan_id: usize) -> bool {
1685 self.completed_scan_id >= scan_id
1686 }
1687
    /// Returns a future that resolves once a scan with at least `scan_id`
    /// has been applied to this worktree, or with an error if the worktree
    /// is (or becomes) disconnected from the host.
    pub fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already caught up: resolve immediately.
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` below return an error.
            drop(tx);
        } else {
            // Insert the subscription, keeping the list sorted by scan id;
            // both Ok and Err give a valid insertion index.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1708
1709 pub fn update_diagnostic_summary(
1710 &mut self,
1711 path: Arc<Path>,
1712 summary: &proto::DiagnosticSummary,
1713 ) {
1714 let server_id = LanguageServerId(summary.language_server_id as usize);
1715 let summary = DiagnosticSummary {
1716 error_count: summary.error_count as usize,
1717 warning_count: summary.warning_count as usize,
1718 };
1719
1720 if summary.is_empty() {
1721 if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1722 summaries.remove(&server_id);
1723 if summaries.is_empty() {
1724 self.diagnostic_summaries.remove(&path);
1725 }
1726 }
1727 } else {
1728 self.diagnostic_summaries
1729 .entry(path)
1730 .or_default()
1731 .insert(server_id, summary);
1732 }
1733 }
1734
    /// Inserts an entry received from the host into this remote worktree,
    /// after waiting until the corresponding scan has been observed. Updates
    /// the background snapshot first, then copies it to the foreground one.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })?
        })
    }
1753
    /// Deletes the entry with the given id from this remote worktree, after
    /// waiting until the corresponding scan has been observed. Updates the
    /// background snapshot first, then copies it to the foreground one.
    pub fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(move |this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            })?;
            Ok(())
        })
    }
1772}
1773
1774impl Snapshot {
    /// This worktree's id.
    pub fn id(&self) -> WorktreeId {
        self.id
    }
1778
    /// The absolute path of the worktree's root.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }
1782
1783 pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
1784 if path
1785 .components()
1786 .any(|component| !matches!(component, std::path::Component::Normal(_)))
1787 {
1788 return Err(anyhow!("invalid path"));
1789 }
1790 if path.file_name().is_some() {
1791 Ok(self.abs_path.join(path))
1792 } else {
1793 Ok(self.abs_path.to_path_buf())
1794 }
1795 }
1796
    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
1800
    /// Inserts (or replaces) an entry deserialized from a protobuf message,
    /// keeping `entries_by_id` and `entries_by_path` consistent with each
    /// other.
    fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id previously mapped to another path, drop the stale
        // path-keyed record so the two trees stay in sync.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
1818
    /// Removes the entry with the given id — together with every entry whose
    /// path lies underneath it — from both `entries_by_id` and
    /// `entries_by_path`. Returns the removed entry's path, or `None` when
    /// the id is unknown.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            // Keep everything strictly before the removed path…
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // …skip the removed entry plus all of its descendants, removing
            // each from the id-keyed tree as well…
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // …then keep the remainder of the tree.
            new_entries_by_path.append(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }
1839
1840 #[cfg(any(test, feature = "test-support"))]
1841 pub fn status_for_file(&self, path: impl Into<PathBuf>) -> Option<GitFileStatus> {
1842 let path = path.into();
1843 self.entries_by_path
1844 .get(&PathKey(Arc::from(path)), &())
1845 .and_then(|entry| entry.git_status)
1846 }
1847
1848 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1849 let mut entries_by_path_edits = Vec::new();
1850 let mut entries_by_id_edits = Vec::new();
1851
1852 for entry_id in update.removed_entries {
1853 let entry_id = ProjectEntryId::from_proto(entry_id);
1854 entries_by_id_edits.push(Edit::Remove(entry_id));
1855 if let Some(entry) = self.entry_for_id(entry_id) {
1856 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1857 }
1858 }
1859
1860 for entry in update.updated_entries {
1861 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1862 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1863 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1864 }
1865 if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
1866 if old_entry.id != entry.id {
1867 entries_by_id_edits.push(Edit::Remove(old_entry.id));
1868 }
1869 }
1870 entries_by_id_edits.push(Edit::Insert(PathEntry {
1871 id: entry.id,
1872 path: entry.path.clone(),
1873 is_ignored: entry.is_ignored,
1874 scan_id: 0,
1875 }));
1876 entries_by_path_edits.push(Edit::Insert(entry));
1877 }
1878
1879 self.entries_by_path.edit(entries_by_path_edits, &());
1880 self.entries_by_id.edit(entries_by_id_edits, &());
1881
1882 update.removed_repositories.sort_unstable();
1883 self.repository_entries.retain(|_, entry| {
1884 if let Ok(_) = update
1885 .removed_repositories
1886 .binary_search(&entry.work_directory.to_proto())
1887 {
1888 false
1889 } else {
1890 true
1891 }
1892 });
1893
1894 for repository in update.updated_repositories {
1895 let work_directory_entry: WorkDirectoryEntry =
1896 ProjectEntryId::from_proto(repository.work_directory_id).into();
1897
1898 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1899 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1900 if self.repository_entries.get(&work_directory).is_some() {
1901 self.repository_entries.update(&work_directory, |repo| {
1902 repo.branch = repository.branch.map(Into::into);
1903 });
1904 } else {
1905 self.repository_entries.insert(
1906 work_directory,
1907 RepositoryEntry {
1908 work_directory: work_directory_entry,
1909 branch: repository.branch.map(Into::into),
1910 },
1911 )
1912 }
1913 } else {
1914 log::error!("no work directory entry for repository {:?}", repository)
1915 }
1916 }
1917
1918 self.scan_id = update.scan_id as usize;
1919 if update.is_last_update {
1920 self.completed_scan_id = update.scan_id as usize;
1921 }
1922
1923 Ok(())
1924 }
1925
    /// The total number of file entries in this snapshot.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }
1929
    /// The number of non-ignored file entries in this snapshot.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().non_ignored_file_count
    }
1933
    /// Returns a traversal positioned at the `start_offset`-th entry among
    /// those matching the include flags.
    fn traverse_from_offset(
        &self,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        // Seek by filtered count, so the offset is relative to the set of
        // entries the traversal will actually yield.
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_files,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_files,
            include_dirs,
            include_ignored,
        }
    }
1959
    /// Returns a traversal positioned at the entry for `path` (or, if that
    /// path has no entry, at the next path in order).
    fn traverse_from_path(
        &self,
        include_files: bool,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_files,
            include_dirs,
            include_ignored,
        }
    }
1976
    /// Iterates over file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(true, false, include_ignored, start)
    }
1980
    /// Iterates over directory entries, starting at the `start`-th directory.
    pub fn directories(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, true, include_ignored, start)
    }
1984
    /// Iterates over all entries (files and directories) from the beginning.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, true, include_ignored, 0)
    }
1988
    /// Iterates over all repository entries, keyed by their work-directory
    /// path.
    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
        self.repository_entries
            .iter()
            .map(|(path, entry)| (&path.0, entry))
    }
1994
    /// Get the repository whose work directory is exactly the given path.
    /// (Unlike `repository_for_path`, this is an exact-key lookup, not a
    /// containment search.)
    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
        self.repository_entries
            .get(&RepositoryWorkDirectory(path.into()))
            .cloned()
    }
2001
    /// Get the repository whose work directory contains the given path.
    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
        self.repository_and_work_directory_for_path(path)
            .map(|e| e.1)
    }
2007
2008 pub fn repository_and_work_directory_for_path(
2009 &self,
2010 path: &Path,
2011 ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
2012 self.repository_entries
2013 .iter()
2014 .filter(|(workdir_path, _)| path.starts_with(workdir_path))
2015 .last()
2016 .map(|(path, repo)| (path.clone(), repo.clone()))
2017 }
2018
    /// Given an ordered iterator of entries, returns an iterator of those entries,
    /// along with their containing git repository.
    pub fn entries_with_repositories<'a>(
        &'a self,
        entries: impl 'a + Iterator<Item = &'a Entry>,
    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
        // Stack of repositories whose work directories contain the current
        // entry, innermost last. Relies on `entries` being ordered by path.
        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
        let mut repositories = self.repositories().peekable();
        entries.map(move |entry| {
            // Pop repositories whose work directory no longer contains the
            // current entry.
            while let Some((repo_path, _)) = containing_repos.last() {
                if entry.path.starts_with(repo_path) {
                    break;
                } else {
                    containing_repos.pop();
                }
            }
            // Push any repositories whose work directory starts at or before
            // the current entry.
            while let Some((repo_path, _)) = repositories.peek() {
                if entry.path.starts_with(repo_path) {
                    containing_repos.push(repositories.next().unwrap());
                } else {
                    break;
                }
            }
            // The innermost containing repository, if any.
            let repo = containing_repos.last().map(|(_, repo)| *repo);
            (entry, repo)
        })
    }
2046
    /// Updates the `git_status` of the given entries such that files'
    /// statuses bubble up to their ancestor directories.
    ///
    /// NOTE(review): the containing-entry stack logic relies on `result`
    /// being ordered by path with directories preceding their descendants —
    /// confirm at call sites.
    pub fn propagate_git_statuses(&self, result: &mut [Entry]) {
        let mut cursor = self
            .entries_by_path
            .cursor::<(TraversalProgress, GitStatuses)>();
        // Stack of (index into `result`, GitStatuses total at the directory's
        // start) for directories containing the current position.
        let mut entry_stack = Vec::<(usize, GitStatuses)>::new();

        let mut result_ix = 0;
        loop {
            let next_entry = result.get(result_ix);
            let containing_entry = entry_stack.last().map(|(ix, _)| &result[*ix]);

            // Decide whether the directory on top of the stack has been fully
            // traversed (the next entry is no longer inside it).
            let entry_to_finish = match (containing_entry, next_entry) {
                (Some(_), None) => entry_stack.pop(),
                (Some(containing_entry), Some(next_path)) => {
                    if next_path.path.starts_with(&containing_entry.path) {
                        None
                    } else {
                        entry_stack.pop()
                    }
                }
                (None, Some(_)) => None,
                (None, None) => break,
            };

            if let Some((entry_ix, prev_statuses)) = entry_to_finish {
                // Advance to just past the directory's subtree, then compute
                // the statuses accumulated within it as end-total minus
                // start-total.
                cursor.seek_forward(
                    &TraversalTarget::PathSuccessor(&result[entry_ix].path),
                    Bias::Left,
                    &(),
                );

                let statuses = cursor.start().1 - prev_statuses;

                // Conflict dominates, then modified, then added.
                result[entry_ix].git_status = if statuses.conflict > 0 {
                    Some(GitFileStatus::Conflict)
                } else if statuses.modified > 0 {
                    Some(GitFileStatus::Modified)
                } else if statuses.added > 0 {
                    Some(GitFileStatus::Added)
                } else {
                    None
                };
            } else {
                if result[result_ix].is_dir() {
                    // Record the statuses total at the directory's start so
                    // its aggregate can be computed when it is finished above.
                    cursor.seek_forward(
                        &TraversalTarget::Path(&result[result_ix].path),
                        Bias::Left,
                        &(),
                    );
                    entry_stack.push((result_ix, cursor.start().1));
                }
                result_ix += 1;
            }
        }
    }
2104
    /// Iterates over the paths of all entries, skipping the root entry
    /// (whose path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }
2112
    /// Iterates over the direct children of the entry at `parent_path`.
    pub fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Bias::Right positions the cursor just past the parent entry itself.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_files: true,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }
2127
    /// Iterates over all entries underneath `parent_path`, at any depth.
    pub fn descendent_entries<'a>(
        &'a self,
        include_dirs: bool,
        include_ignored: bool,
        parent_path: &'a Path,
    ) -> DescendentEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
        let mut traversal = Traversal {
            cursor,
            include_files: true,
            include_dirs,
            include_ignored,
        };

        // NOTE(review): this appears to step past the seek position when it
        // contributed no countable entry (e.g. the parent itself when dirs
        // are excluded) — confirm against `Traversal` internals.
        if traversal.end_offset() == traversal.start_offset() {
            traversal.advance();
        }

        DescendentEntriesIter {
            traversal,
            parent_path,
        }
    }
2152
    /// The entry for the worktree root itself (the empty path), if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }
2156
    /// The name of the worktree's root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
2160
2161 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
2162 self.repository_entries
2163 .get(&RepositoryWorkDirectory(Path::new("").into()))
2164 .map(|entry| entry.to_owned())
2165 }
2166
    /// Iterates over all repository entries in this snapshot.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repository_entries.values()
    }
2170
    /// The id of the latest scan applied to this snapshot (which may still be
    /// in progress — compare with `completed_scan_id`).
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }
2174
2175 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
2176 let path = path.as_ref();
2177 self.traverse_from_path(true, true, true, path)
2178 .entry()
2179 .and_then(|entry| {
2180 if entry.path.as_ref() == path {
2181 Some(entry)
2182 } else {
2183 None
2184 }
2185 })
2186 }
2187
    /// Looks up an entry by id, resolving it through the id-to-path mapping.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }
2192
    /// The filesystem inode recorded for the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
2196}
2197
2198impl LocalSnapshot {
    /// The local on-disk repository state for the given repository entry, if
    /// one is being tracked.
    pub fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }
2202
    /// Finds the repository containing `path`, returning its work directory
    /// together with its local on-disk state.
    pub(crate) fn local_repo_for_path(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, &LocalRepositoryEntry)> {
        let (path, repo) = self.repository_and_work_directory_for_path(path)?;
        Some((path, self.git_repositories.get(&repo.work_directory_id())?))
    }
2210
    /// The shared handle to the git repository containing `path`, if any.
    pub fn local_git_repo(&self, path: &Path) -> Option<Arc<Mutex<dyn GitRepository>>> {
        self.local_repo_for_path(path)
            .map(|(_, entry)| entry.repo_ptr.clone())
    }
2215
    /// Builds a protobuf delta update describing the given entry and
    /// repository changes, for transmission to remote peers.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updated_repositories = Vec::new();
        let mut removed_repositories = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        for (work_dir_path, change) in repo_changes.iter() {
            let new_repo = self
                .repository_entries
                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
            match (&change.old_repository, new_repo) {
                // Repository still present: send a delta against the old one.
                (Some(old_repo), Some(new_repo)) => {
                    updated_repositories.push(new_repo.build_update(old_repo));
                }
                // Newly discovered repository: send it in full.
                (None, Some(new_repo)) => {
                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
                }
                // Repository removed.
                (Some(old_repo), None) => {
                    removed_repositories.push(old_repo.work_directory.0.to_proto());
                }
                _ => {}
            }
        }

        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);
        removed_repositories.sort_unstable();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        // TODO - optimize, knowing that removed_entries are sorted.
        // Entries that appear in both lists (removed then re-created) should
        // only be reported as updated.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }
2275
2276 fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
2277 let mut updated_entries = self
2278 .entries_by_path
2279 .iter()
2280 .map(proto::Entry::from)
2281 .collect::<Vec<_>>();
2282 updated_entries.sort_unstable_by_key(|e| e.id);
2283
2284 let mut updated_repositories = self
2285 .repository_entries
2286 .values()
2287 .map(proto::RepositoryEntry::from)
2288 .collect::<Vec<_>>();
2289 updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
2290
2291 proto::UpdateWorktree {
2292 project_id,
2293 worktree_id,
2294 abs_path: self.abs_path().to_string_lossy().into(),
2295 root_name: self.root_name().to_string(),
2296 updated_entries,
2297 removed_entries: Vec::new(),
2298 scan_id: self.scan_id as u64,
2299 is_last_update: self.completed_scan_id == self.scan_id,
2300 updated_repositories,
2301 removed_repositories: Vec::new(),
2302 }
2303 }
2304
2305 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2306 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
2307 let abs_path = self.abs_path.join(&entry.path);
2308 match smol::block_on(build_gitignore(&abs_path, fs)) {
2309 Ok(ignore) => {
2310 self.ignores_by_parent_abs_path
2311 .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
2312 }
2313 Err(error) => {
2314 log::error!(
2315 "error loading .gitignore file {:?} - {:?}",
2316 &entry.path,
2317 error
2318 );
2319 }
2320 }
2321 }
2322
2323 if entry.kind == EntryKind::PendingDir {
2324 if let Some(existing_entry) =
2325 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
2326 {
2327 entry.kind = existing_entry.kind;
2328 }
2329 }
2330
2331 let scan_id = self.scan_id;
2332 let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
2333 if let Some(removed) = removed {
2334 if removed.id != entry.id {
2335 self.entries_by_id.remove(&removed.id, &());
2336 }
2337 }
2338 self.entries_by_id.insert_or_replace(
2339 PathEntry {
2340 id: entry.id,
2341 path: entry.path.clone(),
2342 is_ignored: entry.is_ignored,
2343 scan_id,
2344 },
2345 &(),
2346 );
2347
2348 entry
2349 }
2350
2351 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2352 let mut inodes = TreeSet::default();
2353 for ancestor in path.ancestors().skip(1) {
2354 if let Some(entry) = self.entry_for_path(ancestor) {
2355 inodes.insert(entry.inode);
2356 }
2357 }
2358 inodes
2359 }
2360
2361 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2362 let mut new_ignores = Vec::new();
2363 for (index, ancestor) in abs_path.ancestors().enumerate() {
2364 if index > 0 {
2365 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2366 new_ignores.push((ancestor, Some(ignore.clone())));
2367 } else {
2368 new_ignores.push((ancestor, None));
2369 }
2370 }
2371 if ancestor.join(&*DOT_GIT).is_dir() {
2372 break;
2373 }
2374 }
2375
2376 let mut ignore_stack = IgnoreStack::none();
2377 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2378 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2379 ignore_stack = IgnoreStack::all();
2380 break;
2381 } else if let Some(ignore) = ignore {
2382 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2383 }
2384 }
2385
2386 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2387 ignore_stack = IgnoreStack::all();
2388 }
2389
2390 ignore_stack
2391 }
2392
2393 #[cfg(test)]
2394 pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
2395 self.entries_by_path
2396 .cursor::<()>()
2397 .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
2398 }
2399
2400 #[cfg(test)]
2401 pub fn check_invariants(&self, git_state: bool) {
2402 use pretty_assertions::assert_eq;
2403
2404 assert_eq!(
2405 self.entries_by_path
2406 .cursor::<()>()
2407 .map(|e| (&e.path, e.id))
2408 .collect::<Vec<_>>(),
2409 self.entries_by_id
2410 .cursor::<()>()
2411 .map(|e| (&e.path, e.id))
2412 .collect::<collections::BTreeSet<_>>()
2413 .into_iter()
2414 .collect::<Vec<_>>(),
2415 "entries_by_path and entries_by_id are inconsistent"
2416 );
2417
2418 let mut files = self.files(true, 0);
2419 let mut visible_files = self.files(false, 0);
2420 for entry in self.entries_by_path.cursor::<()>() {
2421 if entry.is_file() {
2422 assert_eq!(files.next().unwrap().inode, entry.inode);
2423 if !entry.is_ignored && !entry.is_external {
2424 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
2425 }
2426 }
2427 }
2428
2429 assert!(files.next().is_none());
2430 assert!(visible_files.next().is_none());
2431
2432 let mut bfs_paths = Vec::new();
2433 let mut stack = self
2434 .root_entry()
2435 .map(|e| e.path.as_ref())
2436 .into_iter()
2437 .collect::<Vec<_>>();
2438 while let Some(path) = stack.pop() {
2439 bfs_paths.push(path);
2440 let ix = stack.len();
2441 for child_entry in self.child_entries(path) {
2442 stack.insert(ix, &child_entry.path);
2443 }
2444 }
2445
2446 let dfs_paths_via_iter = self
2447 .entries_by_path
2448 .cursor::<()>()
2449 .map(|e| e.path.as_ref())
2450 .collect::<Vec<_>>();
2451 assert_eq!(bfs_paths, dfs_paths_via_iter);
2452
2453 let dfs_paths_via_traversal = self
2454 .entries(true)
2455 .map(|e| e.path.as_ref())
2456 .collect::<Vec<_>>();
2457 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
2458
2459 if git_state {
2460 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
2461 let ignore_parent_path =
2462 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
2463 assert!(self.entry_for_path(&ignore_parent_path).is_some());
2464 assert!(self
2465 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
2466 .is_some());
2467 }
2468 }
2469 }
2470
2471 #[cfg(test)]
2472 pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
2473 let mut paths = Vec::new();
2474 for entry in self.entries_by_path.cursor::<()>() {
2475 if include_ignored || !entry.is_ignored {
2476 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
2477 }
2478 }
2479 paths.sort_by(|a, b| a.0.cmp(b.0));
2480 paths
2481 }
2482
2483 pub fn is_path_private(&self, path: &Path) -> bool {
2484 path.ancestors().any(|ancestor| {
2485 self.private_files
2486 .iter()
2487 .any(|exclude_matcher| exclude_matcher.is_match(&ancestor))
2488 })
2489 }
2490
2491 pub fn is_path_excluded(&self, mut path: PathBuf) -> bool {
2492 loop {
2493 if self
2494 .file_scan_exclusions
2495 .iter()
2496 .any(|exclude_matcher| exclude_matcher.is_match(&path))
2497 {
2498 return true;
2499 }
2500 if !path.pop() {
2501 return false;
2502 }
2503 }
2504 }
2505}
2506
impl BackgroundScannerState {
    /// Whether the scanner should descend into the directory `entry`.
    /// Ignored/external directories are only scanned when a `.git` is
    /// involved, they were scanned before, or a pending scan request covers
    /// them.
    fn should_scan_directory(&self, entry: &Entry) -> bool {
        (!entry.is_external && !entry.is_ignored)
            || entry.path.file_name() == Some(*DOT_GIT)
            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
            || self
                .paths_to_scan
                .iter()
                .any(|p| p.starts_with(&entry.path))
            || self
                .path_prefixes_to_scan
                .iter()
                .any(|p| entry.path.starts_with(p))
    }

    /// Queues a scan job for the directory `entry`, attaching its ignore
    /// stack and (for non-ignored dirs) the containing git repository with
    /// its staged statuses. Skips directories whose inode already appears
    /// among the ancestors, which would indicate a symlink cycle.
    fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
        let path = entry.path.clone();
        let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
        let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);
        let mut containing_repository = None;
        if !ignore_stack.is_abs_path_ignored(&abs_path, true) {
            if let Some((workdir_path, repo)) = self.snapshot.local_repo_for_path(&path) {
                if let Ok(repo_path) = path.strip_prefix(&workdir_path.0) {
                    containing_repository = Some((
                        workdir_path,
                        repo.repo_ptr.clone(),
                        repo.repo_ptr.lock().staged_statuses(repo_path),
                    ));
                }
            }
        }
        if !ancestor_inodes.contains(&entry.inode) {
            ancestor_inodes.insert(entry.inode);
            scan_job_tx
                .try_send(ScanJob {
                    abs_path,
                    path,
                    ignore_stack,
                    scan_queue: scan_job_tx.clone(),
                    ancestor_inodes,
                    is_external: entry.is_external,
                    containing_repository,
                })
                .unwrap();
        }
    }

    /// Keeps entry ids stable across deletion/re-creation: reuse the id of a
    /// recently-removed entry with the same inode, or of the existing entry
    /// at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Inserts `entry` into the snapshot (reusing ids where possible) and,
    /// if the entry is a `.git`, opens the corresponding repository.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        self.reuse_entry_id(&mut entry);
        let entry = self.snapshot.insert_entry(entry, fs);
        if entry.path.file_name() == Some(&DOT_GIT) {
            self.build_git_repository(entry.path.clone(), fs);
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);

        entry
    }

    /// Records the scanned children of `parent_path`, marking the parent as
    /// a loaded directory and registering its `.gitignore` if one was found.
    fn populate_dir(
        &mut self,
        parent_path: &Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = if let Some(parent_entry) = self
            .snapshot
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        // Only directories can be populated; a pending/unloaded dir becomes
        // a loaded one.
        match parent_entry.kind {
            EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
            EntryKind::Dir => {}
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
            // `false`: this ignore was discovered during a scan, so the paths
            // it governs don't need re-scanning.
            self.snapshot
                .ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        let parent_entry_id = parent_entry.id;
        self.scanned_dirs.insert(parent_entry_id);
        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for entry in entries {
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.snapshot.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // Keep `changed_paths` sorted for later merging.
        if let Err(ix) = self.changed_paths.binary_search(parent_path) {
            self.changed_paths.insert(ix, parent_path.clone());
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }

    /// Removes `path` and everything beneath it from the snapshot, recording
    /// removed ids by inode so they can be reused if the path reappears.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into [before `path`], [subtree at
            // `path`], and [after], then stitch the outer parts together.
            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.append(cursor.suffix(&()), &());
        }
        self.snapshot.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the highest removed id per inode for potential reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a `.gitignore` invalidates the cached ignore for its
        // parent directory.
        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
            if let Some((_, needs_update)) = self
                .snapshot
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *needs_update = true;
            }
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }

    /// Re-reads the state of the given `.git` directories, creating new
    /// repositories as needed and pruning ones whose `.git` entries no
    /// longer exist.
    fn reload_repositories(&mut self, dot_git_dirs_to_reload: &HashSet<PathBuf>, fs: &dyn Fs) {
        let scan_id = self.snapshot.scan_id;

        for dot_git_dir in dot_git_dirs_to_reload {
            // If there is already a repository for this .git directory, reload
            // the status for all of its files.
            let repository = self
                .snapshot
                .git_repositories
                .iter()
                .find_map(|(entry_id, repo)| {
                    (repo.git_dir_path.as_ref() == dot_git_dir).then(|| (*entry_id, repo.clone()))
                });
            match repository {
                None => {
                    self.build_git_repository(Arc::from(dot_git_dir.as_path()), fs);
                }
                Some((entry_id, repository)) => {
                    // Already reloaded in this scan pass.
                    if repository.git_dir_scan_id == scan_id {
                        continue;
                    }
                    let Some(work_dir) = self
                        .snapshot
                        .entry_for_id(entry_id)
                        .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
                    else {
                        continue;
                    };

                    log::info!("reload git repository {dot_git_dir:?}");
                    let repository = repository.repo_ptr.lock();
                    let branch = repository.branch_name();
                    repository.reload_index();

                    self.snapshot
                        .git_repositories
                        .update(&entry_id, |entry| entry.git_dir_scan_id = scan_id);
                    self.snapshot
                        .snapshot
                        .repository_entries
                        .update(&work_dir, |entry| entry.branch = branch.map(Into::into));

                    self.update_git_statuses(&work_dir, &*repository);
                }
            }
        }

        // Remove any git repositories whose .git entry no longer exists.
        let snapshot = &mut self.snapshot;
        let mut ids_to_preserve = HashSet::default();
        for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
            let exists_in_snapshot = snapshot
                .entry_for_id(work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                });
            if exists_in_snapshot {
                ids_to_preserve.insert(work_directory_id);
            } else {
                // Also keep repositories whose `.git` is excluded from
                // scanning but still present on disk (blocking FS check).
                let git_dir_abs_path = snapshot.abs_path().join(&entry.git_dir_path);
                let git_dir_excluded = snapshot.is_path_excluded(entry.git_dir_path.to_path_buf());
                if git_dir_excluded
                    && !matches!(smol::block_on(fs.metadata(&git_dir_abs_path)), Ok(None))
                {
                    ids_to_preserve.insert(work_directory_id);
                }
            }
        }
        snapshot
            .git_repositories
            .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
        snapshot
            .repository_entries
            .retain(|_, entry| ids_to_preserve.contains(&entry.work_directory.0));
    }

    /// Opens the repository whose `.git` directory lives at `dot_git_path`
    /// (worktree-relative) and records it in the snapshot. Returns the work
    /// directory, a repo handle, and the staged statuses, or `None` if the
    /// repository shouldn't or can't be built.
    fn build_git_repository(
        &mut self,
        dot_git_path: Arc<Path>,
        fs: &dyn Fs,
    ) -> Option<(
        RepositoryWorkDirectory,
        Arc<Mutex<dyn GitRepository>>,
        TreeMap<RepoPath, GitFileStatus>,
    )> {
        let work_dir_path: Arc<Path> = match dot_git_path.parent() {
            Some(parent_dir) => {
                // Guard against repositories inside the repository metadata
                if parent_dir.iter().any(|component| component == *DOT_GIT) {
                    log::info!(
                        "not building git repository for nested `.git` directory, `.git` path in the worktree: {dot_git_path:?}"
                    );
                    return None;
                };
                log::info!(
                    "building git repository, `.git` path in the worktree: {dot_git_path:?}"
                );
                parent_dir.into()
            }
            None => {
                // `dot_git_path.parent().is_none()` means `.git` directory is the opened worktree itself,
                // no files inside that directory are tracked by git, so no need to build the repo around it
                log::info!(
                    "not building git repository for the worktree itself, `.git` path in the worktree: {dot_git_path:?}"
                );
                return None;
            }
        };

        let work_dir_id = self
            .snapshot
            .entry_for_path(work_dir_path.clone())
            .map(|entry| entry.id)?;

        // Already built.
        if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
            return None;
        }

        let abs_path = self.snapshot.abs_path.join(&dot_git_path);
        let repository = fs.open_repo(abs_path.as_path())?;
        let work_directory = RepositoryWorkDirectory(work_dir_path.clone());

        let repo_lock = repository.lock();
        self.snapshot.repository_entries.insert(
            work_directory.clone(),
            RepositoryEntry {
                work_directory: work_dir_id.into(),
                branch: repo_lock.branch_name().map(Into::into),
            },
        );

        let staged_statuses = self.update_git_statuses(&work_directory, &*repo_lock);
        drop(repo_lock);

        self.snapshot.git_repositories.insert(
            work_dir_id,
            LocalRepositoryEntry {
                git_dir_scan_id: 0,
                repo_ptr: repository.clone(),
                git_dir_path: dot_git_path.clone(),
            },
        );

        Some((work_directory, repository, staged_statuses))
    }

    /// Recomputes the git status of every file under `work_directory` and
    /// writes any changes back into the entry tree. Returns the staged
    /// statuses for the whole repository.
    fn update_git_statuses(
        &mut self,
        work_directory: &RepositoryWorkDirectory,
        repo: &dyn GitRepository,
    ) -> TreeMap<RepoPath, GitFileStatus> {
        let staged_statuses = repo.staged_statuses(Path::new(""));

        let mut changes = vec![];
        let mut edits = vec![];

        for mut entry in self
            .snapshot
            .descendent_entries(false, false, &work_directory.0)
            .cloned()
        {
            let Ok(repo_path) = entry.path.strip_prefix(&work_directory.0) else {
                continue;
            };
            // Entries without an mtime can't be diffed against the index.
            let Some(mtime) = entry.mtime else {
                continue;
            };
            let repo_path = RepoPath(repo_path.to_path_buf());
            let git_file_status = combine_git_statuses(
                staged_statuses.get(&repo_path).copied(),
                repo.unstaged_status(&repo_path, mtime),
            );
            // Only touch entries whose status actually changed.
            if entry.git_status != git_file_status {
                entry.git_status = git_file_status;
                changes.push(entry.path.clone());
                edits.push(Edit::Insert(entry));
            }
        }

        self.snapshot.entries_by_path.edit(edits, &());
        util::extend_sorted(&mut self.changed_paths, changes, usize::MAX, Ord::cmp);
        staged_statuses
    }
}
2857
2858async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2859 let contents = fs.load(abs_path).await?;
2860 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2861 let mut builder = GitignoreBuilder::new(parent);
2862 for line in contents.lines() {
2863 builder.add_line(Some(abs_path.into()), line)?;
2864 }
2865 Ok(builder.build()?)
2866}
2867
2868impl WorktreeId {
2869 pub fn from_usize(handle_id: usize) -> Self {
2870 Self(handle_id)
2871 }
2872
2873 pub fn from_proto(id: u64) -> Self {
2874 Self(id as usize)
2875 }
2876
2877 pub fn to_proto(&self) -> u64 {
2878 self.0 as u64
2879 }
2880
2881 pub fn to_usize(&self) -> usize {
2882 self.0
2883 }
2884}
2885
2886impl fmt::Display for WorktreeId {
2887 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2888 self.0.fmt(f)
2889 }
2890}
2891
2892impl Deref for Worktree {
2893 type Target = Snapshot;
2894
2895 fn deref(&self) -> &Self::Target {
2896 match self {
2897 Worktree::Local(worktree) => &worktree.snapshot,
2898 Worktree::Remote(worktree) => &worktree.snapshot,
2899 }
2900 }
2901}
2902
2903impl Deref for LocalWorktree {
2904 type Target = LocalSnapshot;
2905
2906 fn deref(&self) -> &Self::Target {
2907 &self.snapshot
2908 }
2909}
2910
2911impl Deref for RemoteWorktree {
2912 type Target = Snapshot;
2913
2914 fn deref(&self) -> &Self::Target {
2915 &self.snapshot
2916 }
2917}
2918
2919impl fmt::Debug for LocalWorktree {
2920 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2921 self.snapshot.fmt(f)
2922 }
2923}
2924
2925impl fmt::Debug for Snapshot {
2926 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2927 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2928 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2929
2930 impl<'a> fmt::Debug for EntriesByPath<'a> {
2931 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2932 f.debug_map()
2933 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2934 .finish()
2935 }
2936 }
2937
2938 impl<'a> fmt::Debug for EntriesById<'a> {
2939 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2940 f.debug_list().entries(self.0.iter()).finish()
2941 }
2942 }
2943
2944 f.debug_struct("Snapshot")
2945 .field("id", &self.id)
2946 .field("root_name", &self.root_name)
2947 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2948 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2949 .finish()
2950 }
2951}
2952
/// A handle to a file within a worktree, as seen by a buffer.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: Model<Worktree>,
    /// Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    /// Last known modification time, if the file exists on disk.
    pub mtime: Option<SystemTime>,
    /// The worktree entry backing this file, if one exists.
    pub entry_id: Option<ProjectEntryId>,
    /// Whether this file belongs to a local (vs. remote) worktree.
    pub is_local: bool,
    /// Whether the file has been deleted since this handle was created.
    pub is_deleted: bool,
    /// Whether the file matches the user's private-file patterns.
    pub is_private: bool,
}
2963
2964impl language::File for File {
2965 fn as_local(&self) -> Option<&dyn language::LocalFile> {
2966 if self.is_local {
2967 Some(self)
2968 } else {
2969 None
2970 }
2971 }
2972
2973 fn mtime(&self) -> Option<SystemTime> {
2974 self.mtime
2975 }
2976
2977 fn path(&self) -> &Arc<Path> {
2978 &self.path
2979 }
2980
2981 fn full_path(&self, cx: &AppContext) -> PathBuf {
2982 let mut full_path = PathBuf::new();
2983 let worktree = self.worktree.read(cx);
2984
2985 if worktree.is_visible() {
2986 full_path.push(worktree.root_name());
2987 } else {
2988 let path = worktree.abs_path();
2989
2990 if worktree.is_local() && path.starts_with(HOME.as_path()) {
2991 full_path.push("~");
2992 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2993 } else {
2994 full_path.push(path)
2995 }
2996 }
2997
2998 if self.path.components().next().is_some() {
2999 full_path.push(&self.path);
3000 }
3001
3002 full_path
3003 }
3004
3005 /// Returns the last component of this handle's absolute path. If this handle refers to the root
3006 /// of its worktree, then this method will return the name of the worktree itself.
3007 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
3008 self.path
3009 .file_name()
3010 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
3011 }
3012
3013 fn worktree_id(&self) -> usize {
3014 self.worktree.entity_id().as_u64() as usize
3015 }
3016
3017 fn is_deleted(&self) -> bool {
3018 self.is_deleted
3019 }
3020
3021 fn as_any(&self) -> &dyn Any {
3022 self
3023 }
3024
3025 fn to_proto(&self) -> rpc::proto::File {
3026 rpc::proto::File {
3027 worktree_id: self.worktree.entity_id().as_u64(),
3028 entry_id: self.entry_id.map(|id| id.to_proto()),
3029 path: self.path.to_string_lossy().into(),
3030 mtime: self.mtime.map(|time| time.into()),
3031 is_deleted: self.is_deleted,
3032 }
3033 }
3034
3035 fn is_private(&self) -> bool {
3036 self.is_private
3037 }
3038}
3039
3040impl language::LocalFile for File {
3041 fn abs_path(&self, cx: &AppContext) -> PathBuf {
3042 let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
3043 if self.path.as_ref() == Path::new("") {
3044 worktree_path.to_path_buf()
3045 } else {
3046 worktree_path.join(&self.path)
3047 }
3048 }
3049
3050 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
3051 let worktree = self.worktree.read(cx).as_local().unwrap();
3052 let abs_path = worktree.absolutize(&self.path);
3053 let fs = worktree.fs.clone();
3054 cx.background_executor()
3055 .spawn(async move { fs.load(&abs_path?).await })
3056 }
3057
3058 fn buffer_reloaded(
3059 &self,
3060 buffer_id: BufferId,
3061 version: &clock::Global,
3062 line_ending: LineEnding,
3063 mtime: Option<SystemTime>,
3064 cx: &mut AppContext,
3065 ) {
3066 let worktree = self.worktree.read(cx).as_local().unwrap();
3067 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
3068 worktree
3069 .client
3070 .send(proto::BufferReloaded {
3071 project_id,
3072 buffer_id: buffer_id.into(),
3073 version: serialize_version(version),
3074 mtime: mtime.map(|time| time.into()),
3075 line_ending: serialize_line_ending(line_ending) as i32,
3076 })
3077 .log_err();
3078 }
3079 }
3080}
3081
3082impl File {
3083 pub fn for_entry(entry: Entry, worktree: Model<Worktree>) -> Arc<Self> {
3084 Arc::new(Self {
3085 worktree,
3086 path: entry.path.clone(),
3087 mtime: entry.mtime,
3088 entry_id: Some(entry.id),
3089 is_local: true,
3090 is_deleted: false,
3091 is_private: entry.is_private,
3092 })
3093 }
3094
3095 pub fn from_proto(
3096 proto: rpc::proto::File,
3097 worktree: Model<Worktree>,
3098 cx: &AppContext,
3099 ) -> Result<Self> {
3100 let worktree_id = worktree
3101 .read(cx)
3102 .as_remote()
3103 .ok_or_else(|| anyhow!("not remote"))?
3104 .id();
3105
3106 if worktree_id.to_proto() != proto.worktree_id {
3107 return Err(anyhow!("worktree id does not match file"));
3108 }
3109
3110 Ok(Self {
3111 worktree,
3112 path: Path::new(&proto.path).into(),
3113 mtime: proto.mtime.map(|time| time.into()),
3114 entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
3115 is_local: false,
3116 is_deleted: proto.is_deleted,
3117 is_private: false,
3118 })
3119 }
3120
3121 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
3122 file.and_then(|f| f.as_any().downcast_ref())
3123 }
3124
3125 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
3126 self.worktree.read(cx).id()
3127 }
3128
3129 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
3130 if self.is_deleted {
3131 None
3132 } else {
3133 self.entry_id
3134 }
3135 }
3136}
3137
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable identifier of this entry within the project.
    pub id: ProjectEntryId,
    /// Whether this is a file or a (possibly not-yet-scanned) directory.
    pub kind: EntryKind,
    /// Path of the entry, relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode of the entry.
    pub inode: u64,
    /// Last modification time, if known.
    pub mtime: Option<SystemTime>,
    /// Whether the entry is a symbolic link.
    pub is_symlink: bool,

    /// Whether this entry is ignored by Git.
    ///
    /// We only scan ignored entries once the directory is expanded and
    /// exclude them from searches.
    pub is_ignored: bool,

    /// Whether this entry's canonical path is outside of the worktree.
    /// This means the entry is only accessible from the worktree root via a
    /// symlink.
    ///
    /// We only scan entries outside of the worktree once the symlinked
    /// directory is expanded. External entries are treated like gitignored
    /// entries in that they are not included in searches.
    pub is_external: bool,
    /// Combined staged/unstaged git status, if the entry is inside a
    /// repository and has a status.
    pub git_status: Option<GitFileStatus>,
    /// Whether this entry is considered to be a `.env` file.
    pub is_private: bool,
}
3165
/// The kind of a worktree entry, also tracking how much of a directory's
/// contents have been scanned.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose contents have not been loaded.
    UnloadedDir,
    /// A directory whose scan has been scheduled but has not completed yet.
    PendingDir,
    /// A directory whose children have been scanned.
    Dir,
    /// A file; the `CharBag` caches the path's characters for fuzzy matching.
    File(CharBag),
}
3173
/// The kind of change observed for a path in the worktree.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
3189
/// Describes a change to a git repository tracked by the worktree.
pub struct GitRepositoryChange {
    /// The previous state of the repository, if it already existed.
    pub old_repository: Option<RepositoryEntry>,
}
3194
/// A batch of entry changes: `(path, entry id, kind of change)` tuples.
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// A batch of repository changes, keyed by work-directory path.
pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
3197
3198impl Entry {
3199 fn new(
3200 path: Arc<Path>,
3201 metadata: &fs::Metadata,
3202 next_entry_id: &AtomicUsize,
3203 root_char_bag: CharBag,
3204 ) -> Self {
3205 Self {
3206 id: ProjectEntryId::new(next_entry_id),
3207 kind: if metadata.is_dir {
3208 EntryKind::PendingDir
3209 } else {
3210 EntryKind::File(char_bag_for_path(root_char_bag, &path))
3211 },
3212 path,
3213 inode: metadata.inode,
3214 mtime: Some(metadata.mtime),
3215 is_symlink: metadata.is_symlink,
3216 is_ignored: false,
3217 is_external: false,
3218 is_private: false,
3219 git_status: None,
3220 }
3221 }
3222
3223 pub fn is_created(&self) -> bool {
3224 self.mtime.is_some()
3225 }
3226
3227 pub fn is_dir(&self) -> bool {
3228 self.kind.is_dir()
3229 }
3230
3231 pub fn is_file(&self) -> bool {
3232 self.kind.is_file()
3233 }
3234
3235 pub fn git_status(&self) -> Option<GitFileStatus> {
3236 self.git_status
3237 }
3238}
3239
3240impl EntryKind {
3241 pub fn is_dir(&self) -> bool {
3242 matches!(
3243 self,
3244 EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
3245 )
3246 }
3247
3248 pub fn is_unloaded(&self) -> bool {
3249 matches!(self, EntryKind::UnloadedDir)
3250 }
3251
3252 pub fn is_file(&self) -> bool {
3253 matches!(self, EntryKind::File(_))
3254 }
3255}
3256
3257impl sum_tree::Item for Entry {
3258 type Summary = EntrySummary;
3259
3260 fn summary(&self) -> Self::Summary {
3261 let non_ignored_count = if self.is_ignored || self.is_external {
3262 0
3263 } else {
3264 1
3265 };
3266 let file_count;
3267 let non_ignored_file_count;
3268 if self.is_file() {
3269 file_count = 1;
3270 non_ignored_file_count = non_ignored_count;
3271 } else {
3272 file_count = 0;
3273 non_ignored_file_count = 0;
3274 }
3275
3276 let mut statuses = GitStatuses::default();
3277 match self.git_status {
3278 Some(status) => match status {
3279 GitFileStatus::Added => statuses.added = 1,
3280 GitFileStatus::Modified => statuses.modified = 1,
3281 GitFileStatus::Conflict => statuses.conflict = 1,
3282 },
3283 None => {}
3284 }
3285
3286 EntrySummary {
3287 max_path: self.path.clone(),
3288 count: 1,
3289 non_ignored_count,
3290 file_count,
3291 non_ignored_file_count,
3292 statuses,
3293 }
3294 }
3295}
3296
3297impl sum_tree::KeyedItem for Entry {
3298 type Key = PathKey;
3299
3300 fn key(&self) -> Self::Key {
3301 PathKey(self.path.clone())
3302 }
3303}
3304
/// Sum-tree summary aggregated over `Entry` items.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// Path of the last (rightmost) entry in the summarized range.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries that are neither ignored nor external.
    non_ignored_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// File entries that are neither ignored nor external.
    non_ignored_file_count: usize,
    /// Aggregated git status tallies.
    statuses: GitStatuses,
}
3314
3315impl Default for EntrySummary {
3316 fn default() -> Self {
3317 Self {
3318 max_path: Arc::from(Path::new("")),
3319 count: 0,
3320 non_ignored_count: 0,
3321 file_count: 0,
3322 non_ignored_file_count: 0,
3323 statuses: Default::default(),
3324 }
3325 }
3326}
3327
3328impl sum_tree::Summary for EntrySummary {
3329 type Context = ();
3330
3331 fn add_summary(&mut self, rhs: &Self, _: &()) {
3332 self.max_path = rhs.max_path.clone();
3333 self.count += rhs.count;
3334 self.non_ignored_count += rhs.non_ignored_count;
3335 self.file_count += rhs.file_count;
3336 self.non_ignored_file_count += rhs.non_ignored_file_count;
3337 self.statuses += rhs.statuses;
3338 }
3339}
3340
/// Entry record indexed by `ProjectEntryId`, mirroring the path-keyed tree.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// Scan pass in which this record was last updated.
    scan_id: usize,
}
3348
3349impl sum_tree::Item for PathEntry {
3350 type Summary = PathEntrySummary;
3351
3352 fn summary(&self) -> Self::Summary {
3353 PathEntrySummary { max_id: self.id }
3354 }
3355}
3356
3357impl sum_tree::KeyedItem for PathEntry {
3358 type Key = ProjectEntryId;
3359
3360 fn key(&self) -> Self::Key {
3361 self.id
3362 }
3363}
3364
/// Summary for the id-keyed tree: the maximum entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
3369
3370impl sum_tree::Summary for PathEntrySummary {
3371 type Context = ();
3372
3373 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
3374 self.max_id = summary.max_id;
3375 }
3376}
3377
3378impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
3379 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
3380 *self = summary.max_id;
3381 }
3382}
3383
/// Newtype over a path, used as the sort key for [`Entry`] items in a sum tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
3386
3387impl Default for PathKey {
3388 fn default() -> Self {
3389 Self(Path::new("").into())
3390 }
3391}
3392
3393impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
3394 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3395 self.0 = summary.max_path.clone();
3396 }
3397}
3398
/// Scans a worktree's directory in the background, keeping the snapshot in
/// `state` in sync with the filesystem and streaming `ScanState` updates out.
struct BackgroundScanner {
    // Lock-protected snapshot and bookkeeping that the scan tasks mutate.
    state: Mutex<BackgroundScannerState>,
    fs: Arc<dyn Fs>,
    // Whether the filesystem distinguishes paths by case; used to detect
    // case-only renames when reloading entries.
    fs_case_sensitive: bool,
    // Channel on which scan progress and snapshot updates are reported.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: BackgroundExecutor,
    // Explicit requests to rescan specific paths; handled with priority
    // over filesystem events.
    scan_requests_rx: channel::Receiver<ScanRequest>,
    // Path prefixes that should be forcibly loaded into the snapshot.
    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    // Source of unique ids for newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    // Current lifecycle stage; affects how changes are classified when
    // building change sets.
    phase: BackgroundScannerPhase,
}
3410
/// Lifecycle stage of a [`BackgroundScanner`]; consulted when building change
/// sets to decide whether entries should be reported as loaded vs. added.
#[derive(PartialEq)]
enum BackgroundScannerPhase {
    /// The initial recursive scan of the worktree is in progress.
    InitialScan,
    /// Processing FS events that arrived while the initial scan was running.
    EventsReceivedDuringInitialScan,
    /// Steady state: processing FS events as they arrive.
    Events,
}
3417
3418impl BackgroundScanner {
3419 #[allow(clippy::too_many_arguments)]
3420 fn new(
3421 snapshot: LocalSnapshot,
3422 next_entry_id: Arc<AtomicUsize>,
3423 fs: Arc<dyn Fs>,
3424 fs_case_sensitive: bool,
3425 status_updates_tx: UnboundedSender<ScanState>,
3426 executor: BackgroundExecutor,
3427 scan_requests_rx: channel::Receiver<ScanRequest>,
3428 path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
3429 ) -> Self {
3430 Self {
3431 fs,
3432 fs_case_sensitive,
3433 status_updates_tx,
3434 executor,
3435 scan_requests_rx,
3436 path_prefixes_to_scan_rx,
3437 next_entry_id,
3438 state: Mutex::new(BackgroundScannerState {
3439 prev_snapshot: snapshot.snapshot.clone(),
3440 snapshot,
3441 scanned_dirs: Default::default(),
3442 path_prefixes_to_scan: Default::default(),
3443 paths_to_scan: Default::default(),
3444 removed_entry_ids: Default::default(),
3445 changed_paths: Default::default(),
3446 }),
3447 phase: BackgroundScannerPhase::InitialScan,
3448 }
3449 }
3450
    /// Main loop of the scanner: perform the initial recursive scan, then
    /// process FS events, rescan requests, and path-prefix requests until the
    /// channels close or the worktree is dropped.
    async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathBuf>>>>) {
        use futures::FutureExt as _;

        // Populate ignores above the root.
        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
        for (index, ancestor) in root_abs_path.ancestors().enumerate() {
            // Index 0 is the root itself; its gitignore is handled by the
            // normal directory scan below.
            if index != 0 {
                if let Ok(ignore) =
                    build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
                {
                    self.state
                        .lock()
                        .snapshot
                        .ignores_by_parent_abs_path
                        .insert(ancestor.into(), (ignore.into(), false));
                }
            }
            if ancestor.join(&*DOT_GIT).is_dir() {
                // Reached root of git repository.
                break;
            }
        }

        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        {
            let mut state = self.state.lock();
            state.snapshot.scan_id += 1;
            if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
                let ignore_stack = state
                    .snapshot
                    .ignore_stack_for_abs_path(&root_abs_path, true);
                if ignore_stack.is_abs_path_ignored(&root_abs_path, true) {
                    root_entry.is_ignored = true;
                    state.insert_entry(root_entry.clone(), self.fs.as_ref());
                }
                state.enqueue_scan_dir(root_abs_path, &root_entry, &scan_job_tx);
            }
        };

        // Perform an initial scan of the directory.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut state = self.state.lock();
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
        }

        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
        if let Poll::Ready(Some(mut paths)) = futures::poll!(fs_events_rx.next()) {
            // Drain any additional batches that are already ready so they can
            // be handled in one pass.
            while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
                paths.extend(more_paths);
            }
            self.process_events(paths).await;
        }

        // Continue processing events until the worktree is dropped.
        self.phase = BackgroundScannerPhase::Events;
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.scan_requests_rx.recv().fuse() => {
                    let Ok(request) = request else { break };
                    if !self.process_scan_request(request, false).await {
                        return;
                    }
                }

                path_prefix = self.path_prefixes_to_scan_rx.recv().fuse() => {
                    let Ok(path_prefix) = path_prefix else { break };
                    log::trace!("adding path prefix {:?}", path_prefix);

                    let did_scan = self.forcibly_load_paths(&[path_prefix.clone()]).await;
                    if did_scan {
                        let abs_path =
                        {
                            let mut state = self.state.lock();
                            state.path_prefixes_to_scan.insert(path_prefix.clone());
                            state.snapshot.abs_path.join(&path_prefix)
                        };

                        if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
                            self.process_events(vec![abs_path]).await;
                        }
                    }
                }

                paths = fs_events_rx.next().fuse() => {
                    let Some(mut paths) = paths else { break };
                    // Coalesce with any event batches that are already ready.
                    while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
                        paths.extend(more_paths);
                    }
                    self.process_events(paths.clone()).await;
                }
            }
        }
    }
3553
    /// Rescan the paths named in an explicit request, then send a status
    /// update. Returns false if the status receiver has been dropped.
    async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
        log::debug!("rescanning paths {:?}", request.relative_paths);

        request.relative_paths.sort_unstable();
        self.forcibly_load_paths(&request.relative_paths).await;

        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
            Ok(path) => path,
            Err(err) => {
                log::error!("failed to canonicalize root path: {}", err);
                return true;
            }
        };
        let abs_paths = request
            .relative_paths
            .iter()
            .map(|path| {
                // A relative path with no file name refers to the root itself.
                if path.file_name().is_some() {
                    root_canonical_path.join(path)
                } else {
                    root_canonical_path.clone()
                }
            })
            .collect::<Vec<_>>();

        self.reload_entries_for_paths(
            root_path,
            root_canonical_path,
            &request.relative_paths,
            abs_paths,
            None,
        )
        .await;
        self.send_status_update(scanning, Some(request.done))
    }
3590
    /// Handle a batch of raw FS event paths: reload affected entries, refresh
    /// ignore statuses, and reload any git repositories whose `.git`
    /// directories were touched.
    async fn process_events(&mut self, mut abs_paths: Vec<PathBuf>) {
        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
            Ok(path) => path,
            Err(err) => {
                log::error!("failed to canonicalize root path: {}", err);
                return;
            }
        };

        let mut relative_paths = Vec::with_capacity(abs_paths.len());
        let mut dot_git_paths_to_reload = HashSet::default();
        abs_paths.sort_unstable();
        // After sorting, a descendant immediately follows its ancestor;
        // `dedup_by` drops the descendant and keeps the ancestor.
        abs_paths.dedup_by(|a, b| a.starts_with(&b));
        abs_paths.retain(|abs_path| {
            let snapshot = &self.state.lock().snapshot;
            {
                let mut is_git_related = false;
                // Events anywhere inside a `.git` directory schedule a
                // repository reload for that repo.
                if let Some(dot_git_dir) = abs_path
                    .ancestors()
                    .find(|ancestor| ancestor.file_name() == Some(*DOT_GIT))
                {
                    let dot_git_path = dot_git_dir
                        .strip_prefix(&root_canonical_path)
                        .ok()
                        .map(|path| path.to_path_buf())
                        .unwrap_or_else(|| dot_git_dir.to_path_buf());
                    dot_git_paths_to_reload.insert(dot_git_path.to_path_buf());
                    is_git_related = true;
                }

                let relative_path: Arc<Path> =
                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                        path.into()
                    } else {
                        log::error!(
                            "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
                        );
                        return false;
                    };

                // Ignore events within directories that haven't been loaded
                // into the snapshot yet.
                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
                    snapshot
                        .entry_for_path(parent)
                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
                });
                if !parent_dir_is_loaded {
                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
                    return false;
                }

                if snapshot.is_path_excluded(relative_path.to_path_buf()) {
                    if !is_git_related {
                        log::debug!("ignoring FS event for excluded path {relative_path:?}");
                    }
                    return false;
                }

                relative_paths.push(relative_path);
                true
            }
        });

        if dot_git_paths_to_reload.is_empty() && relative_paths.is_empty() {
            return;
        }

        if !relative_paths.is_empty() {
            log::debug!("received fs events {:?}", relative_paths);

            let (scan_job_tx, scan_job_rx) = channel::unbounded();
            self.reload_entries_for_paths(
                root_path,
                root_canonical_path,
                &relative_paths,
                abs_paths,
                Some(scan_job_tx.clone()),
            )
            .await;
            drop(scan_job_tx);
            self.scan_dirs(false, scan_job_rx).await;

            // Reloading entries can change which gitignore files apply, so
            // recompute ignore statuses, which may enqueue further scans.
            let (scan_job_tx, scan_job_rx) = channel::unbounded();
            self.update_ignore_statuses(scan_job_tx).await;
            self.scan_dirs(false, scan_job_rx).await;
        }

        {
            let mut state = self.state.lock();
            if !dot_git_paths_to_reload.is_empty() {
                if relative_paths.is_empty() {
                    state.snapshot.scan_id += 1;
                }
                log::debug!("reloading repositories: {dot_git_paths_to_reload:?}");
                state.reload_repositories(&dot_git_paths_to_reload, self.fs.as_ref());
            }
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
            for (_, entry_id) in mem::take(&mut state.removed_entry_ids) {
                state.scanned_dirs.remove(&entry_id);
            }
        }

        self.send_status_update(false, None);
    }
3695
3696 async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
3697 let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
3698 {
3699 let mut state = self.state.lock();
3700 let root_path = state.snapshot.abs_path.clone();
3701 for path in paths {
3702 for ancestor in path.ancestors() {
3703 if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
3704 if entry.kind == EntryKind::UnloadedDir {
3705 let abs_path = root_path.join(ancestor);
3706 state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
3707 state.paths_to_scan.insert(path.clone());
3708 break;
3709 }
3710 }
3711 }
3712 }
3713 drop(scan_job_tx);
3714 }
3715 while let Some(job) = scan_job_rx.next().await {
3716 self.scan_dir(&job).await.log_err();
3717 }
3718
3719 mem::take(&mut self.state.lock().paths_to_scan).len() > 0
3720 }
3721
    /// Drain the queue of directory-scan jobs using one worker task per CPU,
    /// optionally emitting periodic progress updates while scanning.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the receiver is gone, the worktree was dropped; nothing to do.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.scan_requests_rx.recv().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent this update;
                                            // catch up to its counter value.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
3794
3795 fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
3796 let mut state = self.state.lock();
3797 if state.changed_paths.is_empty() && scanning {
3798 return true;
3799 }
3800
3801 let new_snapshot = state.snapshot.clone();
3802 let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
3803 let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
3804 state.changed_paths.clear();
3805
3806 self.status_updates_tx
3807 .unbounded_send(ScanState::Updated {
3808 snapshot: new_snapshot,
3809 changes,
3810 scanning,
3811 barrier,
3812 })
3813 .is_ok()
3814 }
3815
    /// Scan one directory: build entries for each child, track gitignore and
    /// `.git` discoveries, and enqueue jobs for subdirectories that should
    /// also be scanned.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let root_abs_path;
        let mut ignore_stack;
        let mut new_ignore;
        let root_char_bag;
        let next_entry_id;
        {
            let state = self.state.lock();
            let snapshot = &state.snapshot;
            root_abs_path = snapshot.abs_path().clone();
            if snapshot.is_path_excluded(job.path.to_path_buf()) {
                log::error!("skipping excluded directory {:?}", job.path);
                return Ok(());
            }
            log::debug!("scanning directory {:?}", job.path);
            ignore_stack = job.ignore_stack.clone();
            new_ignore = None;
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = self.next_entry_id.clone();
            drop(state);
        }

        let mut dotgit_path = None;
        let mut root_canonical_path = None;
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory entry; `None` marks directories we refuse to
        // recurse into (recursive symlinks).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, it tends to appear early in the listing, so there
                // should rarely be many such entries. Update the ignore stack associated
                // with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }
            // If we find a .git, we'll need to load the repository.
            else if child_name == *DOT_GIT {
                dotgit_path = Some(child_path.clone());
            }

            {
                let relative_path = job.path.join(child_name);
                let mut state = self.state.lock();
                if state.snapshot.is_path_excluded(relative_path.clone()) {
                    log::debug!("skipping excluded child entry {relative_path:?}");
                    state.remove_path(&relative_path);
                    continue;
                }
                drop(state);
            }

            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between readdir and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {child_abs_path:?}: {err:?}");
                    continue;
                }
            };

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if job.is_external {
                child_entry.is_external = true;
            } else if child_metadata.is_symlink {
                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
                    Ok(path) => path,
                    Err(err) => {
                        log::error!(
                            "error reading target of symlink {:?}: {:?}",
                            child_abs_path,
                            err
                        );
                        continue;
                    }
                };

                // lazily canonicalize the root path in order to determine if
                // symlinks point outside of the worktree.
                let root_canonical_path = match &root_canonical_path {
                    Some(path) => path,
                    None => match self.fs.canonicalize(&root_abs_path).await {
                        Ok(path) => root_canonical_path.insert(path),
                        Err(err) => {
                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
                            continue;
                        }
                    },
                };

                if !canonical_path.starts_with(root_canonical_path) {
                    child_entry.is_external = true;
                }
            }

            if child_entry.is_dir() {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);

                // Avoid recursing until crash in the case of a recursive symlink
                if job.ancestor_inodes.contains(&child_entry.inode) {
                    new_jobs.push(None);
                } else {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path.clone(),
                        path: child_path,
                        is_external: child_entry.is_external,
                        ignore_stack: if child_entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                        containing_repository: job.containing_repository.clone(),
                    }));
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
                // Compute the git status for non-ignored files within a repository.
                if !child_entry.is_ignored {
                    if let Some((repository_dir, repository, staged_statuses)) =
                        &job.containing_repository
                    {
                        if let Ok(repo_path) = child_entry.path.strip_prefix(&repository_dir.0) {
                            if let Some(mtime) = child_entry.mtime {
                                let repo_path = RepoPath(repo_path.into());
                                child_entry.git_status = combine_git_statuses(
                                    staged_statuses.get(&repo_path).copied(),
                                    repository.lock().unstaged_status(&repo_path, mtime),
                                );
                            }
                        }
                    }
                }
            }

            {
                let relative_path = job.path.join(child_name);
                let state = self.state.lock();
                if state.snapshot.is_path_private(&relative_path) {
                    log::debug!("detected private file: {relative_path:?}");
                    child_entry.is_private = true;
                }
                drop(state)
            }

            new_entries.push(child_entry);
        }

        let mut state = self.state.lock();

        // Identify any subdirectories that should not be scanned.
        let mut job_ix = 0;
        for entry in &mut new_entries {
            state.reuse_entry_id(entry);
            if entry.is_dir() {
                if state.should_scan_directory(entry) {
                    job_ix += 1;
                } else {
                    log::debug!("defer scanning directory {:?}", entry.path);
                    entry.kind = EntryKind::UnloadedDir;
                    new_jobs.remove(job_ix);
                }
            }
        }

        state.populate_dir(&job.path, new_entries, new_ignore);

        let repository =
            dotgit_path.and_then(|path| state.build_git_repository(path, self.fs.as_ref()));

        // Forward the (possibly newly discovered) repository to child jobs
        // and enqueue them for scanning.
        for mut new_job in new_jobs.into_iter().flatten() {
            if let Some(containing_repository) = &repository {
                new_job.containing_repository = Some(containing_repository.clone());
            }

            job.scan_queue
                .try_send(new_job)
                .expect("channel is unbounded");
        }

        Ok(())
    }
4046
    /// Refresh the entries for the given paths from the filesystem, removing
    /// entries whose files are gone and (when `scan_queue_tx` is provided)
    /// enqueuing recursive rescans of affected directories.
    async fn reload_entries_for_paths(
        &self,
        root_abs_path: Arc<Path>,
        root_canonical_path: PathBuf,
        relative_paths: &[Arc<Path>],
        abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) {
        // Stat all paths concurrently; `Ok(None)` means the path no longer exists.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| async move {
                    let metadata = self.fs.metadata(abs_path).await?;
                    if let Some(metadata) = metadata {
                        let canonical_path = self.fs.canonicalize(abs_path).await?;

                        // If we're on a case-insensitive filesystem (default on macOS), we want
                        // to only ignore metadata for non-symlink files if their absolute-path matches
                        // the canonical-path.
                        // Because if not, this might be a case-only-renaming (`mv test.txt TEST.TXT`)
                        // and we want to ignore the metadata for the old path (`test.txt`) so it's
                        // treated as removed.
                        if !self.fs_case_sensitive && !metadata.is_symlink {
                            let canonical_file_name = canonical_path.file_name();
                            let file_name = abs_path.file_name();
                            if canonical_file_name != file_name {
                                return Ok(None);
                            }
                        }

                        anyhow::Ok(Some((metadata, canonical_path)))
                    } else {
                        Ok(None)
                    }
                })
                .collect::<Vec<_>>(),
        )
        .await;

        let mut state = self.state.lock();
        let snapshot = &mut state.snapshot;
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        let doing_recursive_update = scan_queue_tx.is_some();
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            if matches!(metadata, Ok(None)) || doing_recursive_update {
                log::trace!("remove path {:?}", path);
                state.remove_path(path);
            }
        }

        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
            match metadata {
                Ok(Some((metadata, canonical_path))) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);

                    let mut fs_entry = Entry::new(
                        path.clone(),
                        metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                    );
                    let is_dir = fs_entry.is_dir();
                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
                    fs_entry.is_external = !canonical_path.starts_with(&root_canonical_path);
                    fs_entry.is_private = state.snapshot.is_path_private(path);

                    // Refresh git status for regular, tracked-candidate files.
                    if !is_dir && !fs_entry.is_ignored && !fs_entry.is_external {
                        if let Some((work_dir, repo)) = state.snapshot.local_repo_for_path(path) {
                            if let Ok(repo_path) = path.strip_prefix(work_dir.0) {
                                if let Some(mtime) = fs_entry.mtime {
                                    let repo_path = RepoPath(repo_path.into());
                                    let repo = repo.repo_ptr.lock();
                                    fs_entry.git_status = repo.status(&repo_path, mtime);
                                }
                            }
                        }
                    }

                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, fs_entry.is_dir()) {
                        if state.should_scan_directory(&fs_entry) {
                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
                        } else {
                            fs_entry.kind = EntryKind::UnloadedDir;
                        }
                    }

                    state.insert_entry(fs_entry, self.fs.as_ref());
                }
                Ok(None) => {
                    self.remove_repo_path(path, &mut state.snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file {abs_path:?} on event: {err:#}");
                }
            }
        }

        // Record which paths changed so the next status update reports them.
        util::extend_sorted(
            &mut state.changed_paths,
            relative_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );
    }
4163
4164 fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
4165 if !path
4166 .components()
4167 .any(|component| component.as_os_str() == *DOT_GIT)
4168 {
4169 if let Some(repository) = snapshot.repository_for_work_directory(path) {
4170 let entry = repository.work_directory.0;
4171 snapshot.git_repositories.remove(&entry);
4172 snapshot
4173 .snapshot
4174 .repository_entries
4175 .remove(&RepositoryWorkDirectory(path.into()));
4176 return Some(());
4177 }
4178 }
4179
4180 // TODO statuses
4181 // Track when a .git is removed and iterate over the file system there
4182
4183 Some(())
4184 }
4185
    /// Recompute ignore state after gitignore changes: drop ignore data whose
    /// `.gitignore` files no longer exist, and process directories whose
    /// ignore stacks were marked as needing an update.
    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
        use futures::FutureExt as _;

        let mut snapshot = self.state.lock().snapshot.clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        let abs_path = snapshot.abs_path.clone();
        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
                if *needs_update {
                    *needs_update = false;
                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                        ignores_to_update.push(parent_abs_path.clone());
                    }
                }

                // If the `.gitignore` file itself is gone from the snapshot,
                // schedule its ignore data for removal.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        // Remove deleted gitignores from both the local copy and shared state.
        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.state
                .lock()
                .snapshot
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip paths contained in an already-queued ancestor; the
            // ancestor's job recurses into them via the ignore queue.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
                scan_queue: scan_job_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.scan_requests_rx.recv().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
4267
4268 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
4269 log::trace!("update ignore status {:?}", job.abs_path);
4270
4271 let mut ignore_stack = job.ignore_stack;
4272 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
4273 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
4274 }
4275
4276 let mut entries_by_id_edits = Vec::new();
4277 let mut entries_by_path_edits = Vec::new();
4278 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
4279 let repo = snapshot
4280 .local_repo_for_path(path)
4281 .map_or(None, |local_repo| Some(local_repo.1));
4282 for mut entry in snapshot.child_entries(path).cloned() {
4283 let was_ignored = entry.is_ignored;
4284 let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
4285 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
4286 if entry.is_dir() {
4287 let child_ignore_stack = if entry.is_ignored {
4288 IgnoreStack::all()
4289 } else {
4290 ignore_stack.clone()
4291 };
4292
4293 // Scan any directories that were previously ignored and weren't previously scanned.
4294 if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
4295 let state = self.state.lock();
4296 if state.should_scan_directory(&entry) {
4297 state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
4298 }
4299 }
4300
4301 job.ignore_queue
4302 .send(UpdateIgnoreStatusJob {
4303 abs_path: abs_path.clone(),
4304 ignore_stack: child_ignore_stack,
4305 ignore_queue: job.ignore_queue.clone(),
4306 scan_queue: job.scan_queue.clone(),
4307 })
4308 .await
4309 .unwrap();
4310 }
4311
4312 if entry.is_ignored != was_ignored {
4313 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
4314 path_entry.scan_id = snapshot.scan_id;
4315 path_entry.is_ignored = entry.is_ignored;
4316 if !entry.is_dir() && !entry.is_ignored && !entry.is_external {
4317 if let Some(repo) = repo {
4318 if let Some(mtime) = &entry.mtime {
4319 let repo_path = RepoPath(entry.path.to_path_buf());
4320 let repo = repo.repo_ptr.lock();
4321 entry.git_status = repo.status(&repo_path, *mtime);
4322 }
4323 }
4324 }
4325 entries_by_id_edits.push(Edit::Insert(path_entry));
4326 entries_by_path_edits.push(Edit::Insert(entry));
4327 }
4328 }
4329
4330 let state = &mut self.state.lock();
4331 for edit in &entries_by_path_edits {
4332 if let Edit::Insert(entry) = edit {
4333 if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
4334 state.changed_paths.insert(ix, entry.path.clone());
4335 }
4336 }
4337 }
4338
4339 state
4340 .snapshot
4341 .entries_by_path
4342 .edit(entries_by_path_edits, &());
4343 state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
4344 }
4345
    /// Builds the list of `(path, entry_id, change)` tuples describing how the
    /// entries under `event_paths` differ between `old_snapshot` and
    /// `new_snapshot`.
    ///
    /// Both snapshots' entry trees are ordered by path, so the two trees are
    /// walked in lockstep with a cursor apiece, classifying each divergence as
    /// `Added`, `Removed`, `Updated`, `Loaded`, or `AddedOrUpdated`.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> UpdatedEntriesSet {
        use BackgroundScannerPhase::*;
        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

        // Identify which paths have changed. Use the known set of changed
        // parent paths to optimize the search.
        let mut changes = Vec::new();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        // Most recent directory seen transitioning from unloaded to loaded;
        // entries discovered beneath it are reported as `Loaded` rather than
        // `Added`.
        let mut last_newly_loaded_dir_path = None;
        old_paths.next(&());
        new_paths.next(&());
        for path in event_paths {
            let path = PathKey(path.clone());
            // Catch each cursor up to the current event path if it is behind.
            if old_paths.item().map_or(false, |e| e.path < path.0) {
                old_paths.seek_forward(&path, Bias::Left, &());
            }
            if new_paths.item().map_or(false, |e| e.path < path.0) {
                new_paths.seek_forward(&path, Bias::Left, &());
            }
            // Walk both cursors through the subtree rooted at `path`.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past `path` and
                        // out of its subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                // Path exists only in the old snapshot.
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if self.phase == EventsReceivedDuringInitialScan {
                                    if old_entry.id != new_entry.id {
                                        changes.push((
                                            old_entry.path.clone(),
                                            old_entry.id,
                                            Removed,
                                        ));
                                    }
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.push((
                                        new_entry.path.clone(),
                                        new_entry.id,
                                        AddedOrUpdated,
                                    ));
                                } else if old_entry.id != new_entry.id {
                                    // Same path but a different entry id:
                                    // report it as a removal plus an addition.
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
                                } else if old_entry != new_entry {
                                    if old_entry.kind.is_unloaded() {
                                        // A previously-unloaded directory's
                                        // contents have now been scanned.
                                        last_newly_loaded_dir_path = Some(&new_entry.path);
                                        changes.push((
                                            new_entry.path.clone(),
                                            new_entry.id,
                                            Loaded,
                                        ));
                                    } else {
                                        changes.push((
                                            new_entry.path.clone(),
                                            new_entry.id,
                                            Updated,
                                        ));
                                    }
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                // Path exists only in the new snapshot. During
                                // the initial scan, or beneath a directory that
                                // was just loaded, this counts as `Loaded`.
                                let is_newly_loaded = self.phase == InitialScan
                                    || last_newly_loaded_dir_path
                                        .as_ref()
                                        .map_or(false, |dir| new_entry.path.starts_with(&dir));
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    if is_newly_loaded { Loaded } else { Added },
                                ));
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        // Trailing entries in the old snapshot were removed.
                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        // Trailing entries in the new snapshot were added (or
                        // loaded, per the same rule as above).
                        let is_newly_loaded = self.phase == InitialScan
                            || last_newly_loaded_dir_path
                                .as_ref()
                                .map_or(false, |dir| new_entry.path.starts_with(&dir));
                        changes.push((
                            new_entry.path.clone(),
                            new_entry.id,
                            if is_newly_loaded { Loaded } else { Added },
                        ));
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes.into()
    }
4463
    /// Resolves after `FS_WATCH_LATENCY`, or never resolves when `running` is
    /// false — presumably used by the scanner's `select_biased!` loop to pace
    /// progress updates (confirm against the caller).
    async fn progress_timer(&self, running: bool) {
        if !running {
            // Not scanning: park forever so this branch never fires.
            return futures::future::pending().await;
        }

        // Under the fake filesystem used in tests, substitute a simulated
        // random delay so tests stay fast and deterministic.
        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(FS_WATCH_LATENCY).await;
    }
4476}
4477
4478fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
4479 let mut result = root_char_bag;
4480 result.extend(
4481 path.to_string_lossy()
4482 .chars()
4483 .map(|c| c.to_ascii_lowercase()),
4484 );
4485 result
4486}
4487
/// A unit of work for the background scanner: scan one directory and enqueue
/// jobs for its subdirectories.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// The same directory — presumably relative to the worktree root
    /// (TODO confirm against the scanner).
    path: Arc<Path>,
    /// Gitignore rules accumulated from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Channel on which jobs for child directories are enqueued.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories — NOTE(review): presumably used to
    /// avoid infinite recursion through symlink cycles; confirm.
    ancestor_inodes: TreeSet<u64>,
    /// Whether this directory lies outside the worktree root (e.g. reached
    /// through a symlink) — assumption based on the field name; verify.
    is_external: bool,
    /// The git repository containing this directory, if any: its working
    /// directory, a handle to the repository, and a map of known file
    /// statuses.
    containing_repository: Option<(
        RepositoryWorkDirectory,
        Arc<Mutex<dyn GitRepository>>,
        TreeMap<RepoPath, GitFileStatus>,
    )>,
}
4501
/// A unit of work for re-evaluating ignore status beneath `abs_path`,
/// presumably after a `.gitignore` change (confirm against the scanner).
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory whose contents are re-evaluated.
    abs_path: Arc<Path>,
    /// Ignore rules in effect at `abs_path`.
    ignore_stack: Arc<IgnoreStack>,
    /// Channel for enqueueing follow-up ignore-status jobs for subdirectories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
    /// Channel for enqueueing directory scans.
    scan_queue: Sender<ScanJob>,
}
4508
/// Test-support extension methods for a worktree model handle.
pub trait WorktreeModelHandle {
    /// Waits until all pending file-system events have been processed by the
    /// worktree, so tests are not perturbed by stale events.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
4516
impl WorktreeModelHandle for Model<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it,
            // proving the event stream has caught up to this point in time.
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_some())
                .await;

            // Remove the sentinel and wait for the removal to be observed,
            // restoring the directory to its original state.
            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();
            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_none())
                .await;

            // Finally, wait for any in-flight scan to finish.
            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
4557
/// Running totals accumulated while a cursor moves through the path-ordered
/// entry tree; answers "how many entries precede this position?" under the
/// various include filters.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// Greatest path seen so far (the tree is ordered by path).
    max_path: &'a Path,
    /// All entries, including ignored ones.
    count: usize,
    /// Entries that are not ignored.
    non_ignored_count: usize,
    /// File entries, including ignored ones.
    file_count: usize,
    /// File entries that are not ignored.
    non_ignored_file_count: usize,
}

impl<'a> TraversalProgress<'a> {
    /// Returns how many entries seen so far match the given filters.
    /// Directory counts are derived by subtracting files from totals.
    fn count(&self, include_files: bool, include_dirs: bool, include_ignored: bool) -> usize {
        let (with_ignored, without_ignored) = if include_files && include_dirs {
            (self.count, self.non_ignored_count)
        } else if include_files {
            (self.file_count, self.non_ignored_file_count)
        } else if include_dirs {
            (
                self.count - self.file_count,
                self.non_ignored_count - self.non_ignored_file_count,
            )
        } else {
            (0, 0)
        };
        if include_ignored {
            with_ignored
        } else {
            without_ignored
        }
    }
}
4580
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Folds an entry-subtree summary into the running totals; `max_path`
    /// jumps to the summary's maximum path while the counts accumulate.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.non_ignored_count += summary.non_ignored_count;
        self.file_count += summary.file_count;
        self.non_ignored_file_count += summary.non_ignored_file_count;
    }
}
4590
4591impl<'a> Default for TraversalProgress<'a> {
4592 fn default() -> Self {
4593 Self {
4594 max_path: Path::new(""),
4595 count: 0,
4596 non_ignored_count: 0,
4597 file_count: 0,
4598 non_ignored_file_count: 0,
4599 }
4600 }
4601}
4602
/// Per-subtree counts of files in each git status category.
#[derive(Clone, Debug, Default, Copy)]
struct GitStatuses {
    added: usize,
    modified: usize,
    conflict: usize,
}

impl AddAssign for GitStatuses {
    /// Accumulates another set of counts component-wise.
    fn add_assign(&mut self, other: Self) {
        self.added += other.added;
        self.modified += other.modified;
        self.conflict += other.conflict;
    }
}

impl Sub for GitStatuses {
    type Output = GitStatuses;

    /// Component-wise difference. NOTE(review): callers must guarantee
    /// `other` <= `self` in every component — `usize` subtraction panics on
    /// underflow in debug builds.
    fn sub(self, other: Self) -> Self::Output {
        GitStatuses {
            added: self.added - other.added,
            modified: self.modified - other.modified,
            conflict: self.conflict - other.conflict,
        }
    }
}
4629
impl<'a> sum_tree::Dimension<'a, EntrySummary> for GitStatuses {
    /// Accumulates the git status counts from an entry-subtree summary.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        *self += summary.statuses
    }
}
4635
/// A filtered, path-ordered cursor over a snapshot's entries.
pub struct Traversal<'a> {
    /// Cursor over the snapshot's entry tree.
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    /// Whether ignored entries are included.
    include_ignored: bool,
    /// Whether file entries are included.
    include_files: bool,
    /// Whether directory entries are included.
    include_dirs: bool,
}
4642
impl<'a> Traversal<'a> {
    /// Advances past the current entry to the next one matching the
    /// file/dir/ignored filters, by seeking until the filtered count grows by
    /// one. Returns the result of the underlying cursor seek.
    pub fn advance(&mut self) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + 1,
                include_dirs: self.include_dirs,
                include_files: self.include_files,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skips the current entry and all of its descendants, repeating until an
    /// entry matching the filters is reached. Returns false if the traversal
    /// is exhausted first.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek past everything within the current entry's subtree.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_files || !entry.is_file())
                    && (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of filtered entries strictly before the current position.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_files, self.include_dirs, self.include_ignored)
    }

    /// Number of filtered entries up to and including the current entry.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_files, self.include_dirs, self.include_ignored)
    }
}
4692
4693impl<'a> Iterator for Traversal<'a> {
4694 type Item = &'a Entry;
4695
4696 fn next(&mut self) -> Option<Self::Item> {
4697 if let Some(item) = self.entry() {
4698 self.advance();
4699 Some(item)
4700 } else {
4701 None
4702 }
4703 }
4704}
4705
/// A seek target in the entry tree, evaluated against `TraversalProgress`.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek to the first position past the given path's entire subtree.
    PathSuccessor(&'a Path),
    /// Seek until the count of entries matching the filters reaches `count`.
    Count {
        count: usize,
        include_files: bool,
        include_ignored: bool,
        include_dirs: bool,
    },
}
4717
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    /// Compares this target with the cursor's accumulated progress, driving
    /// the seek through the entry tree.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Order directly by path.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // While the cursor is still within `path`'s subtree the
                // target lies ahead; the first position outside compares
                // equal, stopping the seek there.
                if cursor_location.max_path.starts_with(path) {
                    Ordering::Greater
                } else {
                    Ordering::Equal
                }
            }
            // Order by the filtered entry count.
            TraversalTarget::Count {
                count,
                include_files,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_files, *include_dirs, *include_ignored),
            ),
        }
    }
}
4741
impl<'a, 'b> SeekTarget<'a, EntrySummary, (TraversalProgress<'a>, GitStatuses)>
    for TraversalTarget<'b>
{
    /// Delegates to the `TraversalProgress` dimension, ignoring the
    /// accompanying `GitStatuses` totals.
    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitStatuses), _: &()) -> Ordering {
        self.cmp(&cursor_location.0, &())
    }
}
4749
/// Iterator over the direct children of `parent_path` (descendants of those
/// children are skipped).
pub struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
4754
4755impl<'a> Iterator for ChildEntriesIter<'a> {
4756 type Item = &'a Entry;
4757
4758 fn next(&mut self) -> Option<Self::Item> {
4759 if let Some(item) = self.traversal.entry() {
4760 if item.path.starts_with(&self.parent_path) {
4761 self.traversal.advance_to_sibling();
4762 return Some(item);
4763 }
4764 }
4765 None
4766 }
4767}
4768
/// Iterator over all entries beneath `parent_path`, at any depth.
pub struct DescendentEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
4773
4774impl<'a> Iterator for DescendentEntriesIter<'a> {
4775 type Item = &'a Entry;
4776
4777 fn next(&mut self) -> Option<Self::Item> {
4778 if let Some(item) = self.traversal.entry() {
4779 if item.path.starts_with(&self.parent_path) {
4780 self.traversal.advance();
4781 return Some(item);
4782 }
4783 }
4784 None
4785 }
4786}
4787
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes an entry for the wire. The char bag and `is_private` flag
    /// are not transmitted; the receiving side recomputes the bag and
    /// defaults `is_private` to false (see the corresponding `TryFrom`).
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
            is_external: entry.is_external,
            git_status: entry.git_status.map(git_status_to_proto),
        }
    }
}
4803
4804impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
4805 type Error = anyhow::Error;
4806
4807 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
4808 let kind = if entry.is_dir {
4809 EntryKind::Dir
4810 } else {
4811 let mut char_bag = *root_char_bag;
4812 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
4813 EntryKind::File(char_bag)
4814 };
4815 let path: Arc<Path> = PathBuf::from(entry.path).into();
4816 Ok(Entry {
4817 id: ProjectEntryId::from_proto(entry.id),
4818 kind,
4819 path,
4820 inode: entry.inode,
4821 mtime: entry.mtime.map(|time| time.into()),
4822 is_symlink: entry.is_symlink,
4823 is_ignored: entry.is_ignored,
4824 is_external: entry.is_external,
4825 git_status: git_status_from_proto(entry.git_status),
4826 is_private: false,
4827 })
4828 }
4829}
4830
4831fn combine_git_statuses(
4832 staged: Option<GitFileStatus>,
4833 unstaged: Option<GitFileStatus>,
4834) -> Option<GitFileStatus> {
4835 if let Some(staged) = staged {
4836 if let Some(unstaged) = unstaged {
4837 if unstaged == staged {
4838 Some(staged)
4839 } else {
4840 Some(GitFileStatus::Modified)
4841 }
4842 } else {
4843 Some(staged)
4844 }
4845 } else {
4846 unstaged
4847 }
4848}
4849
4850fn git_status_from_proto(git_status: Option<i32>) -> Option<GitFileStatus> {
4851 git_status.and_then(|status| {
4852 proto::GitStatus::from_i32(status).map(|status| match status {
4853 proto::GitStatus::Added => GitFileStatus::Added,
4854 proto::GitStatus::Modified => GitFileStatus::Modified,
4855 proto::GitStatus::Conflict => GitFileStatus::Conflict,
4856 })
4857 })
4858}
4859
4860fn git_status_to_proto(status: GitFileStatus) -> i32 {
4861 match status {
4862 GitFileStatus::Added => proto::GitStatus::Added as i32,
4863 GitFileStatus::Modified => proto::GitStatus::Modified as i32,
4864 GitFileStatus::Conflict => proto::GitStatus::Conflict as i32,
4865 }
4866}
4867
/// A stable identifier for an entry within a project.
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProjectEntryId(usize);

impl ProjectEntryId {
    /// Sentinel id holding the largest possible value.
    pub const MAX: Self = Self(usize::MAX);

    /// Allocates the next id from a shared, monotonically increasing counter.
    pub fn new(counter: &AtomicUsize) -> Self {
        ProjectEntryId(counter.fetch_add(1, SeqCst))
    }

    /// Reconstructs an id from its wire representation.
    pub fn from_proto(id: u64) -> Self {
        ProjectEntryId(id as usize)
    }

    /// Converts this id into its wire representation.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// Returns the raw integer value of this id.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
4890
/// Aggregate counts of error and warning diagnostics for a path.
#[derive(Copy, Clone, Debug, Default, PartialEq, Serialize)]
pub struct DiagnosticSummary {
    pub error_count: usize,
    pub warning_count: usize,
}
4896
4897impl DiagnosticSummary {
4898 fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
4899 let mut this = Self {
4900 error_count: 0,
4901 warning_count: 0,
4902 };
4903
4904 for entry in diagnostics {
4905 if entry.diagnostic.is_primary {
4906 match entry.diagnostic.severity {
4907 DiagnosticSeverity::ERROR => this.error_count += 1,
4908 DiagnosticSeverity::WARNING => this.warning_count += 1,
4909 _ => {}
4910 }
4911 }
4912 }
4913
4914 this
4915 }
4916
4917 pub fn is_empty(&self) -> bool {
4918 self.error_count == 0 && self.warning_count == 0
4919 }
4920
4921 pub fn to_proto(
4922 &self,
4923 language_server_id: LanguageServerId,
4924 path: &Path,
4925 ) -> proto::DiagnosticSummary {
4926 proto::DiagnosticSummary {
4927 path: path.to_string_lossy().to_string(),
4928 language_server_id: language_server_id.0 as u64,
4929 error_count: self.error_count as u32,
4930 warning_count: self.warning_count as u32,
4931 }
4932 }
4933}