1mod ignore;
2mod worktree_settings;
3#[cfg(test)]
4mod worktree_tests;
5
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context as _, Result};
8use client::{proto, Client};
9use clock::ReplicaId;
10use collections::{HashMap, HashSet, VecDeque};
11use fs::{copy_recursive, RemoveOptions};
12use fs::{
13 repository::{GitFileStatus, GitRepository, RepoPath},
14 Fs,
15};
16use futures::{
17 channel::{
18 mpsc::{self, UnboundedSender},
19 oneshot,
20 },
21 select_biased,
22 task::Poll,
23 FutureExt as _, Stream, StreamExt,
24};
25use fuzzy::CharBag;
26use git::{DOT_GIT, GITIGNORE};
27use gpui::{
28 AppContext, AsyncAppContext, BackgroundExecutor, Context, EventEmitter, Model, ModelContext,
29 Task,
30};
31use ignore::IgnoreStack;
32use itertools::Itertools;
33use language::{
34 proto::{
35 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
36 serialize_version,
37 },
38 Buffer, Capability, DiagnosticEntry, File as _, LineEnding, PointUtf16, Rope, RopeFingerprint,
39 Unclipped,
40};
41use lsp::{DiagnosticSeverity, LanguageServerId};
42use parking_lot::Mutex;
43use postage::{
44 barrier,
45 prelude::{Sink as _, Stream as _},
46 watch,
47};
48use serde::Serialize;
49use settings::{Settings, SettingsLocation, SettingsStore};
50use smol::channel::{self, Sender};
51use std::{
52 any::Any,
53 cmp::{self, Ordering},
54 convert::TryFrom,
55 ffi::OsStr,
56 fmt,
57 future::Future,
58 mem,
59 ops::{AddAssign, Deref, DerefMut, Sub},
60 path::{Path, PathBuf},
61 pin::Pin,
62 sync::{
63 atomic::{AtomicUsize, Ordering::SeqCst},
64 Arc,
65 },
66 time::{Duration, SystemTime},
67};
68use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
69use text::BufferId;
70use util::{
71 paths::{PathMatcher, HOME},
72 ResultExt,
73};
74
75pub use worktree_settings::WorktreeSettings;
76
/// Latency with which raw file-system events are coalesced before the
/// background scanner reacts to them (passed to `Fs::watch`).
///
/// NOTE(review): the previous `#[cfg(feature = "test-support")]` /
/// `#[cfg(not(feature = "test-support"))]` pair declared the exact same
/// 100ms value in both branches, so the conditional compilation was
/// redundant and has been collapsed into a single definition.
pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
81
/// A stable identifier for a worktree within a project.
///
/// For local worktrees it is derived from the model's entity id (see
/// `WorktreeId::from_usize(cx.entity_id() ...)` in [`Worktree::local`]);
/// for remote worktrees it mirrors the id sent by the server.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
84
/// A set of local or remote files that are being opened as part of a project.
/// Responsible for tracking related FS (for local)/collab (for remote) events and corresponding updates.
/// Stores git repositories data and the diagnostics for the file(s).
///
/// Has an absolute path, and may be set to be visible in Zed UI or not.
/// May correspond to a directory or a single file.
/// Possible examples:
/// * a drag and dropped file — may be added as an invisible, "ephemeral" entry to the current worktree
/// * a directory opened in Zed — may be added as a visible entry to the current worktree
///
/// Uses [`Entry`] to track the state of each file/directory, can look up absolute paths for entries.
pub enum Worktree {
    /// A worktree backed by the local filesystem, kept up to date by
    /// background scanner tasks.
    Local(LocalWorktree),
    /// A worktree mirrored from a collaborator, kept up to date by
    /// `proto::UpdateWorktree` messages received over RPC.
    Remote(RemoteWorktree),
}
100
/// The local (filesystem-backed) variant of a [`Worktree`].
pub struct LocalWorktree {
    /// The current state of the tree, replaced wholesale by the background
    /// scanner via `set_snapshot`.
    snapshot: LocalSnapshot,
    /// Sends requests for specific paths to be (re)scanned by the background scanner.
    scan_requests_tx: channel::Sender<ScanRequest>,
    /// Sends path prefixes whose subtrees should be scanned.
    path_prefixes_to_scan_tx: channel::Sender<Arc<Path>>,
    /// Watch channel tracking whether a scan is currently in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    /// Keeps the background scanner tasks alive; replaced when settings
    /// changes force a full re-scan.
    _background_scanner_tasks: Vec<Task<()>>,
    /// Present only while this worktree is shared with collaborators.
    share: Option<ShareState>,
    /// Full diagnostic entries per worktree-relative path, keyed by the
    /// language server that produced them.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Error/warning counts per path and language server, mirrored to
    /// collaborators when shared.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    /// Whether the underlying filesystem is case sensitive (falls back to
    /// `true` when detection fails; see [`Worktree::local`]).
    fs_case_sensitive: bool,
    /// Whether this worktree is shown in the UI (see type-level docs on [`Worktree`]).
    visible: bool,
}

/// A request for the background scanner to rescan specific paths.
struct ScanRequest {
    /// Paths relative to the worktree root.
    relative_paths: Vec<Arc<Path>>,
    /// Dropped by the scanner once the request has been processed, allowing
    /// the requester to await completion.
    done: barrier::Sender,
}
126
/// The collaborator-side variant of a [`Worktree`], mirrored over RPC.
pub struct RemoteWorktree {
    /// Foreground copy of the snapshot, refreshed from `background_snapshot`
    /// after each applied update (see [`Worktree::remote`]).
    snapshot: Snapshot,
    /// Snapshot that incoming `UpdateWorktree` messages are applied to on a
    /// background task before being copied into `snapshot`.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Sender feeding updates to the background task; `None` once updates
    /// are no longer accepted.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Waiters to be notified once a given scan id has been observed,
    /// ordered by scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
139
/// An immutable-style view of a worktree's contents at a point in time.
/// Shared between the local and remote worktree variants.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root on the host filesystem.
    abs_path: Arc<Path>,
    /// File name of the root, used for fuzzy matching (see [`Worktree::local`]).
    root_name: String,
    /// Lowercased character bag of `root_name`, precomputed for fuzzy matching.
    root_char_bag: CharBag,
    /// All entries, ordered by path.
    entries_by_path: SumTree<Entry>,
    /// The same entries, indexed by id for O(log n) lookup.
    entries_by_id: SumTree<PathEntry>,
    /// Git repositories, keyed by their working directory path.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
162
/// A git repository discovered inside the worktree, as stored in a [`Snapshot`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    /// Entry id of the repository's working directory (the folder containing `.git`).
    pub(crate) work_directory: WorkDirectoryEntry,
    /// Name of the currently checked-out branch, if known.
    pub(crate) branch: Option<Arc<str>>,
}
168
impl RepositoryEntry {
    /// The currently checked-out branch name, if known.
    pub fn branch(&self) -> Option<Arc<str>> {
        self.branch.clone()
    }

    /// The project entry id of this repository's working directory.
    pub fn work_directory_id(&self) -> ProjectEntryId {
        *self.work_directory
    }

    /// Resolves the working directory to its path within `snapshot`, if the
    /// entry still exists there.
    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
        snapshot
            .entry_for_id(self.work_directory_id())
            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
    }

    /// Serializes this entry for the wire protocol.
    ///
    /// NOTE(review): the `_: &Self` parameter (presumably the previous state
    /// to diff against) is ignored — the full entry is always sent. Confirm
    /// whether a delta encoding was intended here.
    pub fn build_update(&self, _: &Self) -> proto::RepositoryEntry {
        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id().to_proto(),
            branch: self.branch.as_ref().map(|str| str.to_string()),
        }
    }
}
191
192impl From<&RepositoryEntry> for proto::RepositoryEntry {
193 fn from(value: &RepositoryEntry) -> Self {
194 proto::RepositoryEntry {
195 work_directory_id: value.work_directory.to_proto(),
196 branch: value.branch.as_ref().map(|str| str.to_string()),
197 }
198 }
199}
200
/// This path corresponds to the 'content path' (the folder that contains the .git)
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(pub(crate) Arc<Path>);

impl Default for RepositoryWorkDirectory {
    /// The empty relative path, i.e. the root of the worktree.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}

impl AsRef<Path> for RepositoryWorkDirectory {
    fn as_ref(&self) -> &Path {
        &self.0
    }
}
216
/// Newtype over the [`ProjectEntryId`] of a repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);

impl WorkDirectoryEntry {
    /// Converts a worktree-relative `path` into a path relative to this
    /// repository's working directory.
    ///
    /// Errors if the working-directory entry no longer exists in `worktree`,
    /// or if `path` does not lie inside the working directory.
    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Result<RepoPath> {
        let entry = worktree
            .entry_for_id(self.0)
            .ok_or_else(|| anyhow!("entry not found"))?;
        let path = path
            .strip_prefix(&entry.path)
            .map_err(|_| anyhow!("could not relativize {:?} against {:?}", path, entry.path))?;
        Ok(path.into())
    }
}
231
232impl Deref for WorkDirectoryEntry {
233 type Target = ProjectEntryId;
234
235 fn deref(&self) -> &Self::Target {
236 &self.0
237 }
238}
239
240impl From<ProjectEntryId> for WorkDirectoryEntry {
241 fn from(value: ProjectEntryId) -> Self {
242 WorkDirectoryEntry(value)
243 }
244}
245
/// A [`Snapshot`] extended with state that only exists for local worktrees,
/// such as gitignore caches and git repository handles.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    /// Compiled matchers from the `file_scan_exclusions` setting; changing
    /// them triggers a full re-scan (see [`Worktree::local`]).
    file_scan_exclusions: Vec<PathMatcher>,
    /// Compiled matchers from the `private_files` setting.
    private_files: Vec<PathMatcher>,
}
258
/// Mutable state owned by the background scanner while it processes
/// filesystem events and scan requests.
struct BackgroundScannerState {
    /// The snapshot being built up by the scanner.
    snapshot: LocalSnapshot,
    /// Directories whose contents have already been scanned.
    scanned_dirs: HashSet<ProjectEntryId>,
    /// Path prefixes requested for scanning but not yet processed.
    path_prefixes_to_scan: HashSet<Arc<Path>>,
    /// Individual paths requested for scanning but not yet processed.
    paths_to_scan: HashSet<Arc<Path>>,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    /// Paths that changed during the current update, to be reported upstream.
    changed_paths: Vec<Arc<Path>>,
    /// The snapshot as of the previous completed update, used to compute deltas.
    prev_snapshot: Snapshot,
}
272
/// The on-disk handle and scan bookkeeping for a git repository found in a
/// local worktree.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    /// Scan id at which the repository's `.git` directory was last scanned.
    pub(crate) git_dir_scan_id: usize,
    /// Handle to the underlying git repository, shared with background tasks.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}

impl LocalRepositoryEntry {
    /// Loads the index (staged) text of `relative_file_path` from the
    /// repository, if the file is present in the index.
    pub fn load_index_text(&self, relative_file_path: &Path) -> Option<String> {
        self.repo_ptr.lock().load_index_text(relative_file_path)
    }
}
287
288impl Deref for LocalSnapshot {
289 type Target = Snapshot;
290
291 fn deref(&self) -> &Self::Target {
292 &self.snapshot
293 }
294}
295
296impl DerefMut for LocalSnapshot {
297 fn deref_mut(&mut self) -> &mut Self::Target {
298 &mut self.snapshot
299 }
300}
301
/// Progress messages sent from the background scanner to the foreground
/// worktree (see `start_background_scan_tasks`).
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced an updated snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// Entries that changed since the previous snapshot.
        changes: UpdatedEntriesSet,
        /// When present, dropped by the receiver after applying the snapshot,
        /// which releases the corresponding `ScanRequest::done` waiter.
        barrier: Option<barrier::Sender>,
        /// Whether the scanner is still working after this update.
        scanning: bool,
    },
}

/// State that exists only while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    /// Channel over which new snapshots (plus their entry and git-repository
    /// deltas) are sent for relaying to the server.
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}

/// Events emitted by a [`Worktree`] model.
#[derive(Clone)]
pub enum Event {
    UpdatedEntries(UpdatedEntriesSet),
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
}

impl EventEmitter<Event> for Worktree {}
327
328impl Worktree {
    /// Creates a local worktree rooted at `path` (a file or a directory).
    ///
    /// Stats the root, probes filesystem case sensitivity, builds an initial
    /// snapshot containing only the root entry, and spawns the background
    /// scanner tasks that populate the rest of the tree. Also observes
    /// settings changes so the scanner is restarted when
    /// `file_scan_exclusions` or `private_files` change.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<Model<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();

        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        // Assume case sensitivity when the probe fails; an error is logged
        // rather than propagated since this only affects scanning heuristics.
        let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });

        // Clones captured by the settings-observer closure below.
        let closure_fs = Arc::clone(&fs);
        let closure_next_entry_id = Arc::clone(&next_entry_id);
        let closure_abs_path = abs_path.to_path_buf();
        cx.new_model(move |cx: &mut ModelContext<Worktree>| {
            cx.observe_global::<SettingsStore>(move |this, cx| {
                if let Self::Local(this) = this {
                    // Re-read the two scanning-related settings.
                    let new_file_scan_exclusions = path_matchers(
                        WorktreeSettings::get_global(cx)
                            .file_scan_exclusions
                            .as_deref(),
                        "file_scan_exclusions",
                    );
                    let new_private_files = path_matchers(
                        WorktreeSettings::get(Some(settings::SettingsLocation {
                            worktree_id: cx.handle().entity_id().as_u64() as usize,
                            path: Path::new("")
                        }), cx).private_files.as_deref(),
                        "private_files",
                    );

                    // Only restart the scanner if either setting actually changed.
                    if new_file_scan_exclusions != this.snapshot.file_scan_exclusions
                        || new_private_files != this.snapshot.private_files
                    {
                        this.snapshot.file_scan_exclusions = new_file_scan_exclusions;
                        this.snapshot.private_files = new_private_files;

                        log::info!(
                            "Re-scanning directories, new scan exclude files: {:?}, new dotenv files: {:?}",
                            this.snapshot
                                .file_scan_exclusions
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>(),
                            this.snapshot
                                .private_files
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>()
                        );

                        // Replace the request channels and scanner tasks;
                        // assigning `_background_scanner_tasks` drops the old
                        // tasks (and their channel receivers), shutting the
                        // previous scanner down.
                        let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
                        let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) =
                            channel::unbounded();
                        this.scan_requests_tx = scan_requests_tx;
                        this.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;
                        this._background_scanner_tasks = start_background_scan_tasks(
                            &closure_abs_path,
                            this.snapshot(),
                            scan_requests_rx,
                            path_prefixes_to_scan_rx,
                            Arc::clone(&closure_next_entry_id),
                            Arc::clone(&closure_fs),
                            cx,
                        );
                        this.is_scanning = watch::channel_with(true);
                    }
                }
            })
            .detach();

            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            // Initial snapshot: settings-derived matchers plus an otherwise
            // empty tree. `scan_id` starts at 1 with `completed_scan_id` 0,
            // i.e. the first scan is considered in progress.
            let mut snapshot = LocalSnapshot {
                file_scan_exclusions: path_matchers(
                    WorktreeSettings::get_global(cx)
                        .file_scan_exclusions
                        .as_deref(),
                    "file_scan_exclusions",
                ),
                private_files: path_matchers(
                    WorktreeSettings::get(Some(SettingsLocation {
                        worktree_id: cx.handle().entity_id().as_u64() as usize,
                        path: Path::new(""),
                    }), cx).private_files.as_deref(),
                    "private_files",
                ),
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.entity_id().as_u64() as usize),
                    abs_path: abs_path.to_path_buf().into(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry when the path exists on disk.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
            let task_snapshot = snapshot.clone();
            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                scan_requests_tx,
                path_prefixes_to_scan_tx,
                _background_scanner_tasks: start_background_scan_tasks(
                    &abs_path,
                    task_snapshot,
                    scan_requests_rx,
                    path_prefixes_to_scan_rx,
                    Arc::clone(&next_entry_id),
                    Arc::clone(&fs),
                    cx,
                ),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                fs_case_sensitive,
                visible,
            })
        })
    }
485
    /// Creates a remote worktree that mirrors a collaborator's worktree
    /// described by `worktree` metadata.
    ///
    /// Spawns two tasks:
    /// 1. A background task that applies incoming `proto::UpdateWorktree`
    ///    messages to `background_snapshot`.
    /// 2. A foreground task that, after each applied update, copies the
    ///    background snapshot into the model's `snapshot`, emits events, and
    ///    releases any `snapshot_subscriptions` whose scan id has now been
    ///    observed.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> Model<Self> {
        cx.new_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Task 1: apply protocol updates to the background snapshot.
            // Failed updates are logged but do not stop the stream.
            cx.background_executor()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Task 2: propagate each applied update to the foreground model.
            cx.spawn(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_remote_mut().unwrap();
                        this.snapshot = this.background_snapshot.lock().clone();
                        cx.emit(Event::UpdatedEntries(Arc::from([])));
                        cx.notify();
                        // Wake subscribers in scan-id order until we reach one
                        // whose scan has not been observed yet.
                        while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                            if this.observed_snapshot(*scan_id) {
                                let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                let _ = tx.send(());
                            } else {
                                break;
                            }
                        }
                    })?;
                }
                anyhow::Ok(())
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
565
566 pub fn as_local(&self) -> Option<&LocalWorktree> {
567 if let Worktree::Local(worktree) = self {
568 Some(worktree)
569 } else {
570 None
571 }
572 }
573
574 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
575 if let Worktree::Remote(worktree) = self {
576 Some(worktree)
577 } else {
578 None
579 }
580 }
581
582 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
583 if let Worktree::Local(worktree) = self {
584 Some(worktree)
585 } else {
586 None
587 }
588 }
589
590 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
591 if let Worktree::Remote(worktree) = self {
592 Some(worktree)
593 } else {
594 None
595 }
596 }
597
598 pub fn is_local(&self) -> bool {
599 matches!(self, Worktree::Local(_))
600 }
601
602 pub fn is_remote(&self) -> bool {
603 !self.is_local()
604 }
605
    /// Returns a copy of the plain [`Snapshot`], discarding any local-only
    /// state held by a `LocalSnapshot`.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The id of the most recently started scan (see [`Snapshot::scan_id`]).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The most recent scan id whose preceding scans have all completed
    /// (see [`Snapshot::completed_scan_id`]).
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    /// Whether this worktree is shown in the UI (see type-level docs).
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The collaboration replica id: 0 for the local host, otherwise the
    /// id assigned to this client for the remote worktree.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
640
641 pub fn diagnostic_summaries(
642 &self,
643 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
644 match self {
645 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
646 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
647 }
648 .iter()
649 .flat_map(|(path, summaries)| {
650 summaries
651 .iter()
652 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
653 })
654 }
655
    /// The absolute path of the worktree root on the host's filesystem.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }

    /// A [`File`] handle for the worktree's root entry, or `None` when the
    /// root entry does not exist (e.g. not scanned yet).
    pub fn root_file(&self, cx: &mut ModelContext<Self>) -> Option<Arc<File>> {
        let entry = self.root_entry()?;
        Some(File::for_entry(entry.clone(), cx.handle()))
    }
667}
668
/// Spawns the two tasks that keep a local worktree's snapshot up to date.
///
/// 1. A background task that watches the filesystem under `abs_path` and
///    drives a [`BackgroundScanner`] over the resulting event stream.
/// 2. A foreground task that receives [`ScanState`] messages from the scanner
///    and applies them to the worktree model.
///
/// The returned tasks must be kept alive by the caller (they are stored in
/// `LocalWorktree::_background_scanner_tasks`).
fn start_background_scan_tasks(
    abs_path: &Path,
    snapshot: LocalSnapshot,
    scan_requests_rx: channel::Receiver<ScanRequest>,
    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    next_entry_id: Arc<AtomicUsize>,
    fs: Arc<dyn Fs>,
    cx: &mut ModelContext<'_, Worktree>,
) -> Vec<Task<()>> {
    let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
    let background_scanner = cx.background_executor().spawn({
        let abs_path = abs_path.to_path_buf();
        let background = cx.background_executor().clone();
        async move {
            let events = fs.watch(&abs_path, FS_WATCH_LATENCY).await;
            // Re-probed here (in addition to `Worktree::local`) because this
            // function can also be called from the settings observer.
            let case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
                log::error!(
                    "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
                );
                true
            });

            BackgroundScanner::new(
                snapshot,
                next_entry_id,
                fs,
                case_sensitive,
                scan_states_tx,
                background,
                scan_requests_rx,
                path_prefixes_to_scan_rx,
            )
            .run(events)
            .await;
        }
    });
    // Foreground task: apply each scan state to the worktree model. Stops
    // when the scanner channel closes or the model is dropped.
    let scan_state_updater = cx.spawn(|this, mut cx| async move {
        while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                match state {
                    ScanState::Started => {
                        *this.is_scanning.0.borrow_mut() = true;
                    }
                    ScanState::Updated {
                        snapshot,
                        changes,
                        barrier,
                        scanning,
                    } => {
                        *this.is_scanning.0.borrow_mut() = scanning;
                        this.set_snapshot(snapshot, changes, cx);
                        // Dropping the barrier signals any waiting ScanRequest.
                        drop(barrier);
                    }
                }
                cx.notify();
            })
            .ok();
        }
    });
    vec![background_scanner, scan_state_updater]
}
731
732fn path_matchers(values: Option<&[String]>, context: &'static str) -> Vec<PathMatcher> {
733 values
734 .unwrap_or(&[])
735 .iter()
736 .sorted()
737 .filter_map(|pattern| {
738 PathMatcher::new(pattern)
739 .map(Some)
740 .unwrap_or_else(|e| {
741 log::error!(
742 "Skipping pattern {pattern} in `{}` project settings due to parsing error: {e:#}", context
743 );
744 None
745 })
746 })
747 .collect()
748}
749
750impl LocalWorktree {
751 pub fn contains_abs_path(&self, path: &Path) -> bool {
752 path.starts_with(&self.abs_path)
753 }
754
    /// Loads the file at `path` into a new [`Buffer`] model with the given
    /// `id`, including its git diff base when the file belongs to a
    /// repository (see [`LocalWorktree::load`]).
    pub fn load_buffer(
        &mut self,
        id: BufferId,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Model<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))?
                .await?;
            // Construct the text buffer off the main thread; replica id 0
            // because this is the host's copy.
            let text_buffer = cx
                .background_executor()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            cx.new_model(|_| {
                Buffer::build(
                    text_buffer,
                    diff_base,
                    Some(Arc::new(file)),
                    Capability::ReadWrite,
                )
            })
        })
    }
780
    /// Creates an empty, in-memory [`Buffer`] for a file at `path` that does
    /// not exist on disk yet (`mtime`/`entry_id` are `None` until saved).
    pub fn new_buffer(
        &mut self,
        buffer_id: BufferId,
        path: Arc<Path>,
        cx: &mut ModelContext<Worktree>,
    ) -> Model<Buffer> {
        let text_buffer = text::Buffer::new(0, buffer_id, "".into());
        let worktree = cx.handle();
        cx.new_model(|_| {
            Buffer::build(
                text_buffer,
                None,
                Some(Arc::new(File {
                    worktree,
                    path,
                    mtime: None,
                    entry_id: None,
                    is_local: true,
                    is_deleted: false,
                    is_private: false,
                })),
                Capability::ReadWrite,
            )
        })
    }
806
807 pub fn diagnostics_for_path(
808 &self,
809 path: &Path,
810 ) -> Vec<(
811 LanguageServerId,
812 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
813 )> {
814 self.diagnostics.get(path).cloned().unwrap_or_default()
815 }
816
    /// Removes all diagnostics and summaries produced by `server_id`.
    ///
    /// For every path whose summary for that server is removed, an empty
    /// `UpdateDiagnosticSummary` is sent to collaborators (when shared) so
    /// they clear their copies too. Paths left with no summaries/diagnostics
    /// are dropped from the maps entirely.
    pub fn clear_diagnostics_for_language_server(
        &mut self,
        server_id: LanguageServerId,
        _: &mut ModelContext<Worktree>,
    ) {
        let worktree_id = self.id().to_proto();
        self.diagnostic_summaries
            .retain(|path, summaries_by_server_id| {
                if summaries_by_server_id.remove(&server_id).is_some() {
                    if let Some(share) = self.share.as_ref() {
                        // Zero counts signal removal on the receiving side.
                        self.client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id: share.project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path: path.to_string_lossy().to_string(),
                                    language_server_id: server_id.0 as u64,
                                    error_count: 0,
                                    warning_count: 0,
                                }),
                            })
                            .log_err();
                    }
                    !summaries_by_server_id.is_empty()
                } else {
                    true
                }
            });

        // The per-path diagnostics vectors are kept sorted by server id,
        // hence the binary search.
        self.diagnostics.retain(|_, diagnostics_by_server_id| {
            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                diagnostics_by_server_id.remove(ix);
                !diagnostics_by_server_id.is_empty()
            } else {
                true
            }
        });
    }
855
    /// Replaces the diagnostics produced by `server_id` for `worktree_path`.
    ///
    /// Keeps `diagnostics` and `diagnostic_summaries` in sync (removing the
    /// path's entries when the new set is empty), and forwards the new
    /// summary to collaborators when shared. Returns `Ok(true)` when the
    /// summary changed observably, i.e. either the old or new summary is
    /// non-empty.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: drop its entry, and the
            // whole path entry if it becomes empty.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path vector stays sorted by server id, so insert or
            // replace at the binary-search position.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
915
    /// Installs a new snapshot produced by the background scanner.
    ///
    /// Computes the git-repository delta against the previous snapshot,
    /// forwards snapshot + deltas to collaborators when shared, and emits
    /// `UpdatedEntries` / `UpdatedGitRepositories` events for non-empty
    /// change sets.
    fn set_snapshot(
        &mut self,
        new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut ModelContext<Worktree>,
    ) {
        // Diff repositories before replacing the snapshot.
        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);

        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((
                    self.snapshot.clone(),
                    entry_changes.clone(),
                    repo_changes.clone(),
                ))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }
944
    /// Computes which git repositories were added, removed, or rescanned
    /// between two snapshots.
    ///
    /// Performs a merge-join over the two `git_repositories` maps (both
    /// iterated in entry-id order). For each change, records the repository's
    /// work-directory path plus the old `RepositoryEntry` (`None` for newly
    /// added repositories).
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        // Present only in the new snapshot: repository added.
                        Ordering::Less => {
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        // Present in both: report only if its .git contents
                        // were rescanned since the old snapshot.
                        Ordering::Equal => {
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repository_entries
                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
                                        .cloned();
                                    changes.push((
                                        entry.path.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        // Present only in the old snapshot: repository removed.
                        Ordering::Greater => {
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repository_entries
                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
                                    .cloned();
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                // Old side exhausted: everything remaining is newly added.
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                // New side exhausted: everything remaining was removed.
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repository_entries
                            .get(&RepositoryWorkDirectory(entry.path.clone()))
                            .cloned();
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Helper to turn the peeked `(&K, &V)` into owned `(K, V)` values.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
1039
1040 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1041 let mut is_scanning_rx = self.is_scanning.1.clone();
1042 async move {
1043 let mut is_scanning = *is_scanning_rx.borrow();
1044 while is_scanning {
1045 if let Some(value) = is_scanning_rx.recv().await {
1046 is_scanning = value;
1047 } else {
1048 break;
1049 }
1050 }
1051 }
1052 }
1053
    /// Returns a copy of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }

    /// Metadata describing this worktree for the collaboration protocol.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
1066
    /// Loads the file at `path`, returning its [`File`] handle, contents, and
    /// git index text (diff base) when the file belongs to a repository.
    ///
    /// Also refreshes the file's worktree entry. When `refresh_entry` yields
    /// `None` the path is excluded from scanning, so metadata is fetched
    /// directly from the filesystem instead and no `entry_id` is recorded.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let entry = self.refresh_entry(path.clone(), None, cx);

        cx.spawn(|this, mut cx| async move {
            let abs_path = abs_path?;
            let text = fs.load(&abs_path).await?;
            let mut index_task = None;
            let snapshot = this.update(&mut cx, |this, _| this.as_local().unwrap().snapshot())?;
            // If the file is inside a git repository, read its index text on a
            // background task while the entry refresh completes.
            if let Some(repo) = snapshot.repository_for_path(&path) {
                if let Some(repo_path) = repo.work_directory.relativize(&snapshot, &path).log_err()
                {
                    if let Some(git_repo) = snapshot.git_repositories.get(&*repo.work_directory) {
                        let git_repo = git_repo.repo_ptr.clone();
                        index_task = Some(
                            cx.background_executor()
                                .spawn(async move { git_repo.lock().load_index_text(&repo_path) }),
                        );
                    }
                }
            }

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            let worktree = this
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            match entry.await? {
                // Tracked file: build the handle from its worktree entry.
                Some(entry) => Ok((
                    File {
                        entry_id: Some(entry.id),
                        worktree,
                        path: entry.path,
                        mtime: entry.mtime,
                        is_local: true,
                        is_deleted: false,
                        is_private: entry.is_private,
                    },
                    text,
                    diff_base,
                )),
                // Excluded file: stat it directly; it has no entry id.
                None => {
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    let is_private = snapshot.is_path_private(path.as_ref());
                    Ok((
                        File {
                            entry_id: None,
                            worktree,
                            path,
                            mtime: Some(metadata.mtime),
                            is_local: true,
                            is_deleted: false,
                            is_private,
                        },
                        text,
                        diff_base,
                    ))
                }
            }
        })
    }
1146
1147 pub fn save_buffer(
1148 &self,
1149 buffer_handle: Model<Buffer>,
1150 path: Arc<Path>,
1151 mut has_changed_file: bool,
1152 cx: &mut ModelContext<Worktree>,
1153 ) -> Task<Result<()>> {
1154 let buffer = buffer_handle.read(cx);
1155
1156 let rpc = self.client.clone();
1157 let buffer_id: u64 = buffer.remote_id().into();
1158 let project_id = self.share.as_ref().map(|share| share.project_id);
1159
1160 if buffer.file().is_some_and(|file| !file.is_created()) {
1161 has_changed_file = true;
1162 }
1163
1164 let text = buffer.as_rope().clone();
1165 let fingerprint = text.fingerprint();
1166 let version = buffer.version();
1167 let save = self.write_file(path.as_ref(), text, buffer.line_ending(), cx);
1168 let fs = Arc::clone(&self.fs);
1169 let abs_path = self.absolutize(&path);
1170 let is_private = self.snapshot.is_path_private(&path);
1171
1172 cx.spawn(move |this, mut cx| async move {
1173 let entry = save.await?;
1174 let abs_path = abs_path?;
1175 let this = this.upgrade().context("worktree dropped")?;
1176
1177 let (entry_id, mtime, path, is_dotenv) = match entry {
1178 Some(entry) => (Some(entry.id), entry.mtime, entry.path, entry.is_private),
1179 None => {
1180 let metadata = fs
1181 .metadata(&abs_path)
1182 .await
1183 .with_context(|| {
1184 format!(
1185 "Fetching metadata after saving the excluded buffer {abs_path:?}"
1186 )
1187 })?
1188 .with_context(|| {
1189 format!("Excluded buffer {path:?} got removed during saving")
1190 })?;
1191 (None, Some(metadata.mtime), path, is_private)
1192 }
1193 };
1194
1195 if has_changed_file {
1196 let new_file = Arc::new(File {
1197 entry_id,
1198 worktree: this,
1199 path,
1200 mtime,
1201 is_local: true,
1202 is_deleted: false,
1203 is_private: is_dotenv,
1204 });
1205
1206 if let Some(project_id) = project_id {
1207 rpc.send(proto::UpdateBufferFile {
1208 project_id,
1209 buffer_id,
1210 file: Some(new_file.to_proto()),
1211 })
1212 .log_err();
1213 }
1214
1215 buffer_handle.update(&mut cx, |buffer, cx| {
1216 if has_changed_file {
1217 buffer.file_updated(new_file, cx);
1218 }
1219 })?;
1220 }
1221
1222 if let Some(project_id) = project_id {
1223 rpc.send(proto::BufferSaved {
1224 project_id,
1225 buffer_id,
1226 version: serialize_version(&version),
1227 mtime: mtime.map(|time| time.into()),
1228 fingerprint: serialize_fingerprint(fingerprint),
1229 })?;
1230 }
1231
1232 buffer_handle.update(&mut cx, |buffer, cx| {
1233 buffer.did_save(version.clone(), fingerprint, mtime, cx);
1234 })?;
1235
1236 Ok(())
1237 })
1238 }
1239
1240 /// Find the lowest path in the worktree's datastructures that is an ancestor
1241 fn lowest_ancestor(&self, path: &Path) -> PathBuf {
1242 let mut lowest_ancestor = None;
1243 for path in path.ancestors() {
1244 if self.entry_for_path(path).is_some() {
1245 lowest_ancestor = Some(path.to_path_buf());
1246 break;
1247 }
1248 }
1249
1250 lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
1251 }
1252
    /// Creates an empty file or a directory at `path`, then refreshes
    /// worktree entries for the new path and every newly-created ancestor
    /// directory, returning the entry for `path` itself.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let path = path.into();
        // Everything between the lowest existing ancestor and `path` will be
        // created implicitly by the write and must be refreshed afterwards.
        let lowest_ancestor = self.lowest_ancestor(&path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background_executor().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path?).await
            } else {
                // An empty file with default line endings.
                fs.save(&abs_path?, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            let (result, refreshes) = this.update(&mut cx, |this, cx| {
                let mut refreshes = Vec::new();
                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
                for refresh_path in refresh_paths.ancestors() {
                    if refresh_path == Path::new("") {
                        continue;
                    }
                    let refresh_full_path = lowest_ancestor.join(refresh_path);

                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
                        refresh_full_path.into(),
                        None,
                        cx,
                    ));
                }
                (
                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
                    refreshes,
                )
            })?;
            // Ancestor refreshes are best-effort; only the target path's
            // refresh determines the returned result.
            for refresh in refreshes {
                refresh.await.log_err();
            }

            result.await
        })
    }
1301
    /// Saves `text` to `path` with the given line endings, then refreshes and
    /// returns the worktree entry (`None` when the path is excluded).
    pub(crate) fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let path: Arc<Path> = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        // Perform the filesystem write on the background executor.
        let write = cx
            .background_executor()
            .spawn(async move { fs.save(&abs_path?, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })?
            .await
        })
    }
1324
    /// Deletes the entry with `entry_id` from disk (recursively for
    /// directories), then rescans its path so the worktree reflects the
    /// removal. Returns `None` if no such entry exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let fs = self.fs.clone();

        let delete = cx.background_executor().spawn(async move {
            if entry.is_file() {
                fs.remove_file(&abs_path?, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(entry.path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let path = delete.await?;
            // Wait for the scanner to process the deleted path before
            // resolving, so observers see a consistent snapshot.
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entries_for_paths(vec![path])
            })?
            .recv()
            .await;
            Ok(())
        }))
    }
1362
    /// Renames (moves) the entry with `entry_id` to `new_path` (worktree
    /// relative), then rescans both paths and returns the new entry.
    /// Returns `Ok(None)` if the entry no longer exists.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Ok(None)),
        };
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let case_sensitive = self.fs_case_sensitive;
        let rename = cx.background_executor().spawn(async move {
            let abs_old_path = abs_old_path?;
            let abs_new_path = abs_new_path?;

            let abs_old_path_lower = abs_old_path.to_str().map(|p| p.to_lowercase());
            let abs_new_path_lower = abs_new_path.to_str().map(|p| p.to_lowercase());

            // If we're on a case-insensitive FS and we're doing a case-only rename (i.e. `foobar` to `FOOBAR`)
            // we want to overwrite, because otherwise we run into a file-already-exists error.
            let overwrite = !case_sensitive
                && abs_old_path != abs_new_path
                && abs_old_path_lower == abs_new_path_lower;

            fs.rename(
                &abs_old_path,
                &abs_new_path,
                fs::RenameOptions {
                    overwrite,
                    ..Default::default()
                },
            )
            .await
        });

        cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })?
            .await
        })
    }
1412
    /// Recursively copies the entry with `entry_id` to `new_path` (worktree
    /// relative), then rescans and returns the new entry. Returns `Ok(None)`
    /// if the source entry no longer exists.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Ok(None)),
        };
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        // Copy on the background executor; directories are copied recursively.
        let copy = cx.background_executor().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path?,
                &abs_new_path?,
                Default::default(),
            )
            .await
        });

        cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })?
            .await
        })
    }
1447
    /// Ensures a directory entry's children get scanned by queuing a rescan
    /// of its path, resolving once the scan completes. Returns `None` if the
    /// entry doesn't exist.
    pub fn expand_entry(
        &mut self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let path = self.entry_for_id(entry_id)?.path.clone();
        let mut refresh = self.refresh_entries_for_paths(vec![path]);
        Some(cx.background_executor().spawn(async move {
            refresh.next().await;
            Ok(())
        }))
    }

    /// Asks the background scanner to rescan the given worktree-relative
    /// paths. The returned barrier receiver resolves (or closes) once the
    /// scan request has been processed; the send error is ignored because
    /// the scanner may already have shut down.
    pub fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
        let (tx, rx) = barrier::channel();
        self.scan_requests_tx
            .try_send(ScanRequest {
                relative_paths: paths,
                done: tx,
            })
            .ok();
        rx
    }
1471
1472 pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) {
1473 self.path_prefixes_to_scan_tx.try_send(path_prefix).ok();
1474 }
1475
    /// Rescans `path` (and `old_path`, when the refresh follows a rename) and
    /// returns the freshly-scanned entry. Excluded paths resolve to
    /// `Ok(None)` without scanning.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        if self.is_path_excluded(path.to_path_buf()) {
            return Task::ready(Ok(None));
        }
        let paths = if let Some(old_path) = old_path.as_ref() {
            vec![old_path.clone(), path.clone()]
        } else {
            vec![path.clone()]
        };
        let mut refresh = self.refresh_entries_for_paths(paths);
        cx.spawn(move |this, mut cx| async move {
            // Wait for the scanner to process the request, then read the
            // updated entry out of the worktree.
            refresh.recv().await;
            let new_entry = this.update(&mut cx, |this, _| {
                this.entry_for_path(path)
                    .cloned()
                    .ok_or_else(|| anyhow!("failed to read path after update"))
            })??;
            Ok(Some(new_entry))
        })
    }
1501
1502 pub fn observe_updates<F, Fut>(
1503 &mut self,
1504 project_id: u64,
1505 cx: &mut ModelContext<Worktree>,
1506 callback: F,
1507 ) -> oneshot::Receiver<()>
1508 where
1509 F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
1510 Fut: Send + Future<Output = bool>,
1511 {
1512 #[cfg(any(test, feature = "test-support"))]
1513 const MAX_CHUNK_SIZE: usize = 2;
1514 #[cfg(not(any(test, feature = "test-support")))]
1515 const MAX_CHUNK_SIZE: usize = 256;
1516
1517 let (share_tx, share_rx) = oneshot::channel();
1518
1519 if let Some(share) = self.share.as_mut() {
1520 share_tx.send(()).ok();
1521 *share.resume_updates.borrow_mut() = ();
1522 return share_rx;
1523 }
1524
1525 let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
1526 let (snapshots_tx, mut snapshots_rx) =
1527 mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
1528 snapshots_tx
1529 .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
1530 .ok();
1531
1532 let worktree_id = cx.entity_id().as_u64();
1533 let _maintain_remote_snapshot = cx.background_executor().spawn(async move {
1534 let mut is_first = true;
1535 while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
1536 let update;
1537 if is_first {
1538 update = snapshot.build_initial_update(project_id, worktree_id);
1539 is_first = false;
1540 } else {
1541 update =
1542 snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes);
1543 }
1544
1545 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1546 let _ = resume_updates_rx.try_recv();
1547 loop {
1548 let result = callback(update.clone());
1549 if result.await {
1550 break;
1551 } else {
1552 log::info!("waiting to resume updates");
1553 if resume_updates_rx.next().await.is_none() {
1554 return Some(());
1555 }
1556 }
1557 }
1558 }
1559 }
1560 share_tx.send(()).ok();
1561 Some(())
1562 });
1563
1564 self.share = Some(ShareState {
1565 project_id,
1566 snapshots_tx,
1567 resume_updates: resume_updates_tx,
1568 _maintain_remote_snapshot,
1569 });
1570 share_rx
1571 }
1572
    /// Shares this worktree under `project_id`: sends the current diagnostic
    /// summaries to the server up front, then streams worktree updates until
    /// sharing ends.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let client = self.client.clone();

        for (path, summaries) in &self.diagnostic_summaries {
            for (&server_id, summary) in summaries {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id: cx.entity_id().as_u64(),
                    summary: Some(summary.to_proto(server_id, path)),
                }) {
                    return Task::ready(Err(e));
                }
            }
        }

        // Keep streaming updates for as long as the remote acknowledges them.
        let rx = self.observe_updates(project_id, cx, move |update| {
            client.request(update).map(|result| result.is_ok())
        });
        cx.background_executor()
            .spawn(async move { rx.await.map_err(|_| anyhow!("share ended")) })
    }

    /// Stops sharing, dropping the update stream and its background task.
    pub fn unshare(&mut self) {
        self.share.take();
    }

    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1602}
1603
1604impl RemoteWorktree {
    /// Returns a clone of the current foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Tears down remote state after losing the connection to the host:
    /// stops accepting updates and drops all pending snapshot subscriptions
    /// (their senders are dropped, so waiters observe a failure).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the buffer, then applies the version,
    /// fingerprint, and mtime from the response to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: Model<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id().into();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.spawn(move |_, mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response.mtime.map(|mtime| mtime.into());

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            })?;

            Ok(())
        })
    }

    /// Enqueues an update received from the host for background processing.
    /// Updates are silently dropped once the worktree has disconnected.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot at least as new as `scan_id` has been applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }
1656
    /// Resolves once a snapshot with at least `scan_id` has been observed.
    /// Fails (the sender is dropped) when the worktree disconnects before
    /// that happens.
    pub fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            // Keep the subscription list sorted by scan_id; both the found
            // and the not-found cases yield a valid insertion index.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1677
    /// Records a language server's diagnostic summary for `path`. An empty
    /// summary removes the per-server record and, once no servers remain,
    /// the per-path map entirely.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }
1703
    /// Inserts an entry received from the host into the background snapshot
    /// once the snapshot has caught up to `scan_id`, mirroring the result
    /// into the foreground snapshot.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })?
        })
    }

    /// Deletes the entry with `id` once the snapshot has caught up to
    /// `scan_id`, mirroring the change into the foreground snapshot.
    pub fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(move |this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            })?;
            Ok(())
        })
    }
1741}
1742
1743impl Snapshot {
    /// The stable identifier of this worktree.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }
1751
1752 pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
1753 if path
1754 .components()
1755 .any(|component| !matches!(component, std::path::Component::Normal(_)))
1756 {
1757 return Err(anyhow!("invalid path"));
1758 }
1759 if path.file_name().is_some() {
1760 Ok(self.abs_path.join(path))
1761 } else {
1762 Ok(self.abs_path.to_path_buf())
1763 }
1764 }
1765
    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
1769
    /// Inserts (or replaces) an entry deserialized from the wire, keeping the
    /// path-keyed and id-keyed trees consistent with each other.
    fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id previously lived at a different path, drop the stale
        // path-keyed record before inserting the new one.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
1787
    /// Removes the entry with `entry_id` and its entire subtree from both
    /// trees, returning the removed entry's path (or `None` if absent).
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            // Rebuild the path tree as (everything before the removed path) +
            // (everything after its subtree), removing descendants' id
            // records while walking past them.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            new_entries_by_path.append(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }
1808
    /// Test-only helper: looks up the git status recorded for `path`.
    #[cfg(any(test, feature = "test-support"))]
    pub fn status_for_file(&self, path: impl Into<PathBuf>) -> Option<GitFileStatus> {
        let path = path.into();
        self.entries_by_path
            .get(&PathKey(Arc::from(path)), &())
            .and_then(|entry| entry.git_status)
    }
1816
1817 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1818 let mut entries_by_path_edits = Vec::new();
1819 let mut entries_by_id_edits = Vec::new();
1820
1821 for entry_id in update.removed_entries {
1822 let entry_id = ProjectEntryId::from_proto(entry_id);
1823 entries_by_id_edits.push(Edit::Remove(entry_id));
1824 if let Some(entry) = self.entry_for_id(entry_id) {
1825 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1826 }
1827 }
1828
1829 for entry in update.updated_entries {
1830 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1831 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1832 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1833 }
1834 if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
1835 if old_entry.id != entry.id {
1836 entries_by_id_edits.push(Edit::Remove(old_entry.id));
1837 }
1838 }
1839 entries_by_id_edits.push(Edit::Insert(PathEntry {
1840 id: entry.id,
1841 path: entry.path.clone(),
1842 is_ignored: entry.is_ignored,
1843 scan_id: 0,
1844 }));
1845 entries_by_path_edits.push(Edit::Insert(entry));
1846 }
1847
1848 self.entries_by_path.edit(entries_by_path_edits, &());
1849 self.entries_by_id.edit(entries_by_id_edits, &());
1850
1851 update.removed_repositories.sort_unstable();
1852 self.repository_entries.retain(|_, entry| {
1853 if let Ok(_) = update
1854 .removed_repositories
1855 .binary_search(&entry.work_directory.to_proto())
1856 {
1857 false
1858 } else {
1859 true
1860 }
1861 });
1862
1863 for repository in update.updated_repositories {
1864 let work_directory_entry: WorkDirectoryEntry =
1865 ProjectEntryId::from_proto(repository.work_directory_id).into();
1866
1867 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1868 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1869 if self.repository_entries.get(&work_directory).is_some() {
1870 self.repository_entries.update(&work_directory, |repo| {
1871 repo.branch = repository.branch.map(Into::into);
1872 });
1873 } else {
1874 self.repository_entries.insert(
1875 work_directory,
1876 RepositoryEntry {
1877 work_directory: work_directory_entry,
1878 branch: repository.branch.map(Into::into),
1879 },
1880 )
1881 }
1882 } else {
1883 log::error!("no work directory entry for repository {:?}", repository)
1884 }
1885 }
1886
1887 self.scan_id = update.scan_id as usize;
1888 if update.is_last_update {
1889 self.completed_scan_id = update.scan_id as usize;
1890 }
1891
1892 Ok(())
1893 }
1894
    /// Total number of file entries, ignored ones included.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of file entries that are not ignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().non_ignored_file_count
    }
1902
    /// Starts a traversal at the entry with ordinal `start_offset`, where the
    /// ordinal counts only entries matching the `include_dirs` /
    /// `include_ignored` filters.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal positioned at `path` (or, when absent, at the first
    /// entry that would sort after it).
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }
1940
    /// Iterates over file entries, starting at ordinal `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, directories included.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the git repositories in this worktree, keyed by their
    /// work-directory path.
    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
        self.repository_entries
            .iter()
            .map(|(path, entry)| (&path.0, entry))
    }
1954
    /// Get the repository whose work directory is exactly the given path
    /// (an exact key lookup, not a containment search).
    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
        self.repository_entries
            .get(&RepositoryWorkDirectory(path.into()))
            .cloned()
    }

    /// Get the repository whose work directory contains the given path.
    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
        self.repository_and_work_directory_for_path(path)
            .map(|e| e.1)
    }

    /// Get the repository whose work directory contains `path`, along with
    /// that work directory. The last (deepest) match wins when repositories
    /// are nested.
    pub fn repository_and_work_directory_for_path(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
        self.repository_entries
            .iter()
            .filter(|(workdir_path, _)| path.starts_with(workdir_path))
            .last()
            .map(|(path, repo)| (path.clone(), repo.clone()))
    }
1978
    /// Given an ordered iterator of entries, returns an iterator of those entries,
    /// along with their containing git repository.
    ///
    /// Both `entries` and `self.repositories()` are iterated in path order,
    /// so the containing repositories can be tracked with a stack in one pass.
    pub fn entries_with_repositories<'a>(
        &'a self,
        entries: impl 'a + Iterator<Item = &'a Entry>,
    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
        let mut repositories = self.repositories().peekable();
        entries.map(move |entry| {
            // Pop repos whose work directory no longer contains this entry.
            while let Some((repo_path, _)) = containing_repos.last() {
                if !entry.path.starts_with(repo_path) {
                    containing_repos.pop();
                } else {
                    break;
                }
            }
            // Push any repos whose work directory newly contains this entry.
            while let Some((repo_path, _)) = repositories.peek() {
                if entry.path.starts_with(repo_path) {
                    containing_repos.push(repositories.next().unwrap());
                } else {
                    break;
                }
            }
            // The innermost containing repository (top of the stack) wins.
            let repo = containing_repos.last().map(|(_, repo)| *repo);
            (entry, repo)
        })
    }
2006
    /// Updates the `git_status` of the given entries such that files'
    /// statuses bubble up to their ancestor directories.
    pub fn propagate_git_statuses(&self, result: &mut [Entry]) {
        let mut cursor = self
            .entries_by_path
            .cursor::<(TraversalProgress, GitStatuses)>();
        // Stack of directory entries (indices into `result`) currently being
        // aggregated, paired with the cumulative status counts observed when
        // each directory was entered.
        let mut entry_stack = Vec::<(usize, GitStatuses)>::new();

        let mut result_ix = 0;
        loop {
            let next_entry = result.get(result_ix);
            let containing_entry = entry_stack.last().map(|(ix, _)| &result[*ix]);

            // Decide whether the directory on top of the stack is finished:
            // either the next entry lies outside it, or we've run out of
            // entries entirely.
            let entry_to_finish = match (containing_entry, next_entry) {
                (Some(_), None) => entry_stack.pop(),
                (Some(containing_entry), Some(next_path)) => {
                    if !next_path.path.starts_with(&containing_entry.path) {
                        entry_stack.pop()
                    } else {
                        None
                    }
                }
                (None, Some(_)) => None,
                (None, None) => break,
            };

            if let Some((entry_ix, prev_statuses)) = entry_to_finish {
                // Advance past the directory's entire subtree, then compute
                // the statuses accumulated within it as a difference of
                // cursor summaries.
                cursor.seek_forward(
                    &TraversalTarget::PathSuccessor(&result[entry_ix].path),
                    Bias::Left,
                    &(),
                );

                let statuses = cursor.start().1 - prev_statuses;

                // Conflict takes precedence over modified, which takes
                // precedence over added.
                result[entry_ix].git_status = if statuses.conflict > 0 {
                    Some(GitFileStatus::Conflict)
                } else if statuses.modified > 0 {
                    Some(GitFileStatus::Modified)
                } else if statuses.added > 0 {
                    Some(GitFileStatus::Added)
                } else {
                    None
                };
            } else {
                if result[result_ix].is_dir() {
                    // Entering a directory: remember the running totals so
                    // its subtree's statuses can be computed on exit.
                    cursor.seek_forward(
                        &TraversalTarget::Path(&result[result_ix].path),
                        Bias::Left,
                        &(),
                    );
                    entry_stack.push((result_ix, cursor.start().1));
                }
                result_ix += 1;
            }
        }
    }
2064
    /// Iterates over all entry paths, excluding the root's empty path.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Bias::Right positions the cursor just past the parent entry itself.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }
2086
    /// Iterates over the descendants of `parent_path` (excluding the parent
    /// itself), subject to the `include_dirs` / `include_ignored` filters.
    pub fn descendent_entries<'a>(
        &'a self,
        include_dirs: bool,
        include_ignored: bool,
        parent_path: &'a Path,
    ) -> DescendentEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
        let mut traversal = Traversal {
            cursor,
            include_dirs,
            include_ignored,
        };

        // Skip past the parent entry itself when the seek landed directly
        // on it.
        if traversal.end_offset() == traversal.start_offset() {
            traversal.advance();
        }

        DescendentEntriesIter {
            traversal,
            parent_path,
        }
    }
2110
    /// The entry for the worktree root, if it has been scanned.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The repository whose work directory is the worktree root, if any.
    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
        self.repository_entries
            .get(&RepositoryWorkDirectory(Path::new("").into()))
            .map(|entry| entry.to_owned())
    }

    /// Iterates over all repository entries in this worktree.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repository_entries.values()
    }

    /// The id of the most recent scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }
2132
    /// Looks up the entry at exactly `path`, returning `None` when the
    /// traversal lands on some other path instead.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }
2145
    /// Looks up an entry by id: resolves the id to a path, then the path to
    /// the full entry.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode recorded for `path`, if an entry exists there.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
2154}
2155
2156impl LocalSnapshot {
    /// The local (on-disk) repository state for the given repository entry,
    /// if present.
    pub fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }

    /// The repository containing `path`, along with its local on-disk state.
    pub(crate) fn local_repo_for_path(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, &LocalRepositoryEntry)> {
        let (path, repo) = self.repository_and_work_directory_for_path(path)?;
        Some((path, self.git_repositories.get(&repo.work_directory_id())?))
    }

    /// A handle to the git repository containing `path`, if any.
    pub fn local_git_repo(&self, path: &Path) -> Option<Arc<Mutex<dyn GitRepository>>> {
        self.local_repo_for_path(path)
            .map(|(_, entry)| entry.repo_ptr.clone())
    }
2173
    /// Builds an incremental `UpdateWorktree` message from the given entry
    /// and repository change sets.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updated_repositories = Vec::new();
        let mut removed_repositories = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        for (work_dir_path, change) in repo_changes.iter() {
            let new_repo = self
                .repository_entries
                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
            // Compare the old and new repository state to decide whether to
            // send a diff, a full record, or a removal.
            match (&change.old_repository, new_repo) {
                (Some(old_repo), Some(new_repo)) => {
                    updated_repositories.push(new_repo.build_update(old_repo));
                }
                (None, Some(new_repo)) => {
                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
                }
                (Some(old_repo), None) => {
                    removed_repositories.push(old_repo.work_directory.0.to_proto());
                }
                _ => {}
            }
        }

        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);
        removed_repositories.sort_unstable();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        // An entry that appears in both lists (removed and then re-created
        // within the same batch) should only be reported as updated.
        // TODO - optimize, knowing that removed_entries are sorted.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }
2233
    /// Builds the first `UpdateWorktree` message for a newly-shared worktree,
    /// containing every entry and repository in the snapshot.
    fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
        let mut updated_entries = self
            .entries_by_path
            .iter()
            .map(proto::Entry::from)
            .collect::<Vec<_>>();
        // Sort by id to match the ordering `build_update` produces.
        updated_entries.sort_unstable_by_key(|e| e.id);

        let mut updated_repositories = self
            .repository_entries
            .values()
            .map(proto::RepositoryEntry::from)
            .collect::<Vec<_>>();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries: Vec::new(),
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories: Vec::new(),
        }
    }
2262
    /// Inserts (or replaces) `entry` in both path- and id-keyed trees,
    /// returning the entry as stored. If the entry is a `.gitignore` file,
    /// its contents are parsed synchronously and cached for ignore checks.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            // Blocking load: gitignore contents must be available before any
            // descendant paths are evaluated against the ignore stack.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    // The `true` flag marks this ignore as freshly (re)loaded.
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Preserve the more specific directory kind if this path was already
        // loaded; a PendingDir must not downgrade an existing Dir.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If the path was previously occupied by a different entry id, drop
        // the stale id record so the two trees stay consistent.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
2308
2309 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2310 let mut inodes = TreeSet::default();
2311 for ancestor in path.ancestors().skip(1) {
2312 if let Some(entry) = self.entry_for_path(ancestor) {
2313 inodes.insert(entry.inode);
2314 }
2315 }
2316 inodes
2317 }
2318
    /// Builds the stack of gitignore files that applies to `abs_path`,
    /// walking from the path up to the repository root (the first ancestor
    /// containing a `.git` directory).
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        // Walk upward, recording for each ancestor whether it has a cached
        // gitignore. Index 0 is `abs_path` itself, which contributes no
        // ignore of its own.
        for (index, ancestor) in abs_path.ancestors().enumerate() {
            if index > 0 {
                if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                    new_ignores.push((ancestor, Some(ignore.clone())));
                } else {
                    new_ignores.push((ancestor, None));
                }
            }
            if ancestor.join(&*DOT_GIT).is_dir() {
                break;
            }
        }

        // Replay the ancestors top-down so outer ignores are consulted
        // before inner ones, matching git's semantics.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                // An ignored directory ignores everything beneath it; no
                // need to consider deeper gitignore files.
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
2350
2351 #[cfg(test)]
2352 pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
2353 self.entries_by_path
2354 .cursor::<()>()
2355 .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
2356 }
2357
    /// Test helper: asserts internal consistency of the snapshot — that the
    /// path-keyed and id-keyed trees agree, that file traversals visit
    /// exactly the file entries, that traversal order matches a breadth-first
    /// walk of `child_entries`, and (optionally) that cached gitignores
    /// correspond to entries that still exist.
    #[cfg(test)]
    pub fn check_invariants(&self, git_state: bool) {
        use pretty_assertions::assert_eq;

        // Both trees must contain exactly the same (path, id) pairs.
        assert_eq!(
            self.entries_by_path
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        // `files` / visible `files` traversals must yield the file entries
        // in the same order as a full cursor walk.
        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>() {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if !entry.is_ignored && !entry.is_external {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // Reconstruct the expected order via `child_entries`, then compare
        // against both the raw cursor and the public traversal API.
        let mut bfs_paths = Vec::new();
        let mut stack = self
            .root_entry()
            .map(|e| e.path.as_ref())
            .into_iter()
            .collect::<Vec<_>>();
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>()
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        let dfs_paths_via_traversal = self
            .entries(true)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        if git_state {
            // Every cached gitignore must correspond to a directory entry
            // that still exists and still contains a `.gitignore` file.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }
    }
2428
2429 #[cfg(test)]
2430 pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
2431 let mut paths = Vec::new();
2432 for entry in self.entries_by_path.cursor::<()>() {
2433 if include_ignored || !entry.is_ignored {
2434 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
2435 }
2436 }
2437 paths.sort_by(|a, b| a.0.cmp(b.0));
2438 paths
2439 }
2440
2441 pub fn is_path_private(&self, path: &Path) -> bool {
2442 path.ancestors().any(|ancestor| {
2443 self.private_files
2444 .iter()
2445 .any(|exclude_matcher| exclude_matcher.is_match(&ancestor))
2446 })
2447 }
2448
2449 pub fn is_path_excluded(&self, mut path: PathBuf) -> bool {
2450 loop {
2451 if self
2452 .file_scan_exclusions
2453 .iter()
2454 .any(|exclude_matcher| exclude_matcher.is_match(&path))
2455 {
2456 return true;
2457 }
2458 if !path.pop() {
2459 return false;
2460 }
2461 }
2462 }
2463}
2464
impl BackgroundScannerState {
    /// Decides whether the scanner should descend into `entry`'s directory.
    /// Ignored/external directories are normally skipped, but `.git` is
    /// always scanned, previously scanned directories stay scanned, and
    /// pending scan requests force a descent.
    fn should_scan_directory(&self, entry: &Entry) -> bool {
        (!entry.is_external && !entry.is_ignored)
            || entry.path.file_name() == Some(*DOT_GIT)
            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
            || self
                .paths_to_scan
                .iter()
                .any(|p| p.starts_with(&entry.path))
            || self
                .path_prefixes_to_scan
                .iter()
                .any(|p| entry.path.starts_with(p))
    }

    /// Queues a `ScanJob` for `entry`'s directory, attaching the applicable
    /// ignore stack and, for non-ignored paths inside a git work directory,
    /// the repository handle plus its staged statuses.
    fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
        let path = entry.path.clone();
        let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
        let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);
        let mut containing_repository = None;
        if !ignore_stack.is_abs_path_ignored(&abs_path, true) {
            if let Some((workdir_path, repo)) = self.snapshot.local_repo_for_path(&path) {
                if let Ok(repo_path) = path.strip_prefix(&workdir_path.0) {
                    containing_repository = Some((
                        workdir_path,
                        repo.repo_ptr.clone(),
                        repo.repo_ptr.lock().staged_statuses(repo_path),
                    ));
                }
            }
        }
        // Skip the job if this directory's inode is already one of its own
        // ancestors — presumably a symlink-cycle guard; confirm with the
        // consumer of `ancestor_inodes`.
        if !ancestor_inodes.contains(&entry.inode) {
            ancestor_inodes.insert(entry.inode);
            scan_job_tx
                .try_send(ScanJob {
                    abs_path,
                    path,
                    ignore_stack,
                    scan_queue: scan_job_tx.clone(),
                    ancestor_inodes,
                    is_external: entry.is_external,
                    containing_repository,
                })
                .unwrap();
        }
    }

    /// Keeps entry ids stable across rescans: reuses the id of an entry
    /// previously removed with the same inode, or of an existing entry at
    /// the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Inserts `entry` into the snapshot (after id reuse), building a git
    /// repository handle when the entry is a `.git` directory.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        self.reuse_entry_id(&mut entry);
        let entry = self.snapshot.insert_entry(entry, fs);
        if entry.path.file_name() == Some(&DOT_GIT) {
            self.build_git_repository(entry.path.clone(), fs);
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);

        entry
    }

    /// Records the children of a just-scanned directory: marks the parent
    /// as a loaded `Dir`, caches its gitignore (if any), inserts all child
    /// entries into both trees, and notes the parent path as changed.
    fn populate_dir(
        &mut self,
        parent_path: &Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = if let Some(parent_entry) = self
            .snapshot
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            // The directory may have been deleted between the scan and this
            // call; in that case there is nothing to populate.
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
            EntryKind::Dir => {}
            // A non-directory at this path means the filesystem changed out
            // from under us; bail rather than corrupt the tree.
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
            self.snapshot
                .ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        let parent_entry_id = parent_entry.id;
        self.scanned_dirs.insert(parent_entry_id);
        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for entry in entries {
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.snapshot.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // Keep `changed_paths` sorted and deduplicated.
        if let Err(ix) = self.changed_paths.binary_search(parent_path) {
            self.changed_paths.insert(ix, parent_path.clone());
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }

    /// Removes `path` and all of its descendants from the snapshot,
    /// remembering removed ids (keyed by inode) for later reuse, and marks
    /// the parent gitignore as needing a reload when a `.gitignore` file is
    /// removed.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into [before `path`], [the subtree
            // rooted at `path`], and [after], keeping the outer two.
            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.append(cursor.suffix(&()), &());
        }
        self.snapshot.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Record the highest removed id per inode so a re-created file
            // can reclaim a stable id.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
            if let Some((_, needs_update)) = self
                .snapshot
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *needs_update = true;
            }
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }

    /// Reloads git status for the given `.git` directories (building new
    /// repository handles as needed) and prunes repositories whose `.git`
    /// entry no longer exists.
    fn reload_repositories(&mut self, dot_git_dirs_to_reload: &HashSet<PathBuf>, fs: &dyn Fs) {
        let scan_id = self.snapshot.scan_id;

        for dot_git_dir in dot_git_dirs_to_reload {
            // If there is already a repository for this .git directory, reload
            // the status for all of its files.
            let repository = self
                .snapshot
                .git_repositories
                .iter()
                .find_map(|(entry_id, repo)| {
                    (repo.git_dir_path.as_ref() == dot_git_dir).then(|| (*entry_id, repo.clone()))
                });
            match repository {
                None => {
                    self.build_git_repository(Arc::from(dot_git_dir.as_path()), fs);
                }
                Some((entry_id, repository)) => {
                    // Already refreshed during this scan; skip.
                    if repository.git_dir_scan_id == scan_id {
                        continue;
                    }
                    let Some(work_dir) = self
                        .snapshot
                        .entry_for_id(entry_id)
                        .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
                    else {
                        continue;
                    };

                    log::info!("reload git repository {dot_git_dir:?}");
                    let repository = repository.repo_ptr.lock();
                    let branch = repository.branch_name();
                    repository.reload_index();

                    self.snapshot
                        .git_repositories
                        .update(&entry_id, |entry| entry.git_dir_scan_id = scan_id);
                    self.snapshot
                        .snapshot
                        .repository_entries
                        .update(&work_dir, |entry| entry.branch = branch.map(Into::into));

                    self.update_git_statuses(&work_dir, &*repository);
                }
            }
        }

        // Remove any git repositories whose .git entry no longer exists.
        let snapshot = &mut self.snapshot;
        let mut ids_to_preserve = HashSet::default();
        for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
            let exists_in_snapshot = snapshot
                .entry_for_id(work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                });
            if exists_in_snapshot {
                ids_to_preserve.insert(work_directory_id);
            } else {
                // An excluded `.git` dir isn't in the snapshot but may still
                // exist on disk — keep the repository unless the filesystem
                // confirms it is gone.
                let git_dir_abs_path = snapshot.abs_path().join(&entry.git_dir_path);
                let git_dir_excluded = snapshot.is_path_excluded(entry.git_dir_path.to_path_buf());
                if git_dir_excluded
                    && !matches!(smol::block_on(fs.metadata(&git_dir_abs_path)), Ok(None))
                {
                    ids_to_preserve.insert(work_directory_id);
                }
            }
        }
        snapshot
            .git_repositories
            .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
        snapshot
            .repository_entries
            .retain(|_, entry| ids_to_preserve.contains(&entry.work_directory.0));
    }

    /// Opens the repository rooted at `dot_git_path`'s parent, registers it
    /// in the snapshot, and returns its work directory, repo handle and
    /// staged statuses. Returns `None` for nested/root `.git` paths, for
    /// already-registered repositories, or when the repo cannot be opened.
    fn build_git_repository(
        &mut self,
        dot_git_path: Arc<Path>,
        fs: &dyn Fs,
    ) -> Option<(
        RepositoryWorkDirectory,
        Arc<Mutex<dyn GitRepository>>,
        TreeMap<RepoPath, GitFileStatus>,
    )> {
        let work_dir_path: Arc<Path> = match dot_git_path.parent() {
            Some(parent_dir) => {
                // Guard against repositories inside the repository metadata
                if parent_dir.iter().any(|component| component == *DOT_GIT) {
                    log::info!(
                        "not building git repository for nested `.git` directory, `.git` path in the worktree: {dot_git_path:?}"
                    );
                    return None;
                };
                log::info!(
                    "building git repository, `.git` path in the worktree: {dot_git_path:?}"
                );
                parent_dir.into()
            }
            None => {
                // `dot_git_path.parent().is_none()` means `.git` directory is the opened worktree itself,
                // no files inside that directory are tracked by git, so no need to build the repo around it
                log::info!(
                    "not building git repository for the worktree itself, `.git` path in the worktree: {dot_git_path:?}"
                );
                return None;
            }
        };

        let work_dir_id = self
            .snapshot
            .entry_for_path(work_dir_path.clone())
            .map(|entry| entry.id)?;

        // Already registered for this work directory; nothing to do.
        if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
            return None;
        }

        let abs_path = self.snapshot.abs_path.join(&dot_git_path);
        let repository = fs.open_repo(abs_path.as_path())?;
        let work_directory = RepositoryWorkDirectory(work_dir_path.clone());

        let repo_lock = repository.lock();
        self.snapshot.repository_entries.insert(
            work_directory.clone(),
            RepositoryEntry {
                work_directory: work_dir_id.into(),
                branch: repo_lock.branch_name().map(Into::into),
            },
        );

        let staged_statuses = self.update_git_statuses(&work_directory, &*repo_lock);
        drop(repo_lock);

        self.snapshot.git_repositories.insert(
            work_dir_id,
            LocalRepositoryEntry {
                git_dir_scan_id: 0,
                repo_ptr: repository.clone(),
                git_dir_path: dot_git_path.clone(),
            },
        );

        Some((work_directory, repository, staged_statuses))
    }

    /// Recomputes the git status of every entry under `work_directory`,
    /// writing changed entries back into the snapshot and recording their
    /// paths in `changed_paths`. Returns the repo's staged statuses.
    fn update_git_statuses(
        &mut self,
        work_directory: &RepositoryWorkDirectory,
        repo: &dyn GitRepository,
    ) -> TreeMap<RepoPath, GitFileStatus> {
        let staged_statuses = repo.staged_statuses(Path::new(""));

        let mut changes = vec![];
        let mut edits = vec![];

        for mut entry in self
            .snapshot
            .descendent_entries(false, false, &work_directory.0)
            .cloned()
        {
            let Ok(repo_path) = entry.path.strip_prefix(&work_directory.0) else {
                continue;
            };
            // Entries without an mtime (not yet created on disk) have no
            // meaningful git status.
            let Some(mtime) = entry.mtime else {
                continue;
            };
            let repo_path = RepoPath(repo_path.to_path_buf());
            let git_file_status = combine_git_statuses(
                staged_statuses.get(&repo_path).copied(),
                repo.unstaged_status(&repo_path, mtime),
            );
            // Only re-insert entries whose status actually changed.
            if entry.git_status != git_file_status {
                entry.git_status = git_file_status;
                changes.push(entry.path.clone());
                edits.push(Edit::Insert(entry));
            }
        }

        self.snapshot.entries_by_path.edit(edits, &());
        util::extend_sorted(&mut self.changed_paths, changes, usize::MAX, Ord::cmp);
        staged_statuses
    }
}
2815
2816async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2817 let contents = fs.load(abs_path).await?;
2818 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2819 let mut builder = GitignoreBuilder::new(parent);
2820 for line in contents.lines() {
2821 builder.add_line(Some(abs_path.into()), line)?;
2822 }
2823 Ok(builder.build()?)
2824}
2825
2826impl WorktreeId {
2827 pub fn from_usize(handle_id: usize) -> Self {
2828 Self(handle_id)
2829 }
2830
2831 pub fn from_proto(id: u64) -> Self {
2832 Self(id as usize)
2833 }
2834
2835 pub fn to_proto(&self) -> u64 {
2836 self.0 as u64
2837 }
2838
2839 pub fn to_usize(&self) -> usize {
2840 self.0
2841 }
2842}
2843
2844impl fmt::Display for WorktreeId {
2845 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2846 self.0.fmt(f)
2847 }
2848}
2849
2850impl Deref for Worktree {
2851 type Target = Snapshot;
2852
2853 fn deref(&self) -> &Self::Target {
2854 match self {
2855 Worktree::Local(worktree) => &worktree.snapshot,
2856 Worktree::Remote(worktree) => &worktree.snapshot,
2857 }
2858 }
2859}
2860
// A `LocalWorktree` dereferences to its `LocalSnapshot`.
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2868
// A `RemoteWorktree` dereferences to its plain `Snapshot`.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2876
// Debug output for a local worktree is just its snapshot's debug output.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
2882
impl fmt::Debug for Snapshot {
    /// Renders the snapshot's two entry trees without dumping full `Entry`
    /// structs: paths map to ids for the path tree, and id records are
    /// listed directly.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Local newtypes let us customize how each sum tree is printed.
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl<'a> fmt::Debug for EntriesByPath<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl<'a> fmt::Debug for EntriesById<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}
2910
/// A handle to a file within a worktree, as seen by language buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: Model<Worktree>,
    /// Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    /// Last known modification time; `None` if the file has never existed
    /// on disk.
    pub mtime: Option<SystemTime>,
    /// The worktree entry backing this file, if one exists.
    pub entry_id: Option<ProjectEntryId>,
    /// Whether this file lives in a local (vs. remote) worktree.
    pub is_local: bool,
    /// Whether the file has been deleted from the worktree.
    pub is_deleted: bool,
    /// Whether the file matched a private-file pattern (e.g. `.env`).
    pub is_private: bool,
}
2921
impl language::File for File {
    /// Only local files expose the `LocalFile` interface.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> Option<SystemTime> {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Returns a display path: for visible worktrees, the worktree's root
    /// name followed by the relative path; for hidden worktrees, the
    /// absolute path (with the home directory abbreviated to `~` locally).
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means this file *is* the worktree root;
        // pushing it would add a spurious separator.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn worktree_id(&self) -> usize {
        self.worktree.entity_id().as_u64() as usize
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission to remote collaborators.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.entity_id().as_u64(),
            entry_id: self.entry_id.map(|id| id.to_proto()),
            path: self.path.to_string_lossy().into(),
            mtime: self.mtime.map(|time| time.into()),
            is_deleted: self.is_deleted,
        }
    }

    fn is_private(&self) -> bool {
        self.is_private
    }
}
2997
impl language::LocalFile for File {
    /// Returns the absolute path of this file on disk.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
        // An empty relative path means the worktree root itself; joining it
        // would append a trailing separator.
        if self.path.as_ref() == Path::new("") {
            worktree_path.to_path_buf()
        } else {
            worktree_path.join(&self.path)
        }
    }

    /// Loads the file's contents on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background_executor()
            .spawn(async move { fs.load(&abs_path?).await })
    }

    /// Notifies remote collaborators that the buffer was reloaded from disk,
    /// if this worktree is currently being shared.
    fn buffer_reloaded(
        &self,
        buffer_id: BufferId,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: Option<SystemTime>,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id: buffer_id.into(),
                    version: serialize_version(version),
                    mtime: mtime.map(|time| time.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
3041
impl File {
    /// Creates a local file handle from a worktree entry.
    pub fn for_entry(entry: Entry, worktree: Model<Worktree>) -> Arc<Self> {
        Arc::new(Self {
            worktree,
            path: entry.path.clone(),
            mtime: entry.mtime,
            entry_id: Some(entry.id),
            is_local: true,
            is_deleted: false,
            is_private: entry.is_private,
        })
    }

    /// Deserializes a file handle received from a collaborator. Fails if
    /// `worktree` is not remote or does not match the message's worktree id.
    pub fn from_proto(
        proto: rpc::proto::File,
        worktree: Model<Worktree>,
        cx: &AppContext,
    ) -> Result<Self> {
        let worktree_id = worktree
            .read(cx)
            .as_remote()
            .ok_or_else(|| anyhow!("not remote"))?
            .id();

        if worktree_id.to_proto() != proto.worktree_id {
            return Err(anyhow!("worktree id does not match file"));
        }

        Ok(Self {
            worktree,
            path: Path::new(&proto.path).into(),
            mtime: proto.mtime.map(|time| time.into()),
            entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
            is_local: false,
            is_deleted: proto.is_deleted,
            // NOTE(review): privacy is not carried over the wire here —
            // remote files are always treated as non-private.
            is_private: false,
        })
    }

    /// Downcasts a `dyn language::File` back to this concrete type.
    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
        file.and_then(|f| f.as_any().downcast_ref())
    }

    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
        self.worktree.read(cx).id()
    }

    /// Returns the backing entry id, or `None` once the file is deleted.
    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
        if self.is_deleted {
            None
        } else {
            self.entry_id
        }
    }
}
3097
/// A single file or directory tracked by a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable identifier, preserved across rescans where possible.
    pub id: ProjectEntryId,
    /// Whether this is a file or a (possibly not-yet-loaded) directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode, used to keep ids stable and detect cycles.
    pub inode: u64,
    /// Last known modification time; `None` for entries not yet on disk.
    pub mtime: Option<SystemTime>,
    /// Whether the entry itself is a symbolic link.
    pub is_symlink: bool,

    /// Whether this entry is ignored by Git.
    ///
    /// We only scan ignored entries once the directory is expanded and
    /// exclude them from searches.
    pub is_ignored: bool,

    /// Whether this entry's canonical path is outside of the worktree.
    /// This means the entry is only accessible from the worktree root via a
    /// symlink.
    ///
    /// We only scan entries outside of the worktree once the symlinked
    /// directory is expanded. External entries are treated like gitignored
    /// entries in that they are not included in searches.
    pub is_external: bool,
    /// Combined staged/unstaged git status, if the entry is in a repository.
    pub git_status: Option<GitFileStatus>,
    /// Whether this entry is considered to be a `.env` file.
    pub is_private: bool,
}
3125
/// The kind of a worktree entry, including directory loading state.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose contents have not been scanned and are not queued.
    UnloadedDir,
    /// A directory whose scan is queued but not yet complete.
    PendingDir,
    /// A fully loaded directory.
    Dir,
    /// A file, with a precomputed character bag for fuzzy matching.
    File(CharBag),
}
3133
/// How a path changed in an `UpdatedEntriesSet`.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
3149
/// Describes a change to a git repository tracked by the worktree.
pub struct GitRepositoryChange {
    /// The previous state of the repository, if it already existed.
    pub old_repository: Option<RepositoryEntry>,
}
3154
/// A batch of entry changes: (path, entry id, kind of change).
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// A batch of repository changes keyed by work-directory path.
pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
3157
3158impl Entry {
3159 fn new(
3160 path: Arc<Path>,
3161 metadata: &fs::Metadata,
3162 next_entry_id: &AtomicUsize,
3163 root_char_bag: CharBag,
3164 ) -> Self {
3165 Self {
3166 id: ProjectEntryId::new(next_entry_id),
3167 kind: if metadata.is_dir {
3168 EntryKind::PendingDir
3169 } else {
3170 EntryKind::File(char_bag_for_path(root_char_bag, &path))
3171 },
3172 path,
3173 inode: metadata.inode,
3174 mtime: Some(metadata.mtime),
3175 is_symlink: metadata.is_symlink,
3176 is_ignored: false,
3177 is_external: false,
3178 is_private: false,
3179 git_status: None,
3180 }
3181 }
3182
3183 pub fn is_created(&self) -> bool {
3184 self.mtime.is_some()
3185 }
3186
3187 pub fn is_dir(&self) -> bool {
3188 self.kind.is_dir()
3189 }
3190
3191 pub fn is_file(&self) -> bool {
3192 self.kind.is_file()
3193 }
3194
3195 pub fn git_status(&self) -> Option<GitFileStatus> {
3196 self.git_status
3197 }
3198}
3199
3200impl EntryKind {
3201 pub fn is_dir(&self) -> bool {
3202 matches!(
3203 self,
3204 EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
3205 )
3206 }
3207
3208 pub fn is_unloaded(&self) -> bool {
3209 matches!(self, EntryKind::UnloadedDir)
3210 }
3211
3212 pub fn is_file(&self) -> bool {
3213 matches!(self, EntryKind::File(_))
3214 }
3215}
3216
3217impl sum_tree::Item for Entry {
3218 type Summary = EntrySummary;
3219
3220 fn summary(&self) -> Self::Summary {
3221 let non_ignored_count = if self.is_ignored || self.is_external {
3222 0
3223 } else {
3224 1
3225 };
3226 let file_count;
3227 let non_ignored_file_count;
3228 if self.is_file() {
3229 file_count = 1;
3230 non_ignored_file_count = non_ignored_count;
3231 } else {
3232 file_count = 0;
3233 non_ignored_file_count = 0;
3234 }
3235
3236 let mut statuses = GitStatuses::default();
3237 match self.git_status {
3238 Some(status) => match status {
3239 GitFileStatus::Added => statuses.added = 1,
3240 GitFileStatus::Modified => statuses.modified = 1,
3241 GitFileStatus::Conflict => statuses.conflict = 1,
3242 },
3243 None => {}
3244 }
3245
3246 EntrySummary {
3247 max_path: self.path.clone(),
3248 count: 1,
3249 non_ignored_count,
3250 file_count,
3251 non_ignored_file_count,
3252 statuses,
3253 }
3254 }
3255}
3256
// Entries are keyed by their worktree-relative path.
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
3264
/// Aggregated statistics over a subtree of entries, used for efficient
/// seeking and counting in the entries sum tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (last, in traversal order) path in the subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries that are neither ignored nor external.
    non_ignored_count: usize,
    /// Entries that are files.
    file_count: usize,
    /// Files that are neither ignored nor external.
    non_ignored_file_count: usize,
    /// Tallies of git statuses in the subtree.
    statuses: GitStatuses,
}
3274
// Manual impl because `Arc<Path>` has no `Default`; the empty path acts as
// the identity for `max_path`.
impl Default for EntrySummary {
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
            statuses: Default::default(),
        }
    }
}
3287
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines two adjacent summaries: counts add, and `max_path` takes the
    /// right-hand (later) side since summaries are accumulated in order.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.non_ignored_count += rhs.non_ignored_count;
        self.file_count += rhs.file_count;
        self.non_ignored_file_count += rhs.non_ignored_file_count;
        self.statuses += rhs.statuses;
    }
}
3300
/// Lightweight record in the id-keyed tree, mirroring an `Entry` in the
/// path-keyed tree.
#[derive(Clone, Debug)]
struct PathEntry {
    // Same id as the corresponding `Entry`.
    id: ProjectEntryId,
    // Worktree-relative path of the entry.
    path: Arc<Path>,
    // Mirrors `Entry::is_ignored`.
    is_ignored: bool,
    // The scan during which this record was last written.
    scan_id: usize,
}
3308
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
3316
// `PathEntry` records are keyed by entry id, enabling id-based lookups.
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    fn key(&self) -> Self::Key {
        self.id
    }
}
3324
/// Summary over `PathEntry` subtrees: just the greatest id, which is enough
/// to seek by id.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
3329
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    // The right-hand summary is later in id order, so its max wins.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
3337
// Lets a cursor over `PathEntry` trees seek directly by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
3343
/// Ordering key for the path-keyed entries tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
3346
// Manual impl because `Arc<Path>` has no `Default`; the empty path sorts
// before all others.
impl Default for PathKey {
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
3352
// Lets a cursor over `Entry` trees seek directly by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
3358
/// Long-running worker that keeps a `LocalSnapshot` in sync with the
/// filesystem, reporting progress over `status_updates_tx`.
struct BackgroundScanner {
    // Mutable scanner state, shared with concurrently running scan jobs.
    state: Mutex<BackgroundScannerState>,
    fs: Arc<dyn Fs>,
    // Whether the underlying filesystem distinguishes path case.
    fs_case_sensitive: bool,
    // Channel used to publish scan progress/results to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: BackgroundExecutor,
    // Incoming requests to (re)scan specific paths.
    scan_requests_rx: channel::Receiver<ScanRequest>,
    // Incoming requests to eagerly scan everything under a path prefix.
    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    // Source of fresh `ProjectEntryId`s, shared with the worktree.
    next_entry_id: Arc<AtomicUsize>,
    phase: BackgroundScannerPhase,
}
3370
/// The scanner's lifecycle stage. `build_change_set` consults this to decide
/// whether entries should be reported as `Loaded` vs `Added`/`Updated`.
#[derive(PartialEq)]
enum BackgroundScannerPhase {
    // The initial recursive scan is still in progress.
    InitialScan,
    // Handling FS events that were generated while the initial scan ran, so
    // precise change kinds cannot always be determined.
    EventsReceivedDuringInitialScan,
    // Steady state: processing live FS events after the initial scan.
    Events,
}
3377
3378impl BackgroundScanner {
3379 #[allow(clippy::too_many_arguments)]
3380 fn new(
3381 snapshot: LocalSnapshot,
3382 next_entry_id: Arc<AtomicUsize>,
3383 fs: Arc<dyn Fs>,
3384 fs_case_sensitive: bool,
3385 status_updates_tx: UnboundedSender<ScanState>,
3386 executor: BackgroundExecutor,
3387 scan_requests_rx: channel::Receiver<ScanRequest>,
3388 path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
3389 ) -> Self {
3390 Self {
3391 fs,
3392 fs_case_sensitive,
3393 status_updates_tx,
3394 executor,
3395 scan_requests_rx,
3396 path_prefixes_to_scan_rx,
3397 next_entry_id,
3398 state: Mutex::new(BackgroundScannerState {
3399 prev_snapshot: snapshot.snapshot.clone(),
3400 snapshot,
3401 scanned_dirs: Default::default(),
3402 path_prefixes_to_scan: Default::default(),
3403 paths_to_scan: Default::default(),
3404 removed_entry_ids: Default::default(),
3405 changed_paths: Default::default(),
3406 }),
3407 phase: BackgroundScannerPhase::InitialScan,
3408 }
3409 }
3410
    /// Main loop of the scanner: perform the initial recursive scan, then keep
    /// processing rescan requests, path-prefix registrations, and filesystem
    /// events until the channels close (i.e. the worktree is dropped).
    async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathBuf>>>>) {
        use futures::FutureExt as _;

        // Populate ignores above the root, so that gitignore rules defined in
        // ancestor directories still apply inside the worktree. Stop at the
        // repository root.
        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
        for (index, ancestor) in root_abs_path.ancestors().enumerate() {
            if index != 0 {
                if let Ok(ignore) =
                    build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
                {
                    self.state
                        .lock()
                        .snapshot
                        .ignores_by_parent_abs_path
                        .insert(ancestor.into(), (ignore.into(), false));
                }
            }
            if ancestor.join(&*DOT_GIT).is_dir() {
                // Reached root of git repository.
                break;
            }
        }

        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        {
            let mut state = self.state.lock();
            state.snapshot.scan_id += 1;
            if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
                let ignore_stack = state
                    .snapshot
                    .ignore_stack_for_abs_path(&root_abs_path, true);
                if ignore_stack.is_abs_path_ignored(&root_abs_path, true) {
                    root_entry.is_ignored = true;
                    state.insert_entry(root_entry.clone(), self.fs.as_ref());
                }
                state.enqueue_scan_dir(root_abs_path, &root_entry, &scan_job_tx);
            }
        };

        // Perform an initial scan of the directory. Dropping the sender lets
        // `scan_dirs` terminate once the queue drains.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut state = self.state.lock();
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
        }

        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
        if let Poll::Ready(Some(mut paths)) = futures::poll!(fs_events_rx.next()) {
            // Coalesce every immediately-available batch of events into one.
            while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
                paths.extend(more_paths);
            }
            self.process_events(paths).await;
        }

        // Continue processing events until the worktree is dropped.
        self.phase = BackgroundScannerPhase::Events;
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.scan_requests_rx.recv().fuse() => {
                    let Ok(request) = request else { break };
                    if !self.process_scan_request(request, false).await {
                        return;
                    }
                }

                path_prefix = self.path_prefixes_to_scan_rx.recv().fuse() => {
                    let Ok(path_prefix) = path_prefix else { break };
                    log::trace!("adding path prefix {:?}", path_prefix);

                    let did_scan = self.forcibly_load_paths(&[path_prefix.clone()]).await;
                    if did_scan {
                        let abs_path =
                        {
                            let mut state = self.state.lock();
                            state.path_prefixes_to_scan.insert(path_prefix.clone());
                            state.snapshot.abs_path.join(&path_prefix)
                        };

                        // Re-process the prefix as an event so its subtree's
                        // entries and git statuses get refreshed.
                        if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
                            self.process_events(vec![abs_path]).await;
                        }
                    }
                }

                paths = fs_events_rx.next().fuse() => {
                    let Some(mut paths) = paths else { break };
                    // Coalesce every immediately-available batch of events.
                    while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
                        paths.extend(more_paths);
                    }
                    self.process_events(paths.clone()).await;
                }
            }
        }
    }
3513
    /// Handle an explicit request to rescan a set of worktree-relative paths.
    /// Returns `false` when the status-update channel is closed, signaling the
    /// scanner to shut down.
    async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
        log::debug!("rescanning paths {:?}", request.relative_paths);

        request.relative_paths.sort_unstable();
        // Make sure the requested paths (and any unloaded ancestors) are
        // present in the snapshot before reloading them.
        self.forcibly_load_paths(&request.relative_paths).await;

        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
            Ok(path) => path,
            Err(err) => {
                log::error!("failed to canonicalize root path: {}", err);
                return true;
            }
        };
        // Convert relative paths to absolute paths rooted at the canonical
        // root; a path with no file name component maps to the root itself.
        let abs_paths = request
            .relative_paths
            .iter()
            .map(|path| {
                if path.file_name().is_some() {
                    root_canonical_path.join(path)
                } else {
                    root_canonical_path.clone()
                }
            })
            .collect::<Vec<_>>();

        self.reload_entries_for_paths(
            root_path,
            root_canonical_path,
            &request.relative_paths,
            abs_paths,
            None,
        )
        .await;
        self.send_status_update(scanning, Some(request.done))
    }
3550
    /// Process a batch of filesystem event paths: filter out irrelevant ones,
    /// reload the affected entries, refresh ignore statuses, and reload any
    /// git repositories whose `.git` directories were touched.
    async fn process_events(&mut self, mut abs_paths: Vec<PathBuf>) {
        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
            Ok(path) => path,
            Err(err) => {
                log::error!("failed to canonicalize root path: {}", err);
                return;
            }
        };

        let mut relative_paths = Vec::with_capacity(abs_paths.len());
        let mut dot_git_paths_to_reload = HashSet::default();
        // After sorting, a child path immediately follows its ancestor, so this
        // dedup drops events that are subsumed by an event on an ancestor.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));
        abs_paths.retain(|abs_path| {
            let snapshot = &self.state.lock().snapshot;
            {
                // Events inside a `.git` directory trigger a repository
                // reload, keyed by the `.git` dir's root-relative path.
                let mut is_git_related = false;
                if let Some(dot_git_dir) = abs_path
                    .ancestors()
                    .find(|ancestor| ancestor.file_name() == Some(*DOT_GIT))
                {
                    let dot_git_path = dot_git_dir
                        .strip_prefix(&root_canonical_path)
                        .ok()
                        .map(|path| path.to_path_buf())
                        .unwrap_or_else(|| dot_git_dir.to_path_buf());
                    dot_git_paths_to_reload.insert(dot_git_path.to_path_buf());
                    is_git_related = true;
                }

                let relative_path: Arc<Path> =
                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                        path.into()
                    } else {
                        log::error!(
                            "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
                        );
                        return false;
                    };

                // Skip events under directories that were never loaded — their
                // contents will be scanned if/when the directory is expanded.
                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
                    snapshot
                        .entry_for_path(parent)
                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
                });
                if !parent_dir_is_loaded {
                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
                    return false;
                }

                if snapshot.is_path_excluded(relative_path.to_path_buf()) {
                    if !is_git_related {
                        log::debug!("ignoring FS event for excluded path {relative_path:?}");
                    }
                    return false;
                }

                relative_paths.push(relative_path);
                true
            }
        });

        if dot_git_paths_to_reload.is_empty() && relative_paths.is_empty() {
            return;
        }

        if !relative_paths.is_empty() {
            log::debug!("received fs events {:?}", relative_paths);

            // Reload the changed entries, then recursively scan any
            // directories that were (re)created by the events.
            let (scan_job_tx, scan_job_rx) = channel::unbounded();
            self.reload_entries_for_paths(
                root_path,
                root_canonical_path,
                &relative_paths,
                abs_paths,
                Some(scan_job_tx.clone()),
            )
            .await;
            drop(scan_job_tx);
            self.scan_dirs(false, scan_job_rx).await;

            // Then re-evaluate ignore statuses, which may enqueue further
            // directory scans for paths that became un-ignored.
            let (scan_job_tx, scan_job_rx) = channel::unbounded();
            self.update_ignore_statuses(scan_job_tx).await;
            self.scan_dirs(false, scan_job_rx).await;
        }

        {
            let mut state = self.state.lock();
            if !dot_git_paths_to_reload.is_empty() {
                // If only git metadata changed, still bump the scan id so the
                // repository reload is observable as a new scan.
                if relative_paths.is_empty() {
                    state.snapshot.scan_id += 1;
                }
                log::debug!("reloading repositories: {dot_git_paths_to_reload:?}");
                state.reload_repositories(&dot_git_paths_to_reload, self.fs.as_ref());
            }
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
            for (_, entry_id) in mem::take(&mut state.removed_entry_ids) {
                state.scanned_dirs.remove(&entry_id);
            }
        }

        self.send_status_update(false, None);
    }
3655
3656 async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
3657 let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
3658 {
3659 let mut state = self.state.lock();
3660 let root_path = state.snapshot.abs_path.clone();
3661 for path in paths {
3662 for ancestor in path.ancestors() {
3663 if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
3664 if entry.kind == EntryKind::UnloadedDir {
3665 let abs_path = root_path.join(ancestor);
3666 state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
3667 state.paths_to_scan.insert(path.clone());
3668 break;
3669 }
3670 }
3671 }
3672 }
3673 drop(scan_job_tx);
3674 }
3675 while let Some(job) = scan_job_rx.next().await {
3676 self.scan_dir(&job).await.log_err();
3677 }
3678
3679 mem::take(&mut self.state.lock().paths_to_scan).len() > 0
3680 }
3681
    /// Drain `scan_jobs_rx` using one worker task per CPU, recursively loading
    /// directories from the filesystem. While scanning, rescan requests are
    /// still serviced, and (when `enable_progress_updates` is set) periodic
    /// progress snapshots are sent to the worktree.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the worktree has stopped listening, there is no point scanning.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.scan_requests_rx.recv().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race; it sends the update.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent it; catch up.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Errors on the root itself are reported elsewhere.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
3754
    /// Send the current snapshot plus a change set (diffed against the
    /// previously-sent snapshot) to the worktree. Returns `false` when the
    /// receiving end has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut state = self.state.lock();
        // Mid-scan progress updates with nothing changed are skipped; a final
        // (non-scanning) update is always sent.
        if state.changed_paths.is_empty() && scanning {
            return true;
        }

        let new_snapshot = state.snapshot.clone();
        // Swap in the new snapshot as the baseline for the next diff.
        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
        let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
        state.changed_paths.clear();

        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot: new_snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
3775
    /// Scan a single directory: read its children from the filesystem, build
    /// their entries (resolving symlinks, ignore state, and git status),
    /// insert them into the snapshot, and enqueue child directories for
    /// further scanning via `job.scan_queue`.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let root_abs_path;
        let mut ignore_stack;
        let mut new_ignore;
        let root_char_bag;
        let next_entry_id;
        {
            // Copy what we need out of the state, then release the lock
            // before doing filesystem I/O.
            let state = self.state.lock();
            let snapshot = &state.snapshot;
            root_abs_path = snapshot.abs_path().clone();
            if snapshot.is_path_excluded(job.path.to_path_buf()) {
                log::error!("skipping excluded directory {:?}", job.path);
                return Ok(());
            }
            log::debug!("scanning directory {:?}", job.path);
            ignore_stack = job.ignore_stack.clone();
            new_ignore = None;
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = self.next_entry_id.clone();
            drop(state);
        }

        let mut dotgit_path = None;
        let mut root_canonical_path = None;
        let mut new_entries: Vec<Entry> = Vec::new();
        // Parallel to the directory entries in `new_entries`: one (optional)
        // scan job per child directory, in the same order.
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in this directory.
                // Because `.gitignore` starts with a `.`, it sorts near the
                // beginning of the listing, so there should rarely be many
                // such entries. Update the ignore stack associated with any
                // new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }
            // If we find a .git, we'll need to load the repository.
            else if child_name == *DOT_GIT {
                dotgit_path = Some(child_path.clone());
            }

            {
                let relative_path = job.path.join(child_name);
                let mut state = self.state.lock();
                if state.snapshot.is_path_excluded(relative_path.clone()) {
                    log::debug!("skipping excluded child entry {relative_path:?}");
                    state.remove_path(&relative_path);
                    continue;
                }
                drop(state);
            }

            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child vanished between readdir and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {child_abs_path:?}: {err:?}");
                    continue;
                }
            };

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if job.is_external {
                // Everything below an external directory is external too.
                child_entry.is_external = true;
            } else if child_metadata.is_symlink {
                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
                    Ok(path) => path,
                    Err(err) => {
                        log::error!(
                            "error reading target of symlink {:?}: {:?}",
                            child_abs_path,
                            err
                        );
                        continue;
                    }
                };

                // lazily canonicalize the root path in order to determine if
                // symlinks point outside of the worktree.
                let root_canonical_path = match &root_canonical_path {
                    Some(path) => path,
                    None => match self.fs.canonicalize(&root_abs_path).await {
                        Ok(path) => root_canonical_path.insert(path),
                        Err(err) => {
                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
                            continue;
                        }
                    },
                };

                if !canonical_path.starts_with(root_canonical_path) {
                    child_entry.is_external = true;
                }
            }

            if child_entry.is_dir() {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path.clone(),
                        path: child_path,
                        is_external: child_entry.is_external,
                        ignore_stack: if child_entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                        containing_repository: job.containing_repository.clone(),
                    }));
                } else {
                    // Keep `new_jobs` aligned with directory entries even when
                    // a directory won't be scanned.
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
                // Compute git status for tracked, non-ignored files inside a
                // repository's work directory.
                if !child_entry.is_ignored {
                    if let Some((repository_dir, repository, staged_statuses)) =
                        &job.containing_repository
                    {
                        if let Ok(repo_path) = child_entry.path.strip_prefix(&repository_dir.0) {
                            if let Some(mtime) = child_entry.mtime {
                                let repo_path = RepoPath(repo_path.into());
                                child_entry.git_status = combine_git_statuses(
                                    staged_statuses.get(&repo_path).copied(),
                                    repository.lock().unstaged_status(&repo_path, mtime),
                                );
                            }
                        }
                    }
                }
            }

            {
                let relative_path = job.path.join(child_name);
                let state = self.state.lock();
                if state.snapshot.is_path_private(&relative_path) {
                    log::debug!("detected private file: {relative_path:?}");
                    child_entry.is_private = true;
                }
                drop(state)
            }

            new_entries.push(child_entry);
        }

        let mut state = self.state.lock();

        // Identify any subdirectories that should not be scanned.
        let mut job_ix = 0;
        for entry in &mut new_entries {
            state.reuse_entry_id(entry);
            if entry.is_dir() {
                if state.should_scan_directory(entry) {
                    job_ix += 1;
                } else {
                    log::debug!("defer scanning directory {:?}", entry.path);
                    entry.kind = EntryKind::UnloadedDir;
                    new_jobs.remove(job_ix);
                }
            }
        }

        state.populate_dir(&job.path, new_entries, new_ignore);

        let repository =
            dotgit_path.and_then(|path| state.build_git_repository(path, self.fs.as_ref()));

        // Enqueue the surviving child-directory jobs, propagating a newly
        // discovered repository (if any) down to them.
        for mut new_job in new_jobs.into_iter().flatten() {
            if let Some(containing_repository) = &repository {
                new_job.containing_repository = Some(containing_repository.clone());
            }

            job.scan_queue
                .try_send(new_job)
                .expect("channel is unbounded");
        }

        Ok(())
    }
4006
    /// Re-stat the given paths and update their entries in the snapshot.
    /// When `scan_queue_tx` is provided, directories among the paths are
    /// also enqueued for a recursive rescan.
    async fn reload_entries_for_paths(
        &self,
        root_abs_path: Arc<Path>,
        root_canonical_path: PathBuf,
        relative_paths: &[Arc<Path>],
        abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) {
        // Stat all paths concurrently; each result is
        // Ok(Some((metadata, canonical_path))), Ok(None) for "gone", or Err.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| async move {
                    let metadata = self.fs.metadata(abs_path).await?;
                    if let Some(metadata) = metadata {
                        let canonical_path = self.fs.canonicalize(abs_path).await?;

                        // If we're on a case-insensitive filesystem (default on macOS), we want
                        // to only ignore metadata for non-symlink files if their absolute-path matches
                        // the canonical-path.
                        // Because if not, this might be a case-only-renaming (`mv test.txt TEST.TXT`)
                        // and we want to ignore the metadata for the old path (`test.txt`) so it's
                        // treated as removed.
                        if !self.fs_case_sensitive && !metadata.is_symlink {
                            let canonical_file_name = canonical_path.file_name();
                            let file_name = abs_path.file_name();
                            if canonical_file_name != file_name {
                                return Ok(None);
                            }
                        }

                        anyhow::Ok(Some((metadata, canonical_path)))
                    } else {
                        Ok(None)
                    }
                })
                .collect::<Vec<_>>(),
        )
        .await;

        let mut state = self.state.lock();
        let snapshot = &mut state.snapshot;
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        let doing_recursive_update = scan_queue_tx.is_some();
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            if matches!(metadata, Ok(None)) || doing_recursive_update {
                log::trace!("remove path {:?}", path);
                state.remove_path(path);
            }
        }

        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
            match metadata {
                Ok(Some((metadata, canonical_path))) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);

                    let mut fs_entry = Entry::new(
                        path.clone(),
                        metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                    );
                    let is_dir = fs_entry.is_dir();
                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
                    fs_entry.is_external = !canonical_path.starts_with(&root_canonical_path);
                    fs_entry.is_private = state.snapshot.is_path_private(path);

                    // Refresh git status for regular, tracked files.
                    if !is_dir && !fs_entry.is_ignored && !fs_entry.is_external {
                        if let Some((work_dir, repo)) = state.snapshot.local_repo_for_path(path) {
                            if let Ok(repo_path) = path.strip_prefix(work_dir.0) {
                                if let Some(mtime) = fs_entry.mtime {
                                    let repo_path = RepoPath(repo_path.into());
                                    let repo = repo.repo_ptr.lock();
                                    fs_entry.git_status = repo.status(&repo_path, mtime);
                                }
                            }
                        }
                    }

                    // Directories are either enqueued for a recursive scan or
                    // marked unloaded, depending on scanner policy.
                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, fs_entry.is_dir()) {
                        if state.should_scan_directory(&fs_entry) {
                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
                        } else {
                            fs_entry.kind = EntryKind::UnloadedDir;
                        }
                    }

                    state.insert_entry(fs_entry, self.fs.as_ref());
                }
                Ok(None) => {
                    self.remove_repo_path(path, &mut state.snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file {abs_path:?} on event: {err:#}");
                }
            }
        }

        // Record the refreshed paths so the next status update diffs them.
        util::extend_sorted(
            &mut state.changed_paths,
            relative_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );
    }
4123
4124 fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
4125 if !path
4126 .components()
4127 .any(|component| component.as_os_str() == *DOT_GIT)
4128 {
4129 if let Some(repository) = snapshot.repository_for_work_directory(path) {
4130 let entry = repository.work_directory.0;
4131 snapshot.git_repositories.remove(&entry);
4132 snapshot
4133 .snapshot
4134 .repository_entries
4135 .remove(&RepositoryWorkDirectory(path.into()));
4136 return Some(());
4137 }
4138 }
4139
4140 // TODO statuses
4141 // Track when a .git is removed and iterate over the file system there
4142
4143 Some(())
4144 }
4145
    /// Re-apply gitignore rules after `.gitignore` files changed: drop ignores
    /// whose files disappeared, and recursively recompute the ignore status of
    /// every directory below an updated ignore file. Directories that become
    /// un-ignored may be enqueued on `scan_job_tx` for loading.
    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
        use futures::FutureExt as _;

        let mut snapshot = self.state.lock().snapshot.clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        let abs_path = snapshot.abs_path.clone();
        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
                if *needs_update {
                    *needs_update = false;
                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                        ignores_to_update.push(parent_abs_path.clone());
                    }
                }

                // The `.gitignore` file itself is gone — forget its rules.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.state
                .lock()
                .snapshot
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of a path we're already going to process; the
            // recursive walk below covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            // The channel is unbounded, so this send completes immediately.
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
                scan_queue: scan_job_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        // Drain the (self-refilling) ignore queue with one worker per CPU.
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.scan_requests_rx.recv().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
4227
4228 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
4229 log::trace!("update ignore status {:?}", job.abs_path);
4230
4231 let mut ignore_stack = job.ignore_stack;
4232 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
4233 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
4234 }
4235
4236 let mut entries_by_id_edits = Vec::new();
4237 let mut entries_by_path_edits = Vec::new();
4238 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
4239 let repo = snapshot
4240 .local_repo_for_path(path)
4241 .map_or(None, |local_repo| Some(local_repo.1));
4242 for mut entry in snapshot.child_entries(path).cloned() {
4243 let was_ignored = entry.is_ignored;
4244 let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
4245 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
4246 if entry.is_dir() {
4247 let child_ignore_stack = if entry.is_ignored {
4248 IgnoreStack::all()
4249 } else {
4250 ignore_stack.clone()
4251 };
4252
4253 // Scan any directories that were previously ignored and weren't previously scanned.
4254 if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
4255 let state = self.state.lock();
4256 if state.should_scan_directory(&entry) {
4257 state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
4258 }
4259 }
4260
4261 job.ignore_queue
4262 .send(UpdateIgnoreStatusJob {
4263 abs_path: abs_path.clone(),
4264 ignore_stack: child_ignore_stack,
4265 ignore_queue: job.ignore_queue.clone(),
4266 scan_queue: job.scan_queue.clone(),
4267 })
4268 .await
4269 .unwrap();
4270 }
4271
4272 if entry.is_ignored != was_ignored {
4273 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
4274 path_entry.scan_id = snapshot.scan_id;
4275 path_entry.is_ignored = entry.is_ignored;
4276 if !entry.is_dir() && !entry.is_ignored && !entry.is_external {
4277 if let Some(repo) = repo {
4278 if let Some(mtime) = &entry.mtime {
4279 let repo_path = RepoPath(entry.path.to_path_buf());
4280 let repo = repo.repo_ptr.lock();
4281 entry.git_status = repo.status(&repo_path, *mtime);
4282 }
4283 }
4284 }
4285 entries_by_id_edits.push(Edit::Insert(path_entry));
4286 entries_by_path_edits.push(Edit::Insert(entry));
4287 }
4288 }
4289
4290 let state = &mut self.state.lock();
4291 for edit in &entries_by_path_edits {
4292 if let Edit::Insert(entry) = edit {
4293 if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
4294 state.changed_paths.insert(ix, entry.path.clone());
4295 }
4296 }
4297 }
4298
4299 state
4300 .snapshot
4301 .entries_by_path
4302 .edit(entries_by_path_edits, &());
4303 state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
4304 }
4305
    /// Diff two snapshots over the given (sorted) event paths, producing the
    /// set of per-entry changes to report to the worktree. Uses two cursors
    /// walking both snapshots' path-ordered entry trees in lockstep.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> UpdatedEntriesSet {
        use BackgroundScannerPhase::*;
        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

        // Identify which paths have changed. Use the known set of changed
        // parent paths to optimize the search.
        let mut changes = Vec::new();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let mut last_newly_loaded_dir_path = None;
        old_paths.next(&());
        new_paths.next(&());
        for path in event_paths {
            let path = PathKey(path.clone());
            // Advance each cursor up to (at least) the event path.
            if old_paths.item().map_or(false, |e| e.path < path.0) {
                old_paths.seek_forward(&path, Bias::Left, &());
            }
            if new_paths.item().map_or(false, |e| e.path < path.0) {
                new_paths.seek_forward(&path, Bias::Left, &());
            }
            // Walk both cursors through the event path's subtree.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Both cursors have moved past this event path's
                        // subtree; move on to the next event path.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                old_paths.next(&());
                            }
                            // Present in both: compare ids and contents.
                            Ordering::Equal => {
                                if self.phase == EventsReceivedDuringInitialScan {
                                    if old_entry.id != new_entry.id {
                                        changes.push((
                                            old_entry.path.clone(),
                                            old_entry.id,
                                            Removed,
                                        ));
                                    }
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.push((
                                        new_entry.path.clone(),
                                        new_entry.id,
                                        AddedOrUpdated,
                                    ));
                                } else if old_entry.id != new_entry.id {
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
                                } else if old_entry != new_entry {
                                    if old_entry.kind.is_unloaded() {
                                        // A deferred directory was just loaded;
                                        // its descendants count as Loaded too.
                                        last_newly_loaded_dir_path = Some(&new_entry.path);
                                        changes.push((
                                            new_entry.path.clone(),
                                            new_entry.id,
                                            Loaded,
                                        ));
                                    } else {
                                        changes.push((
                                            new_entry.path.clone(),
                                            new_entry.id,
                                            Updated,
                                        ));
                                    }
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added/loaded.
                            Ordering::Greater => {
                                let is_newly_loaded = self.phase == InitialScan
                                    || last_newly_loaded_dir_path
                                        .as_ref()
                                        .map_or(false, |dir| new_entry.path.starts_with(&dir));
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    if is_newly_loaded { Loaded } else { Added },
                                ));
                                new_paths.next(&());
                            }
                        }
                    }
                    // Old snapshot has trailing entries: removed.
                    (Some(old_entry), None) => {
                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
                        old_paths.next(&());
                    }
                    // New snapshot has trailing entries: added/loaded.
                    (None, Some(new_entry)) => {
                        let is_newly_loaded = self.phase == InitialScan
                            || last_newly_loaded_dir_path
                                .as_ref()
                                .map_or(false, |dir| new_entry.path.starts_with(&dir));
                        changes.push((
                            new_entry.path.clone(),
                            new_entry.id,
                            if is_newly_loaded { Loaded } else { Added },
                        ));
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes.into()
    }
4423
4424 async fn progress_timer(&self, running: bool) {
4425 if !running {
4426 return futures::future::pending().await;
4427 }
4428
4429 #[cfg(any(test, feature = "test-support"))]
4430 if self.fs.is_fake() {
4431 return self.executor.simulate_random_delay().await;
4432 }
4433
4434 smol::Timer::after(FS_WATCH_LATENCY).await;
4435 }
4436}
4437
4438fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
4439 let mut result = root_char_bag;
4440 result.extend(
4441 path.to_string_lossy()
4442 .chars()
4443 .map(|c| c.to_ascii_lowercase()),
4444 );
4445 result
4446}
4447
/// A unit of work for the background scanner: scan the directory at
/// `abs_path` and enqueue jobs for its subdirectories on `scan_queue`.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules accumulated from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for nested directories are scheduled.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories — presumably used to avoid revisiting
    // directories through symlink cycles; confirm at the consuming site.
    ancestor_inodes: TreeSet<u64>,
    // NOTE(review): looks like this marks directories outside the worktree
    // root (e.g. reached via symlink) — confirm where the job is produced.
    is_external: bool,
    // The git repository containing this directory, if any: its work
    // directory, a handle to the repository, and a map of file statuses.
    containing_repository: Option<(
        RepositoryWorkDirectory,
        Arc<Mutex<dyn GitRepository>>,
        TreeMap<RepoPath, GitFileStatus>,
    )>,
}
4461
/// A unit of work for refreshing ignore statuses beneath a directory,
/// typically after a `.gitignore` change.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose entries need re-evaluation.
    abs_path: Arc<Path>,
    // Gitignore rules in effect for that directory.
    ignore_stack: Arc<IgnoreStack>,
    // Queue for scheduling further ignore-status updates in subdirectories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
    // Queue for scheduling directory scans — presumably for directories
    // whose contents now need (re)loading; confirm at the consuming site.
    scan_queue: Sender<ScanJob>,
}
4468
/// Test-support extension methods for worktree model handles.
pub trait WorktreeModelHandle {
    /// Waits until all pending file-system events have been processed by the
    /// worktree, so subsequent assertions observe a settled state.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
4476
impl WorktreeModelHandle for Model<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it,
            // proving that all earlier FS events have been drained.
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_some())
                .await;

            // Remove the sentinel and wait for that to be observed too, so the
            // directory is left exactly as it was found.
            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();
            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_none())
                .await;

            // Finally wait for any in-flight scan to finish.
            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
4517
/// A `sum_tree` dimension accumulated while traversing `entries_by_path`:
/// the greatest path seen plus entry counts under the various
/// include-dirs/include-ignored filters.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest entry path covered so far.
    max_path: &'a Path,
    // All entries: files and directories, ignored or not.
    count: usize,
    // All entries that are not gitignored.
    non_ignored_count: usize,
    // File entries only, including ignored ones.
    file_count: usize,
    // File entries that are not gitignored.
    non_ignored_file_count: usize,
}
4526
4527impl<'a> TraversalProgress<'a> {
4528 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
4529 match (include_ignored, include_dirs) {
4530 (true, true) => self.count,
4531 (true, false) => self.file_count,
4532 (false, true) => self.non_ignored_count,
4533 (false, false) => self.non_ignored_file_count,
4534 }
4535 }
4536}
4537
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Counts accumulate additively; `max_path` is replaced rather than
    // compared because summaries are visited in ascending path order.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.non_ignored_count += summary.non_ignored_count;
        self.file_count += summary.file_count;
        self.non_ignored_file_count += summary.non_ignored_file_count;
    }
}
4547
impl<'a> Default for TraversalProgress<'a> {
    /// The identity for this dimension: empty path and zero counts.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}
4559
/// Running totals of git file statuses within some span of entries.
#[derive(Clone, Debug, Default, Copy)]
struct GitStatuses {
    added: usize,
    modified: usize,
    conflict: usize,
}

impl AddAssign for GitStatuses {
    /// Accumulates another set of status counts into this one.
    fn add_assign(&mut self, rhs: Self) {
        let GitStatuses {
            added,
            modified,
            conflict,
        } = rhs;
        self.added += added;
        self.modified += modified;
        self.conflict += conflict;
    }
}

impl Sub for GitStatuses {
    type Output = GitStatuses;

    /// Component-wise difference. Callers are expected to pass
    /// `rhs <= self` component-wise; underflow panics in debug builds.
    fn sub(self, rhs: Self) -> Self::Output {
        Self {
            added: self.added - rhs.added,
            modified: self.modified - rhs.modified,
            conflict: self.conflict - rhs.conflict,
        }
    }
}
4586
impl<'a> sum_tree::Dimension<'a, EntrySummary> for GitStatuses {
    // Status counts accumulate additively across summaries.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        *self += summary.statuses
    }
}
4592
/// An in-order traversal over a snapshot's entries, optionally filtering
/// out directories and/or gitignored entries.
pub struct Traversal<'a> {
    // Cursor into the snapshot's `entries_by_path` tree.
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether gitignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
4598
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters by
    /// seeking one past the current filtered offset. Returns the result of
    /// the underlying seek.
    pub fn advance(&mut self) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + 1,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skips the current entry and all of its descendants, repeating until an
    /// entry matching the filters is found. Returns false when the traversal
    /// is exhausted first.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the cursor's current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Filtered offset of the current entry.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }

    /// Filtered offset just past the current entry.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_dirs, self.include_ignored)
    }
}
4646
4647impl<'a> Iterator for Traversal<'a> {
4648 type Item = &'a Entry;
4649
4650 fn next(&mut self) -> Option<Self::Item> {
4651 if let Some(item) = self.entry() {
4652 self.advance();
4653 Some(item)
4654 } else {
4655 None
4656 }
4657 }
4658}
4659
/// Seek targets used to position a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget {
    // Seek to this exact path.
    Path(&'a Path),
    // Seek to the first entry past this path and all of its descendants.
    PathSuccessor(&'a Path),
    // Seek to the entry at the given offset, counted under the filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
4670
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Plain lexicographic comparison against the cursor's max path.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            // Any position still inside `path`'s subtree (including `path`
            // itself) compares Greater, so the seek keeps moving; the first
            // position outside the subtree compares Equal and the seek stops.
            TraversalTarget::PathSuccessor(path) => {
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            // Compare the requested filtered offset with the count
            // accumulated at the cursor's position.
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
4693
impl<'a, 'b> SeekTarget<'a, EntrySummary, (TraversalProgress<'a>, GitStatuses)>
    for TraversalTarget<'b>
{
    // Delegates to the `TraversalProgress` comparison; the accumulated git
    // statuses play no part in seeking.
    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitStatuses), _: &()) -> Ordering {
        self.cmp(&cursor_location.0, &())
    }
}
4701
/// Iterator over the entries directly beneath `parent_path`, skipping their
/// descendants via sibling-advancement.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
4706
4707impl<'a> Iterator for ChildEntriesIter<'a> {
4708 type Item = &'a Entry;
4709
4710 fn next(&mut self) -> Option<Self::Item> {
4711 if let Some(item) = self.traversal.entry() {
4712 if item.path.starts_with(&self.parent_path) {
4713 self.traversal.advance_to_sibling();
4714 return Some(item);
4715 }
4716 }
4717 None
4718 }
4719}
4720
/// Iterator over every entry beneath `parent_path`, at any depth.
pub struct DescendentEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
4725
4726impl<'a> Iterator for DescendentEntriesIter<'a> {
4727 type Item = &'a Entry;
4728
4729 fn next(&mut self) -> Option<Self::Item> {
4730 if let Some(item) = self.traversal.entry() {
4731 if item.path.starts_with(&self.parent_path) {
4732 self.traversal.advance();
4733 return Some(item);
4734 }
4735 }
4736 None
4737 }
4738}
4739
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes a worktree entry for the wire. The path is converted
    /// lossily to UTF-8.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
            is_external: entry.is_external,
            git_status: entry.git_status.map(git_status_to_proto),
        }
    }
}
4755
impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
    type Error = anyhow::Error;

    /// Deserializes a wire entry. For files, the fuzzy-match `CharBag` is
    /// rebuilt from the worktree root's bag plus the entry's lowercased path.
    /// `is_private` is always false for entries received over the wire.
    ///
    /// Note: despite the fallible signature, this implementation currently
    /// never returns an error.
    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
        let kind = if entry.is_dir {
            EntryKind::Dir
        } else {
            let mut char_bag = *root_char_bag;
            char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
            EntryKind::File(char_bag)
        };
        let path: Arc<Path> = PathBuf::from(entry.path).into();
        Ok(Entry {
            id: ProjectEntryId::from_proto(entry.id),
            kind,
            path,
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
            is_external: entry.is_external,
            git_status: git_status_from_proto(entry.git_status),
            is_private: false,
        })
    }
}
4782
4783fn combine_git_statuses(
4784 staged: Option<GitFileStatus>,
4785 unstaged: Option<GitFileStatus>,
4786) -> Option<GitFileStatus> {
4787 if let Some(staged) = staged {
4788 if let Some(unstaged) = unstaged {
4789 if unstaged != staged {
4790 Some(GitFileStatus::Modified)
4791 } else {
4792 Some(staged)
4793 }
4794 } else {
4795 Some(staged)
4796 }
4797 } else {
4798 unstaged
4799 }
4800}
4801
4802fn git_status_from_proto(git_status: Option<i32>) -> Option<GitFileStatus> {
4803 git_status.and_then(|status| {
4804 proto::GitStatus::from_i32(status).map(|status| match status {
4805 proto::GitStatus::Added => GitFileStatus::Added,
4806 proto::GitStatus::Modified => GitFileStatus::Modified,
4807 proto::GitStatus::Conflict => GitFileStatus::Conflict,
4808 })
4809 })
4810}
4811
4812fn git_status_to_proto(status: GitFileStatus) -> i32 {
4813 match status {
4814 GitFileStatus::Added => proto::GitStatus::Added as i32,
4815 GitFileStatus::Modified => proto::GitStatus::Modified as i32,
4816 GitFileStatus::Conflict => proto::GitStatus::Conflict as i32,
4817 }
4818}
4819
/// A worktree-entry identifier, allocated from a shared monotonically
/// increasing counter and convertible to/from its `u64` wire form.
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProjectEntryId(usize);

impl ProjectEntryId {
    /// Sentinel id that compares greater than any id produced by `new`.
    pub const MAX: Self = Self(usize::MAX);

    /// Allocates the next id by bumping the shared counter.
    pub fn new(counter: &AtomicUsize) -> Self {
        let id = counter.fetch_add(1, SeqCst);
        Self(id)
    }

    /// Reconstructs an id from its wire representation.
    pub fn from_proto(id: u64) -> Self {
        ProjectEntryId(id as usize)
    }

    /// The id's wire representation.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// The raw numeric value of the id.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
4842
/// Aggregated counts of primary error and warning diagnostics for a path.
#[derive(Copy, Clone, Debug, Default, PartialEq, Serialize)]
pub struct DiagnosticSummary {
    pub error_count: usize,
    pub warning_count: usize,
}
4848
4849impl DiagnosticSummary {
4850 fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
4851 let mut this = Self {
4852 error_count: 0,
4853 warning_count: 0,
4854 };
4855
4856 for entry in diagnostics {
4857 if entry.diagnostic.is_primary {
4858 match entry.diagnostic.severity {
4859 DiagnosticSeverity::ERROR => this.error_count += 1,
4860 DiagnosticSeverity::WARNING => this.warning_count += 1,
4861 _ => {}
4862 }
4863 }
4864 }
4865
4866 this
4867 }
4868
4869 pub fn is_empty(&self) -> bool {
4870 self.error_count == 0 && self.warning_count == 0
4871 }
4872
4873 pub fn to_proto(
4874 &self,
4875 language_server_id: LanguageServerId,
4876 path: &Path,
4877 ) -> proto::DiagnosticSummary {
4878 proto::DiagnosticSummary {
4879 path: path.to_string_lossy().to_string(),
4880 language_server_id: language_server_id.0 as u64,
4881 error_count: self.error_count as u32,
4882 warning_count: self.warning_count as u32,
4883 }
4884 }
4885}