1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{
10 repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 FutureExt, Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use smol::channel::{self, Sender};
40use std::{
41 any::Any,
42 cmp::{self, Ordering},
43 convert::TryFrom,
44 ffi::OsStr,
45 fmt,
46 future::Future,
47 mem,
48 ops::{Deref, DerefMut},
49 path::{Path, PathBuf},
50 pin::Pin,
51 sync::{
52 atomic::{AtomicUsize, Ordering::SeqCst},
53 Arc,
54 },
55 time::{Duration, SystemTime},
56};
57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
58use util::{paths::HOME, ResultExt, TakeUntilExt};
59
/// A stable identifier for a worktree. For local worktrees it is derived
/// from the gpui model id (see `WorktreeId::from_usize(cx.model_id())`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A tree of files being tracked by the project: either one that lives on
/// this machine's filesystem (`Local`) or one mirrored from a collaborator
/// over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// A worktree whose files live on the local filesystem and are kept
/// up-to-date by a background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Sends batches of changed paths (plus a completion barrier) to the
    /// background scanner so they can be rescanned on demand.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    /// Watch channel holding `true` while a scan is in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    /// Keeps the background scanner alive for the lifetime of the worktree.
    _background_scanner_task: Task<()>,
    /// Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    /// Diagnostics for each path, stored per language server (the inner
    /// Vec is kept sorted by server id — see `update_diagnostics`).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Error/warning summaries for each path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    /// Whether this worktree is user-visible (exposed via `is_visible`).
    visible: bool,
}
86
/// A worktree mirrored from a remote collaborator via the RPC protocol.
pub struct RemoteWorktree {
    /// Foreground copy of the snapshot, refreshed after each remote update.
    snapshot: Snapshot,
    /// Snapshot that remote updates are applied to on a background task;
    /// its contents are then cloned into `snapshot` on the foreground.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Feeds `proto::UpdateWorktree` messages to the background task.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Oneshot senders fired once the snapshot has observed the paired
    /// scan id (drained from the front as scans complete).
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    /// Error/warning summaries for each path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
99
/// An immutable view of a worktree's contents at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root on the host filesystem.
    abs_path: Arc<Path>,
    /// File name of the root, used for display and fuzzy matching.
    root_name: String,
    /// Lowercased character bag of `root_name`, for fuzzy matching.
    root_char_bag: CharBag,
    /// All entries, ordered by worktree-relative path.
    entries_by_path: SumTree<Entry>,
    /// The same entries, indexed by their project entry id.
    entries_by_id: SumTree<PathEntry>,
    /// Git repositories discovered in the worktree, keyed by their
    /// working-directory path.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
122
/// Snapshot-level description of a git repository found in the worktree:
/// its working directory, current branch, and per-path file statuses.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    pub(crate) branch: Option<Arc<str>>,
    /// Git file statuses keyed by repository-relative path.
    pub(crate) statuses: TreeMap<RepoPath, GitFileStatus>,
}
129
130fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
131 proto::GitStatus::from_i32(git_status).map(|status| match status {
132 proto::GitStatus::Added => GitFileStatus::Added,
133 proto::GitStatus::Modified => GitFileStatus::Modified,
134 proto::GitStatus::Conflict => GitFileStatus::Conflict,
135 })
136}
137
impl RepositoryEntry {
    /// The repository's current branch name, if one has been detected.
    pub fn branch(&self) -> Option<Arc<str>> {
        self.branch.clone()
    }

    /// The project entry id of the repository's working directory.
    pub fn work_directory_id(&self) -> ProjectEntryId {
        *self.work_directory
    }

    /// Resolves the working directory's path via `snapshot`, returning
    /// `None` if that entry no longer exists in the snapshot.
    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
        snapshot
            .entry_for_id(self.work_directory_id())
            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
    }

    /// Returns the most severe git status among `path` and the statuses
    /// recorded beneath it, ordering severity Conflict > Modified > Added.
    /// Returns `None` if `path` is outside the working directory or has
    /// no recorded statuses.
    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| {
                self.statuses
                    .iter_from(&repo_path)
                    .take_while(|(key, _)| key.starts_with(&repo_path))
                    // Short circuit once we've found the highest severity.
                    .take_until(|(_, status)| status == &&GitFileStatus::Conflict)
                    .map(|(_, status)| status)
                    .reduce(
                        |status_first, status_second| match (status_first, status_second) {
                            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => {
                                &GitFileStatus::Conflict
                            }
                            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => {
                                &GitFileStatus::Modified
                            }
                            _ => &GitFileStatus::Added,
                        },
                    )
                    .copied()
            })
    }

    /// Returns the git status recorded for exactly `path`, if any.
    #[cfg(any(test, feature = "test-support"))]
    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| (&self.statuses).get(&repo_path))
            .cloned()
    }

    /// Computes a wire-format delta that transforms `other`'s statuses
    /// into `self`'s. Both status maps are walked in sorted order:
    /// statuses added or changed in `self` become `updated_statuses`,
    /// and paths present only in `other` become `removed_repo_paths`.
    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
        let mut removed_statuses: Vec<String> = Vec::new();

        let mut self_statuses = self.statuses.iter().peekable();
        let mut other_statuses = other.statuses.iter().peekable();
        loop {
            match (self_statuses.peek(), other_statuses.peek()) {
                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
                    match Ord::cmp(self_repo_path, other_repo_path) {
                        // Path only exists in `self`: it was added.
                        Ordering::Less => {
                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
                            self_statuses.next();
                        }
                        // Path exists in both: emit only if the status changed.
                        Ordering::Equal => {
                            if self_status != other_status {
                                updated_statuses
                                    .push(make_status_entry(self_repo_path, self_status));
                            }

                            self_statuses.next();
                            other_statuses.next();
                        }
                        // Path only exists in `other`: it was removed.
                        Ordering::Greater => {
                            removed_statuses.push(make_repo_path(other_repo_path));
                            other_statuses.next();
                        }
                    }
                }
                // `other` is exhausted: everything remaining in `self` is new.
                (Some((self_repo_path, self_status)), None) => {
                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
                    self_statuses.next();
                }
                // `self` is exhausted: everything remaining in `other` was removed.
                (None, Some((other_repo_path, _))) => {
                    removed_statuses.push(make_repo_path(other_repo_path));
                    other_statuses.next();
                }
                (None, None) => break,
            }
        }

        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id().to_proto(),
            branch: self.branch.as_ref().map(|str| str.to_string()),
            removed_repo_paths: removed_statuses,
            updated_statuses,
        }
    }
}
235
236fn make_repo_path(path: &RepoPath) -> String {
237 path.as_os_str().to_string_lossy().to_string()
238}
239
240fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
241 proto::StatusEntry {
242 repo_path: make_repo_path(path),
243 status: match status {
244 GitFileStatus::Added => proto::GitStatus::Added.into(),
245 GitFileStatus::Modified => proto::GitStatus::Modified.into(),
246 GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
247 },
248 }
249}
250
251impl From<&RepositoryEntry> for proto::RepositoryEntry {
252 fn from(value: &RepositoryEntry) -> Self {
253 proto::RepositoryEntry {
254 work_directory_id: value.work_directory.to_proto(),
255 branch: value.branch.as_ref().map(|str| str.to_string()),
256 updated_statuses: value
257 .statuses
258 .iter()
259 .map(|(repo_path, status)| make_status_entry(repo_path, status))
260 .collect(),
261 removed_repo_paths: Default::default(),
262 }
263 }
264}
265
/// This path corresponds to the 'content path' (the folder that contains
/// the .git). Constructed from entry paths, so it is relative to the
/// worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
269
270impl Default for RepositoryWorkDirectory {
271 fn default() -> Self {
272 RepositoryWorkDirectory(Arc::from(Path::new("")))
273 }
274}
275
impl AsRef<Path> for RepositoryWorkDirectory {
    /// Borrows the underlying worktree-relative path.
    fn as_ref(&self) -> &Path {
        self.0.as_ref()
    }
}
281
/// Newtype over the project entry id of a repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
284
impl WorkDirectoryEntry {
    /// Converts a worktree-relative `path` into a repository-relative
    /// path by stripping the working directory's prefix. Returns `None`
    /// if the working directory entry is gone from `worktree` or `path`
    /// does not lie beneath it.
    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
        worktree.entry_for_id(self.0).and_then(|entry| {
            path.strip_prefix(&entry.path)
                .ok()
                .map(move |path| path.into())
        })
    }
}
294
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    /// Exposes the wrapped project entry id.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
302
303impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
304 fn from(value: ProjectEntryId) -> Self {
305 WorkDirectoryEntry(value)
306 }
307}
308
/// A `Snapshot` augmented with local-only state: gitignore files and the
/// open git repositories backing the snapshot's repository entries.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
}
319
/// Mutable state owned by the background scanner while it walks the
/// filesystem and produces updated snapshots.
pub struct BackgroundScannerState {
    snapshot: LocalSnapshot,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    /// Paths touched during the current update batch.
    changed_paths: Vec<Arc<Path>>,
    /// The snapshot as of the previously completed update, used for diffing.
    prev_snapshot: Snapshot,
}
330
/// A live handle to a git repository on the local filesystem, paired with
/// the scan ids at which it (and its .git directory) were last observed.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    pub(crate) scan_id: usize,
    /// Scan id at which the .git directory's contents last changed; used
    /// to detect repository-level changes between snapshots.
    pub(crate) git_dir_scan_id: usize,
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
340
impl LocalRepositoryEntry {
    /// Returns whether `path` lies inside this repository's .git folder.
    // Note that this path should be relative to the worktree root.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}
347
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    /// Lets a `LocalSnapshot` be used anywhere a `Snapshot` is expected.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
355
impl DerefMut for LocalSnapshot {
    /// Mutable access to the wrapped `Snapshot`.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
361
/// Messages sent from the background scanner to the worktree model.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot. `scanning` is true while more
    /// updates from the same scan are still expected; `barrier`, when
    /// present, is dropped to signal the requester that the scan of its
    /// paths finished.
    Updated {
        snapshot: LocalSnapshot,
        changes: UpdatedEntriesSet,
        barrier: Option<barrier::Sender>,
        scanning: bool,
    },
}
371
/// State maintained while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    /// Receives each new snapshot together with its entry and git
    /// repository changes, for relaying to remote participants.
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    /// Signalled to resume sending updates after an interruption.
    resume_updates: watch::Sender<()>,
    /// Keeps the snapshot-relaying task alive while sharing.
    _maintain_remote_snapshot: Task<Option<()>>,
}
379
/// Events emitted by a `Worktree` model when its snapshot changes.
pub enum Event {
    UpdatedEntries(UpdatedEntriesSet),
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
}
384
// Registers `Worktree` as a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
388
impl Worktree {
    /// Creates a local worktree rooted at `path`, stats the root, and
    /// spawns the background scanner that keeps the snapshot up-to-date.
    ///
    /// Returns an error if the root path cannot be stat'ed.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // Start at 1 so that completed_scan_id (0) lags behind
                    // until the first scan finishes.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry so the worktree is
            // usable before the first scan completes.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Foreground task: apply each state reported by the scanner to
            // this model. Exits when either the scanner or the model drops.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, changes, cx);
                                // Dropping the barrier unblocks whoever
                                // requested this rescan.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Background task: watch the filesystem and run the scanner.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        next_entry_id,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a remote worktree that mirrors a collaborator's worktree.
    /// Incoming `proto::UpdateWorktree` messages are applied to a shared
    /// background snapshot, which is then copied into the model's
    /// foreground snapshot.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Background task: apply remote updates off the main thread.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Foreground task: copy the background snapshot into the model
            // after each update, and wake subscribers whose awaited scan
            // ids have now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Arc::from([])));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// Borrows this as a local worktree, if it is one.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Borrows this as a remote worktree, if it is one.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutably borrows this as a local worktree, if it is one.
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutably borrows this as a remote worktree, if it is one.
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Whether this worktree lives on the local filesystem.
    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    /// Whether this worktree is mirrored from a collaborator.
    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// A clone of the current snapshot (the plain `Snapshot` part, for
    /// local worktrees).
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The id of the most recently completed scan.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    /// Whether this worktree is user-visible.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The replica id of this worktree's owner (0 for local worktrees).
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over all diagnostic summaries, flattened to
    /// `(path, language server id, summary)` triples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
681
682impl LocalWorktree {
    /// Returns whether the absolute path `path` falls under this
    /// worktree's root directory.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
686
    /// Converts a worktree-relative `path` into an absolute path. A path
    /// with no file name (the empty path, meaning the worktree root)
    /// resolves to the root's absolute path itself.
    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.abs_path.join(path)
        } else {
            self.abs_path.to_path_buf()
        }
    }
694
    /// Loads the file at `path` from disk and constructs a `Buffer` with
    /// remote id `id` for it, including the git diff base when the file
    /// belongs to a repository (see `load`).
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer off the main thread.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|_| Buffer::build(text_buffer, diff_base, Some(Arc::new(file)))))
        })
    }
713
714 pub fn diagnostics_for_path(
715 &self,
716 path: &Path,
717 ) -> Vec<(
718 LanguageServerId,
719 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
720 )> {
721 self.diagnostics.get(path).cloned().unwrap_or_default()
722 }
723
    /// Removes all diagnostics and summaries recorded by `server_id`,
    /// dropping any path entries left empty. If the worktree is shared,
    /// an empty summary is broadcast for each cleared path so remote
    /// participants also drop it.
    pub fn clear_diagnostics_for_language_server(
        &mut self,
        server_id: LanguageServerId,
        _: &mut ModelContext<Worktree>,
    ) {
        let worktree_id = self.id().to_proto();
        self.diagnostic_summaries
            .retain(|path, summaries_by_server_id| {
                if summaries_by_server_id.remove(&server_id).is_some() {
                    if let Some(share) = self.share.as_ref() {
                        // Zero counts tell collaborators to clear this path.
                        self.client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id: share.project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path: path.to_string_lossy().to_string(),
                                    language_server_id: server_id.0 as u64,
                                    error_count: 0,
                                    warning_count: 0,
                                }),
                            })
                            .log_err();
                    }
                    !summaries_by_server_id.is_empty()
                } else {
                    true
                }
            });

        self.diagnostics.retain(|_, diagnostics_by_server_id| {
            // The per-path Vec is sorted by server id, so binary search.
            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                diagnostics_by_server_id.remove(ix);
                !diagnostics_by_server_id.is_empty()
            } else {
                true
            }
        });
    }
762
    /// Replaces the diagnostics recorded by `server_id` for
    /// `worktree_path`, updating the path's summary and broadcasting it
    /// if the worktree is shared.
    ///
    /// Returns `Ok(true)` when the summary changed in a way observers
    /// care about (i.e. the old or new summary is non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server; drop its entry (and
            // the whole path entry if it becomes empty).
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // Keep the per-path Vec sorted by server id.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
822
    /// Installs `new_snapshot` as the current snapshot, forwards it (with
    /// the entry and derived git repository changes) to collaborators if
    /// sharing, and emits change events for observers.
    fn set_snapshot(
        &mut self,
        new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut ModelContext<Worktree>,
    ) {
        // Diff git repositories before overwriting the old snapshot.
        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);

        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((
                    self.snapshot.clone(),
                    entry_changes.clone(),
                    repo_changes.clone(),
                ))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }
851
    /// Diffs the git repositories of two snapshots, walking both maps in
    /// sorted entry-id order. Returns `(work directory path, old entry)`
    /// pairs: the old entry is `None` for newly added repositories, and
    /// `Some` for removed or changed ones.
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        // Repository only in the new snapshot: added.
                        Ordering::Less => {
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((entry.path.clone(), None));
                            }
                            new_repos.next();
                        }
                        // Present in both: report only if the .git
                        // directory changed since the old snapshot.
                        Ordering::Equal => {
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    changes.push((
                                        entry.path.clone(),
                                        old_snapshot
                                            .repository_entries
                                            .get(&RepositoryWorkDirectory(entry.path.clone()))
                                            .cloned(),
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        // Repository only in the old snapshot: removed.
                        Ordering::Greater => {
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    old_snapshot
                                        .repository_entries
                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
                                        .cloned(),
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((entry.path.clone(), None));
                    }
                    new_repos.next();
                }
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            old_snapshot
                                .repository_entries
                                .get(&RepositoryWorkDirectory(entry.path.clone()))
                                .cloned(),
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Local helper to copy out of the peeked reference pairs.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
927
928 pub fn scan_complete(&self) -> impl Future<Output = ()> {
929 let mut is_scanning_rx = self.is_scanning.1.clone();
930 async move {
931 let mut is_scanning = is_scanning_rx.borrow().clone();
932 while is_scanning {
933 if let Some(value) = is_scanning_rx.recv().await {
934 is_scanning = value;
935 } else {
936 break;
937 }
938 }
939 }
940 }
941
    /// A clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
945
    /// Serializes this worktree's identifying metadata for the wire
    /// protocol (id, root name, visibility, absolute path).
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
954
    /// Reads the file at `path` from disk, returning its `File` handle,
    /// its text, and — when the file belongs to a git repository — the
    /// index text to diff against. Also refreshes the file's entry in
    /// the snapshot so it reflects the just-observed state.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // Kick off loading the git index text concurrently with the
        // file read below.
        if let Some(repo) = snapshot.repository_for_path(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
1009
1010 pub fn save_buffer(
1011 &self,
1012 buffer_handle: ModelHandle<Buffer>,
1013 path: Arc<Path>,
1014 has_changed_file: bool,
1015 cx: &mut ModelContext<Worktree>,
1016 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1017 let handle = cx.handle();
1018 let buffer = buffer_handle.read(cx);
1019
1020 let rpc = self.client.clone();
1021 let buffer_id = buffer.remote_id();
1022 let project_id = self.share.as_ref().map(|share| share.project_id);
1023
1024 let text = buffer.as_rope().clone();
1025 let fingerprint = text.fingerprint();
1026 let version = buffer.version();
1027 let save = self.write_file(path, text, buffer.line_ending(), cx);
1028
1029 cx.as_mut().spawn(|mut cx| async move {
1030 let entry = save.await?;
1031
1032 if has_changed_file {
1033 let new_file = Arc::new(File {
1034 entry_id: entry.id,
1035 worktree: handle,
1036 path: entry.path,
1037 mtime: entry.mtime,
1038 is_local: true,
1039 is_deleted: false,
1040 });
1041
1042 if let Some(project_id) = project_id {
1043 rpc.send(proto::UpdateBufferFile {
1044 project_id,
1045 buffer_id,
1046 file: Some(new_file.to_proto()),
1047 })
1048 .log_err();
1049 }
1050
1051 buffer_handle.update(&mut cx, |buffer, cx| {
1052 if has_changed_file {
1053 buffer.file_updated(new_file, cx).detach();
1054 }
1055 });
1056 }
1057
1058 if let Some(project_id) = project_id {
1059 rpc.send(proto::BufferSaved {
1060 project_id,
1061 buffer_id,
1062 version: serialize_version(&version),
1063 mtime: Some(entry.mtime.into()),
1064 fingerprint: serialize_fingerprint(fingerprint),
1065 })?;
1066 }
1067
1068 buffer_handle.update(&mut cx, |buffer, cx| {
1069 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1070 });
1071
1072 Ok((version, fingerprint, entry.mtime))
1073 })
1074 }
1075
    /// Creates an empty file or directory (per `is_dir`) at the
    /// worktree-relative `path`, then refreshes and returns the new
    /// snapshot entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Create an empty file with default line endings.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
1102
    /// Writes `text` with the given `line_ending` to the
    /// worktree-relative `path`, then refreshes and returns the
    /// corresponding snapshot entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
1125
    /// Deletes the file or directory for `entry_id` from disk
    /// (directories recursively), then asks the background scanner to
    /// rescan the deleted path and waits for that rescan to finish.
    ///
    /// Returns `None` if no entry with that id exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize so the scanner sees the same path the watcher
            // reports; an entry with no file name is the root itself.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // The barrier resolves once the scanner has processed the path.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
1168
1169 pub fn rename_entry(
1170 &self,
1171 entry_id: ProjectEntryId,
1172 new_path: impl Into<Arc<Path>>,
1173 cx: &mut ModelContext<Worktree>,
1174 ) -> Option<Task<Result<Entry>>> {
1175 let old_path = self.entry_for_id(entry_id)?.path.clone();
1176 let new_path = new_path.into();
1177 let abs_old_path = self.absolutize(&old_path);
1178 let abs_new_path = self.absolutize(&new_path);
1179 let fs = self.fs.clone();
1180 let rename = cx.background().spawn(async move {
1181 fs.rename(&abs_old_path, &abs_new_path, Default::default())
1182 .await
1183 });
1184
1185 Some(cx.spawn(|this, mut cx| async move {
1186 rename.await?;
1187 this.update(&mut cx, |this, cx| {
1188 this.as_local_mut()
1189 .unwrap()
1190 .refresh_entry(new_path.clone(), Some(old_path), cx)
1191 })
1192 .await
1193 }))
1194 }
1195
1196 pub fn copy_entry(
1197 &self,
1198 entry_id: ProjectEntryId,
1199 new_path: impl Into<Arc<Path>>,
1200 cx: &mut ModelContext<Worktree>,
1201 ) -> Option<Task<Result<Entry>>> {
1202 let old_path = self.entry_for_id(entry_id)?.path.clone();
1203 let new_path = new_path.into();
1204 let abs_old_path = self.absolutize(&old_path);
1205 let abs_new_path = self.absolutize(&new_path);
1206 let fs = self.fs.clone();
1207 let copy = cx.background().spawn(async move {
1208 copy_recursive(
1209 fs.as_ref(),
1210 &abs_old_path,
1211 &abs_new_path,
1212 Default::default(),
1213 )
1214 .await
1215 });
1216
1217 Some(cx.spawn(|this, mut cx| async move {
1218 copy.await?;
1219 this.update(&mut cx, |this, cx| {
1220 this.as_local_mut()
1221 .unwrap()
1222 .refresh_entry(new_path.clone(), None, cx)
1223 })
1224 .await
1225 }))
1226 }
1227
    /// Rescans `path` (and `old_path`, if the entry was just renamed) via
    /// the background scanner, then returns the up-to-date entry for `path`.
    /// Fails if the worktree is dropped or the path disappears before the
    /// rescan completes.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // An empty relative path (no file name) denotes the worktree
            // root itself, which must not be joined onto the abs path.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // Wait for the scanner to process these paths; the barrier is
            // released when the scan batch completes.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1265
1266 pub fn observe_updates<F, Fut>(
1267 &mut self,
1268 project_id: u64,
1269 cx: &mut ModelContext<Worktree>,
1270 callback: F,
1271 ) -> oneshot::Receiver<()>
1272 where
1273 F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
1274 Fut: Send + Future<Output = bool>,
1275 {
1276 #[cfg(any(test, feature = "test-support"))]
1277 const MAX_CHUNK_SIZE: usize = 2;
1278 #[cfg(not(any(test, feature = "test-support")))]
1279 const MAX_CHUNK_SIZE: usize = 256;
1280
1281 let (share_tx, share_rx) = oneshot::channel();
1282
1283 if let Some(share) = self.share.as_mut() {
1284 share_tx.send(()).ok();
1285 *share.resume_updates.borrow_mut() = ();
1286 return share_rx;
1287 }
1288
1289 let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
1290 let (snapshots_tx, mut snapshots_rx) =
1291 mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
1292 snapshots_tx
1293 .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
1294 .ok();
1295
1296 let worktree_id = cx.model_id() as u64;
1297 let _maintain_remote_snapshot = cx.background().spawn(async move {
1298 let mut is_first = true;
1299 while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
1300 let update;
1301 if is_first {
1302 update = snapshot.build_initial_update(project_id, worktree_id);
1303 is_first = false;
1304 } else {
1305 update =
1306 snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes);
1307 }
1308
1309 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1310 let _ = resume_updates_rx.try_recv();
1311 loop {
1312 let result = callback(update.clone());
1313 if result.await {
1314 break;
1315 } else {
1316 log::info!("waiting to resume updates");
1317 if resume_updates_rx.next().await.is_none() {
1318 return Some(());
1319 }
1320 }
1321 }
1322 }
1323 }
1324 share_tx.send(()).ok();
1325 Some(())
1326 });
1327
1328 self.share = Some(ShareState {
1329 project_id,
1330 snapshots_tx,
1331 resume_updates: resume_updates_tx,
1332 _maintain_remote_snapshot,
1333 });
1334 share_rx
1335 }
1336
1337 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1338 let client = self.client.clone();
1339
1340 for (path, summaries) in &self.diagnostic_summaries {
1341 for (&server_id, summary) in summaries {
1342 if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1343 project_id,
1344 worktree_id: cx.model_id() as u64,
1345 summary: Some(summary.to_proto(server_id, &path)),
1346 }) {
1347 return Task::ready(Err(e));
1348 }
1349 }
1350 }
1351
1352 let rx = self.observe_updates(project_id, cx, move |update| {
1353 client.request(update).map(|result| result.is_ok())
1354 });
1355 cx.foreground()
1356 .spawn(async move { rx.await.map_err(|_| anyhow!("share ended")) })
1357 }
1358
    /// Stops sharing this worktree, dropping the update stream and its
    /// maintenance task.
    pub fn unshare(&mut self) {
        self.share.take();
    }

    /// Whether this worktree is currently being shared.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1366}
1367
1368impl RemoteWorktree {
    /// Returns a clone of the current foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks this worktree as disconnected from the host: stops accepting
    /// remote updates and drops all pending snapshot subscriptions (their
    /// waiters will observe a closed channel).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
1378
    /// Saves the given buffer by asking the host to perform the save over
    /// RPC, then records the save on the buffer. Returns the saved version,
    /// fingerprint, and mtime reported by the host.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            // The host replies with the version/fingerprint/mtime it
            // actually persisted.
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1411
    /// Feeds a worktree update from the host into the background consumer.
    /// Silently drops the update if the worktree has disconnected.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot at least as new as `scan_id` has been applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }
1423
    /// Returns a future that resolves once a snapshot with at least the
    /// given `scan_id` has been observed, or fails if the worktree has
    /// disconnected from the host.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already caught up; resolve immediately.
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` below return an error.
            drop(tx);
        } else {
            // Insert the subscription, keeping the list sorted by scan id.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1444
1445 pub fn update_diagnostic_summary(
1446 &mut self,
1447 path: Arc<Path>,
1448 summary: &proto::DiagnosticSummary,
1449 ) {
1450 let server_id = LanguageServerId(summary.language_server_id as usize);
1451 let summary = DiagnosticSummary {
1452 error_count: summary.error_count as usize,
1453 warning_count: summary.warning_count as usize,
1454 };
1455
1456 if summary.is_empty() {
1457 if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1458 summaries.remove(&server_id);
1459 if summaries.is_empty() {
1460 self.diagnostic_summaries.remove(&path);
1461 }
1462 }
1463 } else {
1464 self.diagnostic_summaries
1465 .entry(path)
1466 .or_default()
1467 .insert(server_id, summary);
1468 }
1469 }
1470
    /// Inserts an entry received from the host once the snapshot for
    /// `scan_id` has been observed, then publishes the updated background
    /// snapshot to the foreground.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                // Keep the foreground snapshot in sync with the background one.
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes the entry with the given id once the snapshot for `scan_id`
    /// has been observed, then publishes the updated background snapshot.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1508}
1509
1510impl Snapshot {
    /// The id of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
1522
    /// Inserts (or replaces) an entry deserialized from the wire, keeping
    /// the by-id and by-path trees consistent with each other.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If the entry moved, drop its stale record at the old path.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
1540
    /// Removes the entry with the given id along with all of its
    /// descendants, returning the removed entry's path.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and all of its descendants, removing
            // their by-id records as we go...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...then re-append the untouched suffix.
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }
1561
1562 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1563 let mut entries_by_path_edits = Vec::new();
1564 let mut entries_by_id_edits = Vec::new();
1565
1566 for entry_id in update.removed_entries {
1567 let entry_id = ProjectEntryId::from_proto(entry_id);
1568 entries_by_id_edits.push(Edit::Remove(entry_id));
1569 if let Some(entry) = self.entry_for_id(entry_id) {
1570 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1571 }
1572 }
1573
1574 for entry in update.updated_entries {
1575 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1576 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1577 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1578 }
1579 if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
1580 if old_entry.id != entry.id {
1581 entries_by_id_edits.push(Edit::Remove(old_entry.id));
1582 }
1583 }
1584 entries_by_id_edits.push(Edit::Insert(PathEntry {
1585 id: entry.id,
1586 path: entry.path.clone(),
1587 is_ignored: entry.is_ignored,
1588 scan_id: 0,
1589 }));
1590 entries_by_path_edits.push(Edit::Insert(entry));
1591 }
1592
1593 self.entries_by_path.edit(entries_by_path_edits, &());
1594 self.entries_by_id.edit(entries_by_id_edits, &());
1595
1596 update.removed_repositories.sort_unstable();
1597 self.repository_entries.retain(|_, entry| {
1598 if let Ok(_) = update
1599 .removed_repositories
1600 .binary_search(&entry.work_directory.to_proto())
1601 {
1602 false
1603 } else {
1604 true
1605 }
1606 });
1607
1608 for repository in update.updated_repositories {
1609 let work_directory_entry: WorkDirectoryEntry =
1610 ProjectEntryId::from_proto(repository.work_directory_id).into();
1611
1612 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1613 let mut statuses = TreeMap::default();
1614 for status_entry in repository.updated_statuses {
1615 let Some(git_file_status) = read_git_status(status_entry.status) else {
1616 continue;
1617 };
1618
1619 let repo_path = RepoPath::new(status_entry.repo_path.into());
1620 statuses.insert(repo_path, git_file_status);
1621 }
1622
1623 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1624 if self.repository_entries.get(&work_directory).is_some() {
1625 self.repository_entries.update(&work_directory, |repo| {
1626 repo.branch = repository.branch.map(Into::into);
1627 repo.statuses.insert_tree(statuses);
1628
1629 for repo_path in repository.removed_repo_paths {
1630 let repo_path = RepoPath::new(repo_path.into());
1631 repo.statuses.remove(&repo_path);
1632 }
1633 });
1634 } else {
1635 self.repository_entries.insert(
1636 work_directory,
1637 RepositoryEntry {
1638 work_directory: work_directory_entry,
1639 branch: repository.branch.map(Into::into),
1640 statuses,
1641 },
1642 )
1643 }
1644 } else {
1645 log::error!("no work directory entry for repository {:?}", repository)
1646 }
1647 }
1648
1649 self.scan_id = update.scan_id as usize;
1650 if update.is_last_update {
1651 self.completed_scan_id = update.scan_id as usize;
1652 }
1653
1654 Ok(())
1655 }
1656
    /// Total number of files (including ignored ones) in the snapshot.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored files in the snapshot.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }
1664
    /// Returns a traversal over entries starting at `start_offset` into the
    /// sequence of entries matching the include flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Returns a traversal positioned at the first entry whose path is at or
    /// after `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }
1702
    /// Iterates over file entries, starting at the given file offset.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries (files and directories) from the beginning.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }
1710
    /// Iterates over all git repositories, keyed by work-directory path.
    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
        self.repository_entries
            .iter()
            .map(|(path, entry)| (&path.0, entry))
    }

    /// Get the repository whose work directory is exactly the given path.
    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
        self.repository_entries
            .get(&RepositoryWorkDirectory(path.into()))
            .cloned()
    }

    /// Get the repository whose work directory contains the given path.
    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
        self.repository_and_work_directory_for_path(path)
            .map(|e| e.1)
    }

    /// Get the innermost repository whose work directory contains the given
    /// path, along with that work directory.
    pub fn repository_and_work_directory_for_path(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
        self.repository_entries
            .iter()
            .filter(|(workdir_path, _)| path.starts_with(workdir_path))
            // Entries iterate in path order, so the last match is the
            // deepest containing work directory.
            .last()
            .map(|(path, repo)| (path.clone(), repo.clone()))
    }
1740
    /// Given an ordered iterator of entries, returns an iterator of those entries,
    /// along with their containing git repository.
    pub fn entries_with_repositories<'a>(
        &'a self,
        entries: impl 'a + Iterator<Item = &'a Entry>,
    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
        // Stack of repositories containing the current entry, outermost first.
        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
        let mut repositories = self.repositories().peekable();
        entries.map(move |entry| {
            // Pop repositories that no longer contain this entry.
            while let Some((repo_path, _)) = containing_repos.last() {
                if !entry.path.starts_with(repo_path) {
                    containing_repos.pop();
                } else {
                    break;
                }
            }
            // Push repositories this entry has just entered. Both sequences
            // are in path order, so this is a single forward merge.
            while let Some((repo_path, _)) = repositories.peek() {
                if entry.path.starts_with(repo_path) {
                    containing_repos.push(repositories.next().unwrap());
                } else {
                    break;
                }
            }
            // The innermost containing repository wins.
            let repo = containing_repos.last().map(|(_, repo)| *repo);
            (entry, repo)
        })
    }
1768
    /// Iterates over the paths of all entries, excluding the root entry
    /// (whose relative path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the immediate children of the directory at
    /// `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        // Seek just past the parent so iteration starts at its first child.
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }
1790
    /// Iterates over all entries beneath `parent_path`, not including the
    /// parent entry itself.
    fn descendent_entries<'a>(
        &'a self,
        include_dirs: bool,
        include_ignored: bool,
        parent_path: &'a Path,
    ) -> DescendentEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
        let mut traversal = Traversal {
            cursor,
            include_dirs,
            include_ignored,
        };

        // Skip the parent entry itself if the cursor landed exactly on it.
        if traversal.end_offset() == traversal.start_offset() {
            traversal.advance();
        }

        DescendentEntriesIter {
            traversal,
            parent_path,
        }
    }
1814
    /// The entry for the worktree root (empty relative path), if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The repository whose work directory is the worktree root, if any.
    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
        self.repository_entries
            .get(&RepositoryWorkDirectory(Path::new("").into()))
            .map(|entry| entry.to_owned())
    }

    /// Iterates over all repository entries in the snapshot.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repository_entries.values()
    }

    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }
1836
1837 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1838 let path = path.as_ref();
1839 self.traverse_from_path(true, true, path)
1840 .entry()
1841 .and_then(|entry| {
1842 if entry.path.as_ref() == path {
1843 Some(entry)
1844 } else {
1845 None
1846 }
1847 })
1848 }
1849
    /// Returns the entry with the given id, if it exists.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// Returns the inode of the entry at the given path, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
1858}
1859
1860impl LocalSnapshot {
    /// Returns the local (on-disk) state for the given repository entry.
    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }

    /// Finds the repository whose `.git` directory contains `path`.
    pub(crate) fn repo_for_metadata(
        &self,
        path: &Path,
    ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
        self.git_repositories
            .iter()
            .find(|(_, repo)| repo.in_dot_git(path))
    }
1873
    /// Builds a delta `UpdateWorktree` message describing the given entry
    /// and repository changes relative to the previous snapshot.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updated_repositories = Vec::new();
        let mut removed_repositories = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }
        for (work_dir_path, old_repo) in repo_changes.iter() {
            // Compare the change's previous repository state against the
            // current snapshot to decide update vs. add vs. remove.
            let new_repo = self
                .repository_entries
                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
            match (old_repo, new_repo) {
                (Some(old_repo), Some(new_repo)) => {
                    updated_repositories.push(new_repo.build_update(old_repo));
                }
                (None, Some(new_repo)) => {
                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
                }
                (Some(old_repo), None) => {
                    removed_repositories.push(old_repo.work_directory.0.to_proto());
                }
                _ => {}
            }
        }

        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);
        removed_repositories.sort_unstable();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        // An id that appears in both lists is treated as updated, not removed.
        // TODO - optimize, knowing that removed_entries are sorted.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }
1932
1933 fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
1934 let mut updated_entries = self
1935 .entries_by_path
1936 .iter()
1937 .map(proto::Entry::from)
1938 .collect::<Vec<_>>();
1939 updated_entries.sort_unstable_by_key(|e| e.id);
1940
1941 let mut updated_repositories = self
1942 .repository_entries
1943 .values()
1944 .map(proto::RepositoryEntry::from)
1945 .collect::<Vec<_>>();
1946 updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
1947
1948 proto::UpdateWorktree {
1949 project_id,
1950 worktree_id,
1951 abs_path: self.abs_path().to_string_lossy().into(),
1952 root_name: self.root_name().to_string(),
1953 updated_entries,
1954 removed_entries: Vec::new(),
1955 scan_id: self.scan_id as u64,
1956 is_last_update: self.completed_scan_id == self.scan_id,
1957 updated_repositories,
1958 removed_repositories: Vec::new(),
1959 }
1960 }
1961
    /// Inserts an entry into the snapshot. If the entry is a `.gitignore`
    /// file, its rules are loaded eagerly; if it is a pending directory that
    /// already exists, the existing kind is preserved.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            // Parse the ignore file synchronously; the `true` flag marks it
            // as needing to be (re)applied to entries beneath its directory.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Don't downgrade an already-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If a different entry previously occupied this path, drop its
        // by-id record.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
2007
2008 fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
2009 let abs_path = self.abs_path.join(&parent_path);
2010 let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
2011
2012 // Guard against repositories inside the repository metadata
2013 if work_dir
2014 .components()
2015 .find(|component| component.as_os_str() == *DOT_GIT)
2016 .is_some()
2017 {
2018 return None;
2019 };
2020
2021 let work_dir_id = self
2022 .entry_for_path(work_dir.clone())
2023 .map(|entry| entry.id)?;
2024
2025 if self.git_repositories.get(&work_dir_id).is_none() {
2026 let repo = fs.open_repo(abs_path.as_path())?;
2027 let work_directory = RepositoryWorkDirectory(work_dir.clone());
2028 let scan_id = self.scan_id;
2029
2030 let repo_lock = repo.lock();
2031
2032 self.repository_entries.insert(
2033 work_directory,
2034 RepositoryEntry {
2035 work_directory: work_dir_id.into(),
2036 branch: repo_lock.branch_name().map(Into::into),
2037 statuses: repo_lock.statuses().unwrap_or_default(),
2038 },
2039 );
2040 drop(repo_lock);
2041
2042 self.git_repositories.insert(
2043 work_dir_id,
2044 LocalRepositoryEntry {
2045 scan_id,
2046 git_dir_scan_id: scan_id,
2047 repo_ptr: repo,
2048 git_dir_path: parent_path.clone(),
2049 },
2050 )
2051 }
2052
2053 Some(())
2054 }
2055
2056 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2057 let mut inodes = TreeSet::default();
2058 for ancestor in path.ancestors().skip(1) {
2059 if let Some(entry) = self.entry_for_path(ancestor) {
2060 inodes.insert(entry.inode);
2061 }
2062 }
2063 inodes
2064 }
2065
2066 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2067 let mut new_ignores = Vec::new();
2068 for ancestor in abs_path.ancestors().skip(1) {
2069 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2070 new_ignores.push((ancestor, Some(ignore.clone())));
2071 } else {
2072 new_ignores.push((ancestor, None));
2073 }
2074 }
2075
2076 let mut ignore_stack = IgnoreStack::none();
2077 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2078 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2079 ignore_stack = IgnoreStack::all();
2080 break;
2081 } else if let Some(ignore) = ignore {
2082 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2083 }
2084 }
2085
2086 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2087 ignore_stack = IgnoreStack::all();
2088 }
2089
2090 ignore_stack
2091 }
2092}
2093
2094impl BackgroundScannerState {
    /// Reuses a prior entry id for `entry` when possible: first the id of a
    /// recently-removed entry with the same inode, otherwise the id of an
    /// existing entry at the same path. Keeps entry identity stable across
    /// renames and rescans.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Inserts an entry into the snapshot, reusing a prior id if one applies.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        self.reuse_entry_id(&mut entry);
        self.snapshot.insert_entry(entry, fs)
    }
2107
    /// Populates a scanned directory with its child entries, promoting the
    /// directory from pending to loaded, and registering its gitignore and
    /// git repository if present.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) = self
            .snapshot
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        // Only directories can be populated; a pending dir becomes a real one.
        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
            self.snapshot
                .ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        // Populating a `.git` directory means its parent is a repository
        // work directory.
        if parent_path.file_name() == Some(&DOT_GIT) {
            self.snapshot.build_repo(parent_path, fs);
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.snapshot.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2167
    /// Removes the entry at `path` and all of its descendants from the
    /// snapshot, remembering removed entry ids by inode so they can be
    /// reused if the same file reappears.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into: entries before `path`, the removed
            // subtree, and the remaining suffix.
            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.snapshot.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the largest removed id per inode for potential reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // If a gitignore file was removed, flag its directory's ignore data
        // as needing an update.
        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
            if let Some((_, needs_update)) = self
                .snapshot
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *needs_update = true;
            }
        }
    }
2201}
2202
2203async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2204 let contents = fs.load(abs_path).await?;
2205 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2206 let mut builder = GitignoreBuilder::new(parent);
2207 for line in contents.lines() {
2208 builder.add_line(Some(abs_path.into()), line)?;
2209 }
2210 Ok(builder.build()?)
2211}
2212
impl WorktreeId {
    /// Creates a worktree id from a model handle's raw id.
    pub fn from_usize(handle_id: usize) -> Self {
        Self(handle_id)
    }

    /// Deserializes a worktree id from its wire representation.
    pub(crate) fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    /// Serializes this id for the wire protocol.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// Returns the raw underlying id.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
2230
2231impl fmt::Display for WorktreeId {
2232 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2233 self.0.fmt(f)
2234 }
2235}
2236
2237impl Deref for Worktree {
2238 type Target = Snapshot;
2239
2240 fn deref(&self) -> &Self::Target {
2241 match self {
2242 Worktree::Local(worktree) => &worktree.snapshot,
2243 Worktree::Remote(worktree) => &worktree.snapshot,
2244 }
2245 }
2246}
2247
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    /// Lets a local worktree be used anywhere its snapshot is expected.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2255
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    /// Lets a remote worktree be used anywhere its snapshot is expected.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2263
2264impl fmt::Debug for LocalWorktree {
2265 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2266 self.snapshot.fmt(f)
2267 }
2268}
2269
2270impl fmt::Debug for Snapshot {
2271 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2272 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2273 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2274
2275 impl<'a> fmt::Debug for EntriesByPath<'a> {
2276 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2277 f.debug_map()
2278 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2279 .finish()
2280 }
2281 }
2282
2283 impl<'a> fmt::Debug for EntriesById<'a> {
2284 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2285 f.debug_list().entries(self.0.iter()).finish()
2286 }
2287 }
2288
2289 f.debug_struct("Snapshot")
2290 .field("id", &self.id)
2291 .field("root_name", &self.root_name)
2292 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2293 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2294 .finish()
2295 }
2296}
2297
/// A handle to a file within a worktree, usable as a buffer's backing file.
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    // Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    // True when this file belongs to a local (not remote) worktree.
    pub(crate) is_local: bool,
    pub(crate) is_deleted: bool,
}
2307
2308impl language::File for File {
2309 fn as_local(&self) -> Option<&dyn language::LocalFile> {
2310 if self.is_local {
2311 Some(self)
2312 } else {
2313 None
2314 }
2315 }
2316
2317 fn mtime(&self) -> SystemTime {
2318 self.mtime
2319 }
2320
2321 fn path(&self) -> &Arc<Path> {
2322 &self.path
2323 }
2324
2325 fn full_path(&self, cx: &AppContext) -> PathBuf {
2326 let mut full_path = PathBuf::new();
2327 let worktree = self.worktree.read(cx);
2328
2329 if worktree.is_visible() {
2330 full_path.push(worktree.root_name());
2331 } else {
2332 let path = worktree.abs_path();
2333
2334 if worktree.is_local() && path.starts_with(HOME.as_path()) {
2335 full_path.push("~");
2336 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2337 } else {
2338 full_path.push(path)
2339 }
2340 }
2341
2342 if self.path.components().next().is_some() {
2343 full_path.push(&self.path);
2344 }
2345
2346 full_path
2347 }
2348
2349 /// Returns the last component of this handle's absolute path. If this handle refers to the root
2350 /// of its worktree, then this method will return the name of the worktree itself.
2351 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2352 self.path
2353 .file_name()
2354 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2355 }
2356
2357 fn is_deleted(&self) -> bool {
2358 self.is_deleted
2359 }
2360
2361 fn as_any(&self) -> &dyn Any {
2362 self
2363 }
2364
2365 fn to_proto(&self) -> rpc::proto::File {
2366 rpc::proto::File {
2367 worktree_id: self.worktree.id() as u64,
2368 entry_id: self.entry_id.to_proto(),
2369 path: self.path.to_string_lossy().into(),
2370 mtime: Some(self.mtime.into()),
2371 is_deleted: self.is_deleted,
2372 }
2373 }
2374}
2375
2376impl language::LocalFile for File {
2377 fn abs_path(&self, cx: &AppContext) -> PathBuf {
2378 self.worktree
2379 .read(cx)
2380 .as_local()
2381 .unwrap()
2382 .abs_path
2383 .join(&self.path)
2384 }
2385
2386 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2387 let worktree = self.worktree.read(cx).as_local().unwrap();
2388 let abs_path = worktree.absolutize(&self.path);
2389 let fs = worktree.fs.clone();
2390 cx.background()
2391 .spawn(async move { fs.load(&abs_path).await })
2392 }
2393
2394 fn buffer_reloaded(
2395 &self,
2396 buffer_id: u64,
2397 version: &clock::Global,
2398 fingerprint: RopeFingerprint,
2399 line_ending: LineEnding,
2400 mtime: SystemTime,
2401 cx: &mut AppContext,
2402 ) {
2403 let worktree = self.worktree.read(cx).as_local().unwrap();
2404 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2405 worktree
2406 .client
2407 .send(proto::BufferReloaded {
2408 project_id,
2409 buffer_id,
2410 version: serialize_version(version),
2411 mtime: Some(mtime.into()),
2412 fingerprint: serialize_fingerprint(fingerprint),
2413 line_ending: serialize_line_ending(line_ending) as i32,
2414 })
2415 .log_err();
2416 }
2417 }
2418}
2419
2420impl File {
2421 pub fn from_proto(
2422 proto: rpc::proto::File,
2423 worktree: ModelHandle<Worktree>,
2424 cx: &AppContext,
2425 ) -> Result<Self> {
2426 let worktree_id = worktree
2427 .read(cx)
2428 .as_remote()
2429 .ok_or_else(|| anyhow!("not remote"))?
2430 .id();
2431
2432 if worktree_id.to_proto() != proto.worktree_id {
2433 return Err(anyhow!("worktree id does not match file"));
2434 }
2435
2436 Ok(Self {
2437 worktree,
2438 path: Path::new(&proto.path).into(),
2439 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2440 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2441 is_local: false,
2442 is_deleted: proto.is_deleted,
2443 })
2444 }
2445
2446 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2447 file.and_then(|f| f.as_any().downcast_ref())
2448 }
2449
2450 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2451 self.worktree.read(cx).id()
2452 }
2453
2454 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2455 if self.is_deleted {
2456 None
2457 } else {
2458 Some(self.entry_id)
2459 }
2460 }
2461}
2462
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // True when the entry is matched by a gitignore rule.
    pub is_ignored: bool,
}
2473
/// Distinguishes files from directories, and directories whose contents have
/// not been scanned yet (`PendingDir`) from fully-scanned ones (`Dir`).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    PendingDir,
    Dir,
    // Files carry a character bag used for fuzzy matching.
    File(CharBag),
}
2480
/// The kind of change observed for a path in the worktree.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
2496
/// Batch of entries whose paths changed, with the kind of change for each.
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// Batch of git work directories whose repository state changed — presumably
/// `None` marks a removed repository; confirm against consumers.
pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, Option<RepositoryEntry>)]>;
2499
2500impl Entry {
2501 fn new(
2502 path: Arc<Path>,
2503 metadata: &fs::Metadata,
2504 next_entry_id: &AtomicUsize,
2505 root_char_bag: CharBag,
2506 ) -> Self {
2507 Self {
2508 id: ProjectEntryId::new(next_entry_id),
2509 kind: if metadata.is_dir {
2510 EntryKind::PendingDir
2511 } else {
2512 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2513 },
2514 path,
2515 inode: metadata.inode,
2516 mtime: metadata.mtime,
2517 is_symlink: metadata.is_symlink,
2518 is_ignored: false,
2519 }
2520 }
2521
2522 pub fn is_dir(&self) -> bool {
2523 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2524 }
2525
2526 pub fn is_file(&self) -> bool {
2527 matches!(self.kind, EntryKind::File(_))
2528 }
2529}
2530
2531impl sum_tree::Item for Entry {
2532 type Summary = EntrySummary;
2533
2534 fn summary(&self) -> Self::Summary {
2535 let visible_count = if self.is_ignored { 0 } else { 1 };
2536 let file_count;
2537 let visible_file_count;
2538 if self.is_file() {
2539 file_count = 1;
2540 visible_file_count = visible_count;
2541 } else {
2542 file_count = 0;
2543 visible_file_count = 0;
2544 }
2545
2546 EntrySummary {
2547 max_path: self.path.clone(),
2548 count: 1,
2549 visible_count,
2550 file_count,
2551 visible_file_count,
2552 }
2553 }
2554}
2555
2556impl sum_tree::KeyedItem for Entry {
2557 type Key = PathKey;
2558
2559 fn key(&self) -> Self::Key {
2560 PathKey(self.path.clone())
2561 }
2562}
2563
/// Aggregated statistics for a subtree of `Entry` items.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest (rightmost) path in the subtree.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that are not gitignored.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are files and not gitignored.
    visible_file_count: usize,
}
2572
2573impl Default for EntrySummary {
2574 fn default() -> Self {
2575 Self {
2576 max_path: Arc::from(Path::new("")),
2577 count: 0,
2578 visible_count: 0,
2579 file_count: 0,
2580 visible_file_count: 0,
2581 }
2582 }
2583}
2584
2585impl sum_tree::Summary for EntrySummary {
2586 type Context = ();
2587
2588 fn add_summary(&mut self, rhs: &Self, _: &()) {
2589 self.max_path = rhs.max_path.clone();
2590 self.count += rhs.count;
2591 self.visible_count += rhs.visible_count;
2592 self.file_count += rhs.file_count;
2593 self.visible_file_count += rhs.visible_file_count;
2594 }
2595}
2596
/// An entry indexed by id, used for id -> path lookups.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // Scan pass in which this entry was last touched.
    scan_id: usize,
}
2604
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// Summarizes a single entry by its id (the tree's ordering dimension).
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2612
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Entries in this tree are keyed by their project entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2620
/// Summary for the id-keyed entry tree: tracks the greatest id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2625
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// Ids are ordered, so the right-hand summary always carries the maximum.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2633
// Allows seeking through the id-keyed tree by `ProjectEntryId`.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2639
/// Ordering key for the path-sorted entry tree: a worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2642
2643impl Default for PathKey {
2644 fn default() -> Self {
2645 Self(Path::new("").into())
2646 }
2647}
2648
2649impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2650 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2651 self.0 = summary.max_path.clone();
2652 }
2653}
2654
/// Scans the worktree's directory on a background executor and streams
/// snapshot updates back to the worktree.
struct BackgroundScanner {
    // Mutable scan state, shared between the worker tasks.
    state: Mutex<BackgroundScannerState>,
    fs: Arc<dyn Fs>,
    // Channel on which snapshot/progress updates are published.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    // Requests from the worktree to (re)scan specific paths.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    next_entry_id: Arc<AtomicUsize>,
    phase: BackgroundScannerPhase,
}
2664
/// Which stage of its lifecycle the scanner is in; later phases handle
/// filesystem events rather than the initial recursive scan.
#[derive(PartialEq)]
enum BackgroundScannerPhase {
    InitialScan,
    // FS events that arrived while the initial scan was still running.
    EventsReceivedDuringInitialScan,
    Events,
}
2671
2672impl BackgroundScanner {
2673 fn new(
2674 snapshot: LocalSnapshot,
2675 next_entry_id: Arc<AtomicUsize>,
2676 fs: Arc<dyn Fs>,
2677 status_updates_tx: UnboundedSender<ScanState>,
2678 executor: Arc<executor::Background>,
2679 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2680 ) -> Self {
2681 Self {
2682 fs,
2683 status_updates_tx,
2684 executor,
2685 refresh_requests_rx,
2686 next_entry_id,
2687 state: Mutex::new(BackgroundScannerState {
2688 prev_snapshot: snapshot.snapshot.clone(),
2689 snapshot,
2690 removed_entry_ids: Default::default(),
2691 changed_paths: Default::default(),
2692 }),
2693 phase: BackgroundScannerPhase::InitialScan,
2694 }
2695 }
2696
    /// Main loop of the scanner: performs the initial recursive scan, then
    /// processes filesystem events and refresh requests until the worktree
    /// (and with it the channels) is dropped.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = &self.state.lock().snapshot;
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.state
                    .lock()
                    .snapshot
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), false));
            }
        }
        {
            let mut state = self.state.lock();
            state.snapshot.scan_id += 1;
            ignore_stack = state
                .snapshot
                .ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore matches the root itself, mark the root
            // entry ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    state.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping the sender lets `scan_dirs` finish once the queue drains.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut state = self.state.lock();
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Coalesce any further events that are already pending.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        // Continue processing events until the worktree is dropped.
        self.phase = BackgroundScannerPhase::Events;
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths.clone(), barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Batch up all events that are already pending before processing.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths.clone()).await;
                }
            }
        }
    }
2791
    /// Rescans the requested paths, then publishes a status update carrying
    /// `barrier` so the requester can await completion. Returns false if the
    /// status channel is closed.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2796
    /// Handles a batch of filesystem event paths: reloads the affected
    /// entries, rescans any changed directories, refreshes ignore statuses and
    /// git repository state, then publishes a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        let paths = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await;
        // Dropping the sender lets `scan_dirs` terminate once the queue drains.
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        {
            let mut snapshot = &mut self.state.lock().snapshot;

            if let Some(paths) = paths {
                for path in paths {
                    self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
                }
            }

            // Prune git repositories whose work-directory entry or `.git`
            // child no longer exists in the snapshot.
            let mut git_repositories = mem::take(&mut snapshot.git_repositories);
            git_repositories.retain(|work_directory_id, _| {
                snapshot
                    .entry_for_id(*work_directory_id)
                    .map_or(false, |entry| {
                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                    })
            });
            snapshot.git_repositories = git_repositories;

            // Drop repository entries that no longer have a backing repo.
            let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
            git_repository_entries.retain(|_, entry| {
                snapshot
                    .git_repositories
                    .get(&entry.work_directory.0)
                    .is_some()
            });
            snapshot.snapshot.repository_entries = git_repository_entries;
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        self.send_status_update(false, None);
    }
2839
    /// Drains the scan-job queue using one worker per CPU, optionally sending
    /// periodic progress updates. Returns when the queue is exhausted (all
    /// senders dropped) or the status channel is closed.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race: report progress.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already reported; catch up.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2912
    /// Diffs the current snapshot against the previously reported one and
    /// sends the resulting change set on the status channel. Returns false if
    /// the receiver has been dropped. Mid-scan updates with no changed paths
    /// are skipped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut state = self.state.lock();
        if state.changed_paths.is_empty() && scanning {
            return true;
        }

        let new_snapshot = state.snapshot.clone();
        // Remember this snapshot as the baseline for the next diff.
        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
        let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
        state.changed_paths.clear();

        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot: new_snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2933
    /// Scans one directory: reads its children, computes their ignore status,
    /// records the results in the snapshot, and enqueues child directories for
    /// further scanning.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory child, in order; `None` marks a directory
        // that must not be recursed into (cyclic symlink).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = &self.state.lock().snapshot;
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                self.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // Entry vanished between the directory read and the stat.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in the current
                // directory. Because `.gitignore` starts with a `.`, it sorts
                // near the beginning of the listing, so there should rarely be
                // many such entries. Update the ignore stack associated with
                // any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // Jobs were pushed in the same order as directory
                        // entries, so the iterators stay in lockstep.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        {
            // Commit the children and mark this directory as changed.
            let mut state = self.state.lock();
            state.populate_dir(job.path.clone(), new_entries, new_ignore, self.fs.as_ref());
            if let Err(ix) = state.changed_paths.binary_search(&job.path) {
                state.changed_paths.insert(ix, job.path.clone());
            }
        }

        // Enqueue the child directories after releasing the state lock.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
3060
    /// Refreshes the snapshot entries for the given absolute paths. When
    /// `scan_queue_tx` is provided, changed directories are enqueued for a
    /// recursive rescan. Returns the affected worktree-relative paths, or
    /// `None` if the worktree root could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sort so descendants follow their ancestors, then drop any path whose
        // ancestor is already in the list.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the state lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut state = self.state.lock();
        let snapshot = &mut state.snapshot;
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    state.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        // Re-insert entries for paths that still exist.
        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    state.insert_entry(fs_entry, self.fs.as_ref());

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = state.snapshot.ancestor_inodes_for_path(&path);
                        // Recurse into directories, guarding against symlink cycles.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {
                    // Path is gone; clean up any git repository state for it.
                    self.remove_repo_path(&path, &mut state.snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        util::extend_sorted(
            &mut state.changed_paths,
            event_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );

        Some(event_paths)
    }
3159
    /// Updates git repository state after `path` was removed: drops the
    /// repository if `path` was a work directory, otherwise clears the git
    /// statuses recorded under `path`. Paths inside a `.git` directory are
    /// left alone. Returns `None` when an intermediate lookup fails.
    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        if !path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let scan_id = snapshot.scan_id;

            // The removed path was itself a repository's work directory.
            if let Some(repository) = snapshot.repository_for_work_directory(path) {
                let entry = repository.work_directory.0;
                snapshot.git_repositories.remove(&entry);
                snapshot
                    .snapshot
                    .repository_entries
                    .remove(&RepositoryWorkDirectory(path.into()));
                return Some(());
            }

            // Otherwise, the path lived inside some repository's work directory.
            let repo = snapshot.repository_for_path(&path)?;
            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;
            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            // Drop the statuses of the removed path and everything below it.
            snapshot.repository_entries.update(&work_dir, |entry| {
                entry
                    .statuses
                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
            });
        }

        Some(())
    }
3195
    /// Refreshes git repository state for a changed `path`. A change inside a
    /// `.git` directory triggers a full index reload (or repository creation);
    /// a change to a tracked work-directory file refreshes just the statuses
    /// of that path's descendants. Returns `None` when nothing needed doing or
    /// a lookup failed.
    fn reload_repo_for_file_path(
        &self,
        path: &Path,
        snapshot: &mut LocalSnapshot,
        fs: &dyn Fs,
    ) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let (entry_id, repo_ptr) = {
                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
                    // No repository known for this `.git` path yet — build one.
                    let dot_git_dir = path.ancestors()
                        .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
                        .next()?;

                    snapshot.build_repo(dot_git_dir.into(), fs);
                    return None;
                };
                // Already refreshed during this scan pass.
                if repo.git_dir_scan_id == scan_id {
                    return None;
                }
                (*entry_id, repo.repo_ptr.to_owned())
            };

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            // Reload the repository's index and pull fresh branch/status data.
            let repo = repo_ptr.lock();
            repo.reload_index();
            let branch = repo.branch_name();
            let statuses = repo.statuses().unwrap_or_default();

            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.scan_id = scan_id;
                entry.git_dir_scan_id = scan_id;
            });

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.branch = branch.map(Into::into);
                entry.statuses = statuses;
            });
        } else {
            // Ignored files are not tracked; clear any stale repo state.
            if snapshot
                .entry_for_path(&path)
                .map(|entry| entry.is_ignored)
                .unwrap_or(false)
            {
                self.remove_repo_path(&path, snapshot);
                return None;
            }

            let repo = snapshot.repository_for_path(&path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory.clone();

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            let local_repo = snapshot.get_local_repo(&repo)?.to_owned();

            // Short circuit if we've already scanned everything
            if local_repo.git_dir_scan_id == scan_id {
                return None;
            }

            // Take the entry out, refresh the statuses beneath `path`, and
            // put it back.
            let mut repository = snapshot.repository_entries.remove(&work_dir)?;

            for entry in snapshot.descendent_entries(false, false, path) {
                let Some(repo_path) = repo.work_directory.relativize(snapshot, &entry.path) else {
                    continue;
                };

                let status = local_repo.repo_ptr.lock().status(&repo_path);
                if let Some(status) = status {
                    repository.statuses.insert(repo_path.clone(), status);
                } else {
                    repository.statuses.remove(&repo_path);
                }
            }

            snapshot.repository_entries.insert(work_dir, repository)
        }

        Some(())
    }
3287
    /// Recomputes entries' ignored flags for every directory whose gitignore
    /// data was marked dirty, removing bookkeeping for gitignore files that no
    /// longer exist. The recursive recomputation fans out over all CPUs.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        // Work on a clone of the snapshot; edits are applied via
        // `update_ignore_status` against the shared state.
        let mut snapshot = self.state.lock().snapshot.clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        let abs_path = snapshot.abs_path.clone();
        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
                if *needs_update {
                    *needs_update = false;
                    // Only update directories that still exist in the snapshot.
                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                        ignores_to_update.push(parent_abs_path.clone());
                    }
                }

                // The .gitignore file itself is gone; forget its parsed form.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.state
                .lock()
                .snapshot
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants: updating an ancestor recurses into them anyway.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        // Dropping the sender lets the workers below finish once the queue drains.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
3368
    /// Recomputes `is_ignored` for the direct children of `job.abs_path`,
    /// enqueueing a follow-up job for each child directory. Entries whose
    /// status changed are written back into the shared scanner state.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // If this directory has its own .gitignore, its rules apply to the children.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                // Everything under an ignored directory is ignored, so pass
                // IgnoreStack::all() instead of re-evaluating rules below it.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose ignore status actually flipped.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let state = &mut self.state.lock();
        // Record the changed paths (kept sorted via binary_search insertion)
        // so change events can be emitted later.
        for edit in &entries_by_path_edits {
            if let Edit::Insert(entry) = edit {
                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
                    state.changed_paths.insert(ix, entry.path.clone());
                }
            }
        }

        state
            .snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
3422
    /// Diffs two snapshots, restricted to `event_paths` and their subtrees,
    /// producing the set of per-entry changes to emit as events.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> UpdatedEntriesSet {
        use BackgroundScannerPhase::*;
        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

        // Identify which paths have changed. Use the known set of changed
        // parent paths to optimize the search.
        let mut changes = Vec::new();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        old_paths.next(&());
        new_paths.next(&());
        for path in event_paths {
            let path = PathKey(path.clone());
            // Fast-forward each cursor to the event path if it is behind it.
            if old_paths.item().map_or(false, |e| e.path < path.0) {
                old_paths.seek_forward(&path, Bias::Left, &());
            }
            if new_paths.item().map_or(false, |e| e.path < path.0) {
                new_paths.seek_forward(&path, Bias::Left, &());
            }

            // Walk both cursors in lockstep, merging the two sorted entry
            // sequences within this event path's subtree.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path and out of its subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                // Present only in the old snapshot: removed.
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if self.phase == EventsReceivedDuringInitialScan {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.push((
                                        new_entry.path.clone(),
                                        new_entry.id,
                                        AddedOrUpdated,
                                    ));
                                } else if old_entry.id != new_entry.id {
                                    // Same path but a different entry id:
                                    // report it as a remove plus an add.
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
                                } else if old_entry != new_entry {
                                    changes.push((new_entry.path.clone(), new_entry.id, Updated));
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                // Present only in the new snapshot.
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    if self.phase == InitialScan {
                                        Loaded
                                    } else {
                                        Added
                                    },
                                ));
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.push((
                            new_entry.path.clone(),
                            new_entry.id,
                            if self.phase == InitialScan {
                                Loaded
                            } else {
                                Added
                            },
                        ));
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes.into()
    }
3520
    /// Delay between scan-progress notifications. When `running` is false this
    /// never resolves, so a `select!` over it effectively disables the timer.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        // Under the deterministic test executor, use a simulated random delay
        // instead of wall-clock time.
        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
3533}
3534
3535fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3536 let mut result = root_char_bag;
3537 result.extend(
3538 path.to_string_lossy()
3539 .chars()
3540 .map(|c| c.to_ascii_lowercase()),
3541 );
3542 result
3543}
3544
/// A unit of work for the background scanner: scan the directory at
/// `abs_path`, sending jobs for its subdirectories back onto `scan_queue`.
struct ScanJob {
    abs_path: Arc<Path>,
    // Worktree-relative path of the directory being scanned.
    path: Arc<Path>,
    // Gitignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories — presumably used to stop descending
    // into symlink cycles (see test_circular_symlinks); confirm in scanner.
    ancestor_inodes: TreeSet<u64>,
}
3552
/// A unit of work for re-evaluating ignore status: recompute `is_ignored`
/// for the children of `abs_path`, enqueueing follow-up jobs for child
/// directories onto `ignore_queue`.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    // Gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3558
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all pending FS events have been processed, so subsequent
    /// assertions observe a settled worktree snapshot.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3566
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait until the worktree notices it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then remove it and wait for that to be observed too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3605
/// A `sum_tree` dimension tracking how many entries precede a point in the
/// entry tree, bucketed by visibility (non-ignored) and file/directory kind.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Rightmost path seen so far; used for path-based seeks.
    max_path: &'a Path,
    // All entries.
    count: usize,
    // Entries that are not gitignored.
    visible_count: usize,
    // File (non-directory) entries.
    file_count: usize,
    // Non-ignored file entries.
    visible_file_count: usize,
}
3614
3615impl<'a> TraversalProgress<'a> {
3616 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3617 match (include_ignored, include_dirs) {
3618 (true, true) => self.count,
3619 (true, false) => self.file_count,
3620 (false, true) => self.visible_count,
3621 (false, false) => self.visible_file_count,
3622 }
3623 }
3624}
3625
3626impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3627 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3628 self.max_path = summary.max_path.as_ref();
3629 self.count += summary.count;
3630 self.visible_count += summary.visible_count;
3631 self.file_count += summary.file_count;
3632 self.visible_file_count += summary.visible_file_count;
3633 }
3634}
3635
3636impl<'a> Default for TraversalProgress<'a> {
3637 fn default() -> Self {
3638 Self {
3639 max_path: Path::new(""),
3640 count: 0,
3641 visible_count: 0,
3642 file_count: 0,
3643 visible_file_count: 0,
3644 }
3645 }
3646}
3647
/// A cursor over worktree entries in path order, optionally filtering out
/// directories and/or gitignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3653
impl<'a> Traversal<'a> {
    /// Advances to the next entry that matches the traversal's filters,
    /// returning the result of the underlying cursor seek.
    pub fn advance(&mut self) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + 1,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Advances past the current entry's entire subtree, then keeps seeking
    /// until an entry matching the filters is found. Returns false if the
    /// traversal is exhausted first.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // PathSuccessor seeks to the first entry that is not a
            // descendant of `entry.path`.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor currently points at, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of filter-matching entries strictly before the current one.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }

    /// Number of filter-matching entries up to and including the current one.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_dirs, self.include_ignored)
    }
}
3701
3702impl<'a> Iterator for Traversal<'a> {
3703 type Item = &'a Entry;
3704
3705 fn next(&mut self) -> Option<Self::Item> {
3706 if let Some(item) = self.entry() {
3707 self.advance();
3708 Some(item)
3709 } else {
3710 None
3711 }
3712 }
3713}
3714
/// Seek targets for positioning a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // The entry at this exact path.
    Path(&'a Path),
    // The first entry after this path that is not a descendant of it.
    PathSuccessor(&'a Path),
    // The entry at the given index, counting only entries that match the
    // requested directory/ignored filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3725
3726impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3727 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3728 match self {
3729 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3730 TraversalTarget::PathSuccessor(path) => {
3731 if !cursor_location.max_path.starts_with(path) {
3732 Ordering::Equal
3733 } else {
3734 Ordering::Greater
3735 }
3736 }
3737 TraversalTarget::Count {
3738 count,
3739 include_dirs,
3740 include_ignored,
3741 } => Ord::cmp(
3742 count,
3743 &cursor_location.count(*include_dirs, *include_ignored),
3744 ),
3745 }
3746 }
3747}
3748
/// Iterator over the direct children of `parent_path`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3753
3754impl<'a> Iterator for ChildEntriesIter<'a> {
3755 type Item = &'a Entry;
3756
3757 fn next(&mut self) -> Option<Self::Item> {
3758 if let Some(item) = self.traversal.entry() {
3759 if item.path.starts_with(&self.parent_path) {
3760 self.traversal.advance_to_sibling();
3761 return Some(item);
3762 }
3763 }
3764 None
3765 }
3766}
3767
/// Iterator over all entries beneath `parent_path`, recursively, in path order.
struct DescendentEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3772
3773impl<'a> Iterator for DescendentEntriesIter<'a> {
3774 type Item = &'a Entry;
3775
3776 fn next(&mut self) -> Option<Self::Item> {
3777 if let Some(item) = self.traversal.entry() {
3778 if item.path.starts_with(&self.parent_path) {
3779 self.traversal.advance();
3780 return Some(item);
3781 }
3782 }
3783 None
3784 }
3785}
3786
3787impl<'a> From<&'a Entry> for proto::Entry {
3788 fn from(entry: &'a Entry) -> Self {
3789 Self {
3790 id: entry.id.to_proto(),
3791 is_dir: entry.is_dir(),
3792 path: entry.path.to_string_lossy().into(),
3793 inode: entry.inode,
3794 mtime: Some(entry.mtime.into()),
3795 is_symlink: entry.is_symlink,
3796 is_ignored: entry.is_ignored,
3797 }
3798 }
3799}
3800
3801impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3802 type Error = anyhow::Error;
3803
3804 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3805 if let Some(mtime) = entry.mtime {
3806 let kind = if entry.is_dir {
3807 EntryKind::Dir
3808 } else {
3809 let mut char_bag = *root_char_bag;
3810 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3811 EntryKind::File(char_bag)
3812 };
3813 let path: Arc<Path> = PathBuf::from(entry.path).into();
3814 Ok(Entry {
3815 id: ProjectEntryId::from_proto(entry.id),
3816 kind,
3817 path,
3818 inode: entry.inode,
3819 mtime: mtime.into(),
3820 is_symlink: entry.is_symlink,
3821 is_ignored: entry.is_ignored,
3822 })
3823 } else {
3824 Err(anyhow!(
3825 "missing mtime in remote worktree entry {:?}",
3826 entry.path
3827 ))
3828 }
3829 }
3830}
3831
3832#[cfg(test)]
3833mod tests {
3834 use super::*;
3835 use fs::{FakeFs, RealFs};
3836 use gpui::{executor::Deterministic, TestAppContext};
3837 use pretty_assertions::assert_eq;
3838 use rand::prelude::*;
3839 use serde_json::json;
3840 use std::{env, fmt::Write};
3841 use util::{http::FakeHttpClient, test::temp_tree};
3842
    // Verifies that entry traversal respects the `include_ignored` flag:
    // "a/b" is gitignored and must only appear when ignored entries are
    // requested.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Without ignored entries, "a/b" is filtered out.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // With ignored entries, the full tree is visible.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3900
    // Exercises `descendent_entries` with every combination of the
    // include-dirs / include-ignored flags, including empty directories and a
    // gitignored subtree ("i/j").
    #[gpui::test]
    async fn test_descendent_entries(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "a": "",
                "b": {
                    "c": {
                        "d": ""
                    },
                    "e": {}
                },
                "f": "",
                "g": {
                    "h": {}
                },
                "i": {
                    "j": {
                        "k": ""
                    },
                    "l": {

                    }
                },
                ".gitignore": "i/j\n",
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Files only: the empty dir "b/e" is excluded.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("b"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("b/c/d"),]
            );
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("b"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new("b"),
                    Path::new("b/c"),
                    Path::new("b/c/d"),
                    Path::new("b/e"),
                ]
            );

            // "g" contains only directories, so files-only is empty.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("g"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                Vec::<PathBuf>::new()
            );
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("g"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("g"), Path::new("g/h"),]
            );

            // "i/j" is gitignored: its file only appears with include_ignored.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                Vec::<PathBuf>::new()
            );
            assert_eq!(
                tree.descendent_entries(false, true, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("i/j/k")]
            );
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("i"), Path::new("i/l"),]
            );
        })
    }
3999
    // Verifies that the scanner terminates on symlink cycles (each symlink
    // points back at an ancestor) and lists the symlinks themselves without
    // recursing through them, including after a rename.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Both symlinks point back up to "/root/lib", forming cycles.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks must be reflected in the snapshot.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
4079
    // Verifies gitignore handling on a real filesystem: rules from an
    // ancestor directory's .gitignore outside the worktree root still apply,
    // and newly created files pick up the correct ignore status on rescan.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        // .gitignores are handled explicitly by Zed and do not use the git
        // machinery that the git_tests module checks
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            // Ignored by the .gitignore in the worktree's parent directory.
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // New files created after the initial scan must get the same statuses.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
4160
    // Verifies that files written through the worktree show up as entries and
    // carry the correct ignore status, even inside an ignored directory.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4214
    // Creates a directory while the initial scan is still running and checks
    // that observers replaying the update stream converge on the same
    // snapshot as the worktree itself.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Mirror the worktree into a second snapshot by applying every
        // broadcast update to it.
        let snapshot1 = tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let snapshot = Arc::new(Mutex::new(tree.snapshot()));
            let _ = tree.observe_updates(0, cx, {
                let snapshot = snapshot.clone();
                move |update| {
                    snapshot.lock().apply_remote_update(update).unwrap();
                    async { true }
                }
            });
            snapshot
        });

        // Create "a/e" before the initial scan has finished.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // The update-stream replica must match the worktree's own snapshot.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        assert_eq!(
            snapshot1.lock().entries(true).collect::<Vec<_>>(),
            snapshot2.entries(true).collect::<Vec<_>>()
        );
    }
4275
    // Randomized test: performs worktree mutations while the initial scan is
    // in flight, then checks that every intermediate snapshot can be brought
    // up to date by replaying the broadcast updates.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Record the stream of updates so snapshots can be replayed forward.
        let mut snapshots =
            vec![worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot())];
        let updates = Arc::new(Mutex::new(Vec::new()));
        worktree.update(cx, |tree, cx| {
            check_worktree_change_events(tree, cx);

            let _ = tree.as_local_mut().unwrap().observe_updates(0, cx, {
                let updates = updates.clone();
                move |update| {
                    updates.lock().push(update);
                    async { true }
                }
            });
        });

        // Mutate while the initial scan is still running, checking invariants
        // after every operation.
        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            if rng.gen_bool(0.6) {
                snapshots
                    .push(worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot()));
            }
        }

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        cx.foreground().run_until_parked();

        let final_snapshot = worktree.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            tree.snapshot.check_invariants();
            tree.snapshot()
        });

        // Every saved snapshot, after applying the updates that postdate it,
        // must equal the final state.
        for (i, snapshot) in snapshots.into_iter().enumerate().rev() {
            let mut updated_snapshot = snapshot.clone();
            for update in updates.lock().iter() {
                if update.scan_id >= updated_snapshot.scan_id() as u64 {
                    updated_snapshot
                        .apply_remote_update(update.clone())
                        .unwrap();
                }
            }

            assert_eq!(
                updated_snapshot.entries(true).collect::<Vec<_>>(),
                final_snapshot.entries(true).collect::<Vec<_>>(),
                "wrong updates after snapshot {i}: {snapshot:#?} {updates:#?}",
            );
        }
    }
4369
    // Randomized test: mutates the filesystem and worktree with FS events
    // paused and flushed in random batches, then checks that (a) a freshly
    // scanned worktree agrees with the incrementally maintained one and
    // (b) saved snapshots can be replayed forward via the update stream.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let updates = Arc::new(Mutex::new(Vec::new()));
        worktree.update(cx, |tree, cx| {
            check_worktree_change_events(tree, cx);

            let _ = tree.as_local_mut().unwrap().observe_updates(0, cx, {
                let updates = updates.clone();
                move |update| {
                    updates.lock().push(update);
                    async { true }
                }
            });
        });

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // Buffer FS events so they can be delivered in random-sized batches.
        fs.as_fake().pause_events();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if rng.gen_bool(0.2) {
                worktree
                    .update(cx, |worktree, cx| {
                        randomly_mutate_worktree(worktree, &mut rng, cx)
                    })
                    .await
                    .log_err();
            } else {
                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            }

            let buffered_event_count = fs.as_fake().buffered_event_count();
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len);
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Deliver all remaining events and let the scanner settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX);
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A worktree scanned from scratch must agree with the incremental one.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(
                snapshot.entries_without_ids(true),
                new_snapshot.entries_without_ids(true)
            );
        }

        // Saved snapshots, replayed forward, must reach the final state.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate().rev() {
            for update in updates.lock().iter() {
                if update.scan_id >= prev_snapshot.scan_id() as u64 {
                    prev_snapshot.apply_remote_update(update.clone()).unwrap();
                }
            }

            assert_eq!(
                prev_snapshot.entries(true).collect::<Vec<_>>(),
                snapshot.entries(true).collect::<Vec<_>>(),
                "wrong updates after snapshot {i}: {updates:#?}",
            );
        }
    }
4492
    // The worktree's `UpdatedEntries` event can be used to follow along with
    // all changes to the worktree's snapshot.
    //
    // Subscribes to the worktree's own events and maintains a shadow entry
    // list from them, asserting after every event batch that it matches the
    // worktree's actual entries.
    fn check_worktree_change_events(tree: &mut Worktree, cx: &mut ModelContext<Worktree>) {
        let mut entries = tree.entries(true).cloned().collect::<Vec<_>>();
        cx.subscribe(&cx.handle(), move |tree, _, event, _| {
            if let Event::UpdatedEntries(changes) = event {
                for (path, _, change_type) in changes.iter() {
                    let entry = tree.entry_for_path(&path).cloned();
                    // Either the existing index of `path` or its insertion point.
                    let ix = match entries.binary_search_by_key(&path, |e| &e.path) {
                        Ok(ix) | Err(ix) => ix,
                    };
                    match change_type {
                        PathChange::Loaded => entries.insert(ix, entry.unwrap()),
                        PathChange::Added => entries.insert(ix, entry.unwrap()),
                        PathChange::Removed => drop(entries.remove(ix)),
                        PathChange::Updated => {
                            let entry = entry.unwrap();
                            let existing_entry = entries.get_mut(ix).unwrap();
                            assert_eq!(existing_entry.path, entry.path);
                            *existing_entry = entry;
                        }
                        PathChange::AddedOrUpdated => {
                            // Ambiguous change: replace if present, insert otherwise.
                            let entry = entry.unwrap();
                            if entries.get(ix).map(|e| &e.path) == Some(&entry.path) {
                                *entries.get_mut(ix).unwrap() = entry;
                            } else {
                                entries.insert(ix, entry);
                            }
                        }
                    }
                }

                let new_entries = tree.entries(true).cloned().collect::<Vec<_>>();
                assert_eq!(entries, new_entries, "incorrect changes: {:?}", changes);
            }
        })
        .detach();
    }
4531
    /// Applies one random mutation to the worktree — delete an entry, rename
    /// an entry, or create/overwrite one — and returns a task that resolves
    /// when the mutation's asynchronous work has completed.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        log::info!("mutating worktree");
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        // Pick a random non-ignored entry to mutate.
        let entry = snapshot.entries(false).choose(rng).unwrap();

        // Roughly one third each: delete, rename, create/overwrite. The root
        // entry (empty path) is never deleted or renamed: its guards fail and
        // control falls through to the final arm.
        match rng.gen_range(0_u32..100) {
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // NOTE: `..=66` also covers 0..=33; those values only reach this
            // arm when the previous arm's guard rejected the root entry.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                // Rename into the chosen entry if it is a directory, otherwise
                // into that entry's parent directory.
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Don't move an entry underneath itself; fall back to a fresh
                // name at the worktree root.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            _ => {
                let task = if entry.is_dir() {
                    // For a directory, create a new child: 30% chance it's a
                    // directory, otherwise a file.
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    // For a file, overwrite it with empty contents.
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4592
    /// Applies one random mutation directly to the fake filesystem under
    /// `root_path`: create a file/dir (with the given probability), write a
    /// random `.gitignore` (5% of the remainder), or rename/delete an
    /// existing path.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        log::info!("mutating fs");
        // Partition all existing paths under the root into files and dirs.
        // `dirs` always contains at least the root itself.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Force an insertion when only the bare root exists, so the tree
        // never stays degenerate.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Write a `.gitignore` into a random directory, listing a random
            // subset of that directory's descendant files and dirs.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // Both lists include `ignore_dir_path` itself via `starts_with`,
            // so `subdirs` is never empty.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            // NOTE(review): this range is exclusive (`0..len`) while the file
            // range above is inclusive (`0..=len`) — possibly deliberate so
            // not every subdir can be ignored at once; confirm intent.
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // One ignore rule per line, relative to the gitignore's directory.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Pick a random existing file or directory. `dirs[1..]` excludes
            // the root dir, so the root itself is never renamed or deleted.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Choose a destination parent that isn't inside the path
                // being moved.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally replace an existing directory wholesale:
                // remove the destination first, then rename onto it.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4752
4753 fn gen_name(rng: &mut impl Rng) -> String {
4754 (0..6)
4755 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4756 .map(char::from)
4757 .collect()
4758 }
4759
    impl LocalSnapshot {
        /// Asserts the snapshot's internal consistency: the two entry indexes
        /// agree, the file traversals visit exactly the file entries in order,
        /// directory traversal matches the path-sorted entry list, and every
        /// tracked gitignore still has corresponding entries.
        fn check_invariants(&self) {
            // `entries_by_path` and `entries_by_id` must describe the same
            // set of (path, id) pairs. The id-keyed side is re-sorted through
            // a BTreeSet so both sides compare in path order.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // Walk both file traversals (with and without ignored entries) in
            // lockstep with the full entry list; each must yield exactly the
            // file entries, in the same order, and then be exhausted.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries`. NOTE(review): despite the
            // name, inserting each node's children at `ix` and popping from
            // the end of the stack yields depth-first preorder — which is
            // what the equality assert against `dfs_paths_via_iter` below
            // requires.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The public `entries` traversal must agree with the raw cursor.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked `.gitignore` must still have both its parent
            // directory and the ignore file itself present in the snapshot.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Returns `(path, inode, is_ignored)` for each entry, sorted by
        /// path, optionally filtering out ignored entries. Used by tests to
        /// compare snapshots without regard to entry ids.
        fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4834
4835 mod git_tests {
4836 use super::*;
4837 use pretty_assertions::assert_eq;
4838
        // Renaming a repository's work directory on disk should carry the
        // repository (and the per-file git statuses) over to the new path.
        #[gpui::test]
        async fn test_rename_work_directory(cx: &mut TestAppContext) {
            let root = temp_tree(json!({
                "projects": {
                    "project1": {
                        "a": "",
                        "b": "",
                    }
                },

            }));
            let root_path = root.path();

            let http_client = FakeHttpClient::with_404_response();
            let client = cx.read(|cx| Client::new(http_client, cx));
            let tree = Worktree::local(
                client,
                root_path,
                true,
                Arc::new(RealFs),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();

            // Commit "a" so it can become Modified; "b" stays untracked/Added.
            let repo = git_init(&root_path.join("projects/project1"));
            git_add("a", &repo);
            git_commit("init", &repo);
            std::fs::write(root_path.join("projects/project1/a"), "aa").ok();

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;

            tree.flush_fs_events(cx).await;

            // Sanity-check statuses before the rename.
            cx.read(|cx| {
                let tree = tree.read(cx);
                let (work_dir, repo) = tree.repositories().next().unwrap();
                assert_eq!(work_dir.as_ref(), Path::new("projects/project1"));
                assert_eq!(
                    repo.status_for_file(tree, Path::new("projects/project1/a")),
                    Some(GitFileStatus::Modified)
                );
                assert_eq!(
                    repo.status_for_file(tree, Path::new("projects/project1/b")),
                    Some(GitFileStatus::Added)
                );
            });

            // Rename the whole work directory on disk.
            std::fs::rename(
                root_path.join("projects/project1"),
                root_path.join("projects/project2"),
            )
            .ok();
            tree.flush_fs_events(cx).await;

            // The repository and its statuses should follow the rename.
            cx.read(|cx| {
                let tree = tree.read(cx);
                let (work_dir, repo) = tree.repositories().next().unwrap();
                assert_eq!(work_dir.as_ref(), Path::new("projects/project2"));
                assert_eq!(
                    repo.status_for_file(tree, Path::new("projects/project2/a")),
                    Some(GitFileStatus::Modified)
                );
                assert_eq!(
                    repo.status_for_file(tree, Path::new("projects/project2/b")),
                    Some(GitFileStatus::Added)
                );
            });
        }
4910
        // `repository_for_path` should resolve each path to its innermost
        // enclosing repository (nested `.git` dirs shadow outer ones), report
        // repo-internal changes via `UpdatedGitRepositories`, and drop the
        // repository when its `.git` directory is deleted.
        #[gpui::test]
        async fn test_git_repository_for_path(cx: &mut TestAppContext) {
            let root = temp_tree(json!({
                "c.txt": "",
                "dir1": {
                    ".git": {},
                    "deps": {
                        "dep1": {
                            ".git": {},
                            "src": {
                                "a.txt": ""
                            }
                        }
                    },
                    "src": {
                        "b.txt": ""
                    }
                },
            }));

            let http_client = FakeHttpClient::with_404_response();
            let client = cx.read(|cx| Client::new(http_client, cx));
            let tree = Worktree::local(
                client,
                root.path(),
                true,
                Arc::new(RealFs),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
            tree.flush_fs_events(cx).await;

            tree.read_with(cx, |tree, _cx| {
                let tree = tree.as_local().unwrap();

                // A file outside any repository resolves to no repo.
                assert!(tree.repository_for_path("c.txt".as_ref()).is_none());

                let entry = tree.repository_for_path("dir1/src/b.txt".as_ref()).unwrap();
                assert_eq!(
                    entry
                        .work_directory(tree)
                        .map(|directory| directory.as_ref().to_owned()),
                    Some(Path::new("dir1").to_owned())
                );

                // A file under the nested repo resolves to the inner repo,
                // not the outer `dir1` one.
                let entry = tree
                    .repository_for_path("dir1/deps/dep1/src/a.txt".as_ref())
                    .unwrap();
                assert_eq!(
                    entry
                        .work_directory(tree)
                        .map(|directory| directory.as_ref().to_owned()),
                    Some(Path::new("dir1/deps/dep1").to_owned())
                );

                let entries = tree.files(false, 0);

                // Pair every visible file with its repo's work directory (if
                // any) and compare against the expected mapping.
                let paths_with_repos = tree
                    .entries_with_repositories(entries)
                    .map(|(entry, repo)| {
                        (
                            entry.path.as_ref(),
                            repo.and_then(|repo| {
                                repo.work_directory(&tree)
                                    .map(|work_directory| work_directory.0.to_path_buf())
                            }),
                        )
                    })
                    .collect::<Vec<_>>();

                assert_eq!(
                    paths_with_repos,
                    &[
                        (Path::new("c.txt"), None),
                        (
                            Path::new("dir1/deps/dep1/src/a.txt"),
                            Some(Path::new("dir1/deps/dep1").into())
                        ),
                        (Path::new("dir1/src/b.txt"), Some(Path::new("dir1").into())),
                    ]
                );
            });

            // Record every `UpdatedGitRepositories` event emitted from here on.
            let repo_update_events = Arc::new(Mutex::new(vec![]));
            tree.update(cx, |_, cx| {
                let repo_update_events = repo_update_events.clone();
                cx.subscribe(&tree, move |_, _, event, _| {
                    if let Event::UpdatedGitRepositories(update) = event {
                        repo_update_events.lock().push(update.clone());
                    }
                })
                .detach();
            });

            // A change inside `.git` should trigger an update for that repo.
            std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
            tree.flush_fs_events(cx).await;

            assert_eq!(
                repo_update_events.lock()[0]
                    .iter()
                    .map(|e| e.0.clone())
                    .collect::<Vec<Arc<Path>>>(),
                vec![Path::new("dir1").into()]
            );

            // Deleting the `.git` directory removes the repository entirely.
            std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
            tree.flush_fs_events(cx).await;

            tree.read_with(cx, |tree, _cx| {
                let tree = tree.as_local().unwrap();

                assert!(tree
                    .repository_for_path("dir1/src/b.txt".as_ref())
                    .is_none());
            });
        }
5032
        // End-to-end check that the worktree tracks per-file git statuses
        // through commits, resets, stashes, gitignore changes, file/dir
        // deletion, and directory renames.
        #[gpui::test]
        async fn test_git_status(cx: &mut TestAppContext) {
            const IGNORE_RULE: &'static str = "**/target";

            let root = temp_tree(json!({
                "project": {
                    "a.txt": "a",
                    "b.txt": "bb",
                    "c": {
                        "d": {
                            "e.txt": "eee"
                        }
                    },
                    "f.txt": "ffff",
                    "target": {
                        "build_file": "???"
                    },
                    ".gitignore": IGNORE_RULE
                },

            }));

            let http_client = FakeHttpClient::with_404_response();
            let client = cx.read(|cx| Client::new(http_client, cx));
            let tree = Worktree::local(
                client,
                root.path(),
                true,
                Arc::new(RealFs),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;

            // Paths relative to the repository work directory.
            const A_TXT: &'static str = "a.txt";
            const B_TXT: &'static str = "b.txt";
            const E_TXT: &'static str = "c/d/e.txt";
            const F_TXT: &'static str = "f.txt";
            const DOTGITIGNORE: &'static str = ".gitignore";
            const BUILD_FILE: &'static str = "target/build_file";

            // Commit a.txt, e.txt and the gitignore; leave b.txt and f.txt
            // untracked, then modify a.txt so it shows as Modified.
            let work_dir = root.path().join("project");
            let mut repo = git_init(work_dir.as_path());
            repo.add_ignore_rule(IGNORE_RULE).unwrap();
            git_add(Path::new(A_TXT), &repo);
            git_add(Path::new(E_TXT), &repo);
            git_add(Path::new(DOTGITIGNORE), &repo);
            git_commit("Initial commit", &repo);

            std::fs::write(work_dir.join(A_TXT), "aa").unwrap();

            tree.flush_fs_events(cx).await;

            // Check that the right git state is observed on startup
            tree.read_with(cx, |tree, _cx| {
                let snapshot = tree.snapshot();
                assert_eq!(snapshot.repository_entries.iter().count(), 1);
                let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
                assert_eq!(dir.0.as_ref(), Path::new("project"));

                assert_eq!(repo.statuses.iter().count(), 3);
                assert_eq!(
                    repo.statuses.get(&Path::new(A_TXT).into()),
                    Some(&GitFileStatus::Modified)
                );
                assert_eq!(
                    repo.statuses.get(&Path::new(B_TXT).into()),
                    Some(&GitFileStatus::Added)
                );
                assert_eq!(
                    repo.statuses.get(&Path::new(F_TXT).into()),
                    Some(&GitFileStatus::Added)
                );
            });

            // Committing a.txt and b.txt leaves only f.txt with a status.
            git_add(Path::new(A_TXT), &repo);
            git_add(Path::new(B_TXT), &repo);
            git_commit("Committing modified and added", &repo);
            tree.flush_fs_events(cx).await;

            // Check that repo only changes are tracked
            tree.read_with(cx, |tree, _cx| {
                let snapshot = tree.snapshot();
                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

                assert_eq!(repo.statuses.iter().count(), 1);
                assert_eq!(
                    repo.statuses.get(&Path::new(F_TXT).into()),
                    Some(&GitFileStatus::Added)
                );
            });

            // Soft-reset the last commit, unstage b.txt, stash, then make
            // work-tree edits; the ignored BUILD_FILE must not get a status.
            git_reset(0, &repo);
            git_remove_index(Path::new(B_TXT), &repo);
            git_stash(&mut repo);
            std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
            std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
            tree.flush_fs_events(cx).await;

            // Check that more complex repo changes are tracked
            tree.read_with(cx, |tree, _cx| {
                let snapshot = tree.snapshot();
                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

                assert_eq!(repo.statuses.iter().count(), 3);
                assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
                assert_eq!(
                    repo.statuses.get(&Path::new(B_TXT).into()),
                    Some(&GitFileStatus::Added)
                );
                assert_eq!(
                    repo.statuses.get(&Path::new(E_TXT).into()),
                    Some(&GitFileStatus::Modified)
                );
                assert_eq!(
                    repo.statuses.get(&Path::new(F_TXT).into()),
                    Some(&GitFileStatus::Added)
                );
            });

            // Delete b.txt and the c/ tree, and extend the gitignore so f.txt
            // becomes ignored too; afterwards nothing should carry a status.
            std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
            std::fs::remove_dir_all(work_dir.join("c")).unwrap();
            std::fs::write(
                work_dir.join(DOTGITIGNORE),
                [IGNORE_RULE, "f.txt"].join("\n"),
            )
            .unwrap();

            git_add(Path::new(DOTGITIGNORE), &repo);
            git_commit("Committing modified git ignore", &repo);

            tree.flush_fs_events(cx).await;

            // Check that non-repo behavior is tracked
            tree.read_with(cx, |tree, _cx| {
                let snapshot = tree.snapshot();
                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

                assert_eq!(repo.statuses.iter().count(), 0);
            });

            // A new file in a nested directory shows up as Added.
            let mut renamed_dir_name = "first_directory/second_directory";
            const RENAMED_FILE: &'static str = "rf.txt";

            std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
            std::fs::write(
                work_dir.join(renamed_dir_name).join(RENAMED_FILE),
                "new-contents",
            )
            .unwrap();

            tree.flush_fs_events(cx).await;

            tree.read_with(cx, |tree, _cx| {
                let snapshot = tree.snapshot();
                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

                assert_eq!(repo.statuses.iter().count(), 1);
                assert_eq!(
                    repo.statuses
                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
                    Some(&GitFileStatus::Added)
                );
            });

            // Renaming the outer directory should move the status along with
            // the file's new path.
            renamed_dir_name = "new_first_directory/second_directory";

            std::fs::rename(
                work_dir.join("first_directory"),
                work_dir.join("new_first_directory"),
            )
            .unwrap();

            tree.flush_fs_events(cx).await;

            tree.read_with(cx, |tree, _cx| {
                let snapshot = tree.snapshot();
                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

                assert_eq!(repo.statuses.iter().count(), 1);
                assert_eq!(
                    repo.statuses
                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
                    Some(&GitFileStatus::Added)
                );
            });
        }
5224
5225 #[track_caller]
5226 fn git_init(path: &Path) -> git2::Repository {
5227 git2::Repository::init(path).expect("Failed to initialize git repository")
5228 }
5229
5230 #[track_caller]
5231 fn git_add<P: AsRef<Path>>(path: P, repo: &git2::Repository) {
5232 let path = path.as_ref();
5233 let mut index = repo.index().expect("Failed to get index");
5234 index.add_path(path).expect("Failed to add a.txt");
5235 index.write().expect("Failed to write index");
5236 }
5237
5238 #[track_caller]
5239 fn git_remove_index(path: &Path, repo: &git2::Repository) {
5240 let mut index = repo.index().expect("Failed to get index");
5241 index.remove_path(path).expect("Failed to add a.txt");
5242 index.write().expect("Failed to write index");
5243 }
5244
5245 #[track_caller]
5246 fn git_commit(msg: &'static str, repo: &git2::Repository) {
5247 use git2::Signature;
5248
5249 let signature = Signature::now("test", "test@zed.dev").unwrap();
5250 let oid = repo.index().unwrap().write_tree().unwrap();
5251 let tree = repo.find_tree(oid).unwrap();
5252 if let Some(head) = repo.head().ok() {
5253 let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
5254
5255 let parent_commit = parent_obj.as_commit().unwrap();
5256
5257 repo.commit(
5258 Some("HEAD"),
5259 &signature,
5260 &signature,
5261 msg,
5262 &tree,
5263 &[parent_commit],
5264 )
5265 .expect("Failed to commit with parent");
5266 } else {
5267 repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
5268 .expect("Failed to commit");
5269 }
5270 }
5271
5272 #[track_caller]
5273 fn git_stash(repo: &mut git2::Repository) {
5274 use git2::Signature;
5275
5276 let signature = Signature::now("test", "test@zed.dev").unwrap();
5277 repo.stash_save(&signature, "N/A", None)
5278 .expect("Failed to stash");
5279 }
5280
5281 #[track_caller]
5282 fn git_reset(offset: usize, repo: &git2::Repository) {
5283 let head = repo.head().expect("Couldn't get repo head");
5284 let object = head.peel(git2::ObjectType::Commit).unwrap();
5285 let commit = object.as_commit().unwrap();
5286 let new_head = commit
5287 .parents()
5288 .inspect(|parnet| {
5289 parnet.message();
5290 })
5291 .skip(offset)
5292 .next()
5293 .expect("Not enough history");
5294 repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
5295 .expect("Could not reset");
5296 }
5297
5298 #[allow(dead_code)]
5299 #[track_caller]
5300 fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
5301 repo.statuses(None)
5302 .unwrap()
5303 .iter()
5304 .map(|status| (status.path().unwrap().to_string(), status.status()))
5305 .collect()
5306 }
5307 }
5308}