1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{
10 repository::{GitFileStatus, GitRepository, RepoPath},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 FutureExt, Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use smol::channel::{self, Sender};
40use std::{
41 any::Any,
42 cmp::{self, Ordering},
43 convert::TryFrom,
44 ffi::OsStr,
45 fmt,
46 future::Future,
47 mem,
48 ops::{Deref, DerefMut},
49 path::{Path, PathBuf},
50 pin::Pin,
51 sync::{
52 atomic::{AtomicUsize, Ordering::SeqCst},
53 Arc,
54 },
55 time::{Duration, SystemTime},
56};
57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
58use util::{paths::HOME, ResultExt};
59
/// Identifies a worktree within a project. Created from the worktree model's
/// id (see `WorktreeId::from_usize(cx.model_id())` in `Worktree::local`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A directory tree tracked by a project: either rooted on the local
/// filesystem, or a replica kept in sync with a remote collaborator's
/// worktree via streamed updates.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    // The current state of the worktree's entries and git repositories.
    snapshot: LocalSnapshot,
    // Sends batches of changed absolute paths to the background scanner;
    // the paired barrier sender is dropped once the batch is processed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch channel indicating whether a scan is currently in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    // Keeps the background scanner alive for the worktree's lifetime.
    _background_scanner_task: Task<()>,
    // Present while the worktree is shared with remote collaborators.
    share: Option<ShareState>,
    // Full diagnostics per path, as a list sorted by language server id.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    // Error/warning counts per path, keyed by language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    // Whether this worktree is shown in the UI's project panel — TODO confirm.
    visible: bool,
}
86
/// A read-only replica of a worktree that lives in a remote project,
/// updated by applying `proto::UpdateWorktree` messages.
pub struct RemoteWorktree {
    // The snapshot visible to the UI, copied from `background_snapshot`.
    snapshot: Snapshot,
    // Snapshot mutated on a background task as remote updates arrive.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // Channel feeding remote updates to the background task; `None` is
    // presumably set once updates stop — TODO confirm against the rest of the file.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Tasks waiting for a particular scan id to be observed, in FIFO order.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
99
/// An immutable-in-spirit view of a worktree's contents at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root on disk.
    abs_path: Arc<Path>,
    // File name of the root, used for display and fuzzy matching.
    root_name: String,
    // Lower-cased characters of `root_name`, for fuzzy matching.
    root_char_bag: CharBag,
    // Entries ordered by path, and the same entries indexed by id.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Git repositories keyed by their working directory path.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
122
/// The state of a git repository as recorded in a snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    // Project entry id of the repository's working directory.
    pub(crate) work_directory: WorkDirectoryEntry,
    // The checked-out branch name, if known.
    pub(crate) branch: Option<Arc<str>>,
}
128
129impl RepositoryEntry {
130 pub fn branch(&self) -> Option<Arc<str>> {
131 self.branch.clone()
132 }
133
134 pub fn work_directory_id(&self) -> ProjectEntryId {
135 *self.work_directory
136 }
137
138 pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
139 snapshot
140 .entry_for_id(self.work_directory_id())
141 .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
142 }
143
144 pub fn build_update(&self, _: &Self) -> proto::RepositoryEntry {
145 proto::RepositoryEntry {
146 work_directory_id: self.work_directory_id().to_proto(),
147 branch: self.branch.as_ref().map(|str| str.to_string()),
148 }
149 }
150}
151
152impl From<&RepositoryEntry> for proto::RepositoryEntry {
153 fn from(value: &RepositoryEntry) -> Self {
154 proto::RepositoryEntry {
155 work_directory_id: value.work_directory.to_proto(),
156 branch: value.branch.as_ref().map(|str| str.to_string()),
157 }
158 }
159}
160
/// This path corresponds to the 'content path' (the folder that contains the .git)
/// The path is relative to the worktree root (see `work_directory`, which builds
/// it from an entry's worktree-relative path).
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
164
165impl Default for RepositoryWorkDirectory {
166 fn default() -> Self {
167 RepositoryWorkDirectory(Arc::from(Path::new("")))
168 }
169}
170
171impl AsRef<Path> for RepositoryWorkDirectory {
172 fn as_ref(&self) -> &Path {
173 self.0.as_ref()
174 }
175}
176
/// Newtype over the `ProjectEntryId` of a repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
179
180impl WorkDirectoryEntry {
181 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
182 worktree.entry_for_id(self.0).and_then(|entry| {
183 path.strip_prefix(&entry.path)
184 .ok()
185 .map(move |path| path.into())
186 })
187 }
188}
189
190impl Deref for WorkDirectoryEntry {
191 type Target = ProjectEntryId;
192
193 fn deref(&self) -> &Self::Target {
194 &self.0
195 }
196}
197
198impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
199 fn from(value: ProjectEntryId) -> Self {
200 WorkDirectoryEntry(value)
201 }
202}
203
/// A `Snapshot` plus the local-only state needed to scan the filesystem
/// (gitignores and git repositories).
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    // NOTE(review): the field name says "abs_path" while the comment says
    // "relative path" — confirm which one the keys actually are.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
}
214
/// Mutable state owned by the background scanner while it processes
/// filesystem events.
pub struct BackgroundScannerState {
    // The snapshot being built up by the current scan.
    snapshot: LocalSnapshot,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Paths touched during the current update.
    changed_paths: Vec<Arc<Path>>,
    // The snapshot as of the previous completed update, for diffing.
    prev_snapshot: Snapshot,
}
225
/// A git repository discovered on the local filesystem.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    // Scan id at which this repository's .git directory was last scanned.
    pub(crate) git_dir_scan_id: usize,
    // Handle to the underlying git repository, shared behind a mutex.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
234
235impl LocalRepositoryEntry {
236 // Note that this path should be relative to the worktree root.
237 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
238 path.starts_with(self.git_dir_path.as_ref())
239 }
240}
241
242impl Deref for LocalSnapshot {
243 type Target = Snapshot;
244
245 fn deref(&self) -> &Self::Target {
246 &self.snapshot
247 }
248}
249
250impl DerefMut for LocalSnapshot {
251 fn deref_mut(&mut self) -> &mut Self::Target {
252 &mut self.snapshot
253 }
254}
255
/// Messages sent from the background scanner to the `LocalWorktree` model.
enum ScanState {
    // A scan has begun.
    Started,
    // The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        changes: UpdatedEntriesSet,
        // Dropped once the update is applied, releasing any task waiting
        // on the corresponding path-change batch.
        barrier: Option<barrier::Sender>,
        // Whether the scanner is still working after this update.
        scanning: bool,
    },
}
265
/// State maintained while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    // Carries each new snapshot plus its entry/git changes to the task that
    // streams updates to remote peers.
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    // Signaled to resume sending updates after a pause.
    resume_updates: watch::Sender<()>,
    // Keeps the snapshot-streaming task alive while shared.
    _maintain_remote_snapshot: Task<Option<()>>,
}
273
/// Events emitted by a `Worktree` model to its observers.
pub enum Event {
    UpdatedEntries(UpdatedEntriesSet),
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
}
278
// Worktrees are gpui entities; observers receive `Event` values.
impl Entity for Worktree {
    type Event = Event;
}
282
283impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem: stats the
    /// root, builds an initial snapshot, and starts a background scanner that
    /// keeps the snapshot in sync with filesystem events.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // Scan ids start at 1; `completed_scan_id` lags behind
                    // until the first scan finishes.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner into this model,
            // updating the "is scanning" flag and the snapshot as they arrive.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, changes, cx);
                                // Dropping the barrier signals any waiter that
                                // this batch of path changes has been handled.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    // Watch the root with a 100ms debounce; events drive the scanner.
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        next_entry_id,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
394
    /// Creates a worktree that mirrors a worktree in a remote project.
    /// Remote `UpdateWorktree` messages are applied to a background snapshot,
    /// which is then copied into the model after each update.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming remote updates to the background snapshot off
            // the main thread, signaling after each one.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // After each applied update, copy the background snapshot into
            // the model and wake subscribers whose scan id has been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Arc::from([])));
                            cx.notify();
                            // Subscriptions are ordered; stop at the first one
                            // whose scan id hasn't been observed yet.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
477
478 pub fn as_local(&self) -> Option<&LocalWorktree> {
479 if let Worktree::Local(worktree) = self {
480 Some(worktree)
481 } else {
482 None
483 }
484 }
485
486 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
487 if let Worktree::Remote(worktree) = self {
488 Some(worktree)
489 } else {
490 None
491 }
492 }
493
494 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
495 if let Worktree::Local(worktree) = self {
496 Some(worktree)
497 } else {
498 None
499 }
500 }
501
502 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
503 if let Worktree::Remote(worktree) = self {
504 Some(worktree)
505 } else {
506 None
507 }
508 }
509
510 pub fn is_local(&self) -> bool {
511 matches!(self, Worktree::Local(_))
512 }
513
514 pub fn is_remote(&self) -> bool {
515 !self.is_local()
516 }
517
518 pub fn snapshot(&self) -> Snapshot {
519 match self {
520 Worktree::Local(worktree) => worktree.snapshot().snapshot,
521 Worktree::Remote(worktree) => worktree.snapshot(),
522 }
523 }
524
525 pub fn scan_id(&self) -> usize {
526 match self {
527 Worktree::Local(worktree) => worktree.snapshot.scan_id,
528 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
529 }
530 }
531
532 pub fn completed_scan_id(&self) -> usize {
533 match self {
534 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
535 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
536 }
537 }
538
539 pub fn is_visible(&self) -> bool {
540 match self {
541 Worktree::Local(worktree) => worktree.visible,
542 Worktree::Remote(worktree) => worktree.visible,
543 }
544 }
545
546 pub fn replica_id(&self) -> ReplicaId {
547 match self {
548 Worktree::Local(_) => 0,
549 Worktree::Remote(worktree) => worktree.replica_id,
550 }
551 }
552
553 pub fn diagnostic_summaries(
554 &self,
555 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
556 match self {
557 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
558 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
559 }
560 .iter()
561 .flat_map(|(path, summaries)| {
562 summaries
563 .iter()
564 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
565 })
566 }
567
568 pub fn abs_path(&self) -> Arc<Path> {
569 match self {
570 Worktree::Local(worktree) => worktree.abs_path.clone(),
571 Worktree::Remote(worktree) => worktree.abs_path.clone(),
572 }
573 }
574}
575
576impl LocalWorktree {
577 pub fn contains_abs_path(&self, path: &Path) -> bool {
578 path.starts_with(&self.abs_path)
579 }
580
581 fn absolutize(&self, path: &Path) -> PathBuf {
582 if path.file_name().is_some() {
583 self.abs_path.join(path)
584 } else {
585 self.abs_path.to_path_buf()
586 }
587 }
588
    /// Loads the file at `path` into a new `Buffer` model with the given
    /// remote `id`, including its git index text as the diff base when the
    /// file belongs to a repository.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer on a background thread before wrapping
            // it in a model on the main thread.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|_| Buffer::build(text_buffer, diff_base, Some(Arc::new(file)))))
        })
    }
607
608 pub fn diagnostics_for_path(
609 &self,
610 path: &Path,
611 ) -> Vec<(
612 LanguageServerId,
613 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
614 )> {
615 self.diagnostics.get(path).cloned().unwrap_or_default()
616 }
617
    /// Removes all diagnostics and summaries produced by `server_id` from
    /// this worktree, broadcasting zeroed summaries to collaborators when
    /// the worktree is shared.
    pub fn clear_diagnostics_for_language_server(
        &mut self,
        server_id: LanguageServerId,
        _: &mut ModelContext<Worktree>,
    ) {
        let worktree_id = self.id().to_proto();
        self.diagnostic_summaries
            .retain(|path, summaries_by_server_id| {
                if summaries_by_server_id.remove(&server_id).is_some() {
                    if let Some(share) = self.share.as_ref() {
                        // Send a summary with zero counts so remote peers
                        // drop their copy for this path/server as well.
                        self.client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id: share.project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path: path.to_string_lossy().to_string(),
                                    language_server_id: server_id.0 as u64,
                                    error_count: 0,
                                    warning_count: 0,
                                }),
                            })
                            .log_err();
                    }
                    // Keep the path only if other servers still have summaries.
                    !summaries_by_server_id.is_empty()
                } else {
                    true
                }
            });

        // The per-path lists are kept sorted by server id, hence the binary
        // search; drop paths whose lists become empty.
        self.diagnostics.retain(|_, diagnostics_by_server_id| {
            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                diagnostics_by_server_id.remove(ix);
                !diagnostics_by_server_id.is_empty()
            } else {
                true
            }
        });
    }
656
    /// Replaces the diagnostics produced by `server_id` for `worktree_path`.
    ///
    /// Returns `Ok(true)` when the summary changed (either the old or new
    /// summary is non-empty), which is also when an updated summary is sent
    /// to collaborators on a shared worktree.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // Nothing remains for this server: remove its entry from the
            // sorted per-path list, and drop the path entirely when no
            // other server has diagnostics for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            // Record the new summary, and insert or replace this server's
            // diagnostics at its sorted position in the per-path list.
            summaries_by_server_id.insert(server_id, new_summary);
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
716
717 fn set_snapshot(
718 &mut self,
719 new_snapshot: LocalSnapshot,
720 entry_changes: UpdatedEntriesSet,
721 cx: &mut ModelContext<Worktree>,
722 ) {
723 let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);
724
725 self.snapshot = new_snapshot;
726
727 if let Some(share) = self.share.as_mut() {
728 share
729 .snapshots_tx
730 .unbounded_send((
731 self.snapshot.clone(),
732 entry_changes.clone(),
733 repo_changes.clone(),
734 ))
735 .ok();
736 }
737
738 if !entry_changes.is_empty() {
739 cx.emit(Event::UpdatedEntries(entry_changes));
740 }
741 if !repo_changes.is_empty() {
742 cx.emit(Event::UpdatedGitRepositories(repo_changes));
743 }
744 }
745
    /// Computes which git repositories changed between two snapshots.
    ///
    /// Performs a sorted merge-join over the two `git_repositories` maps
    /// (keyed by project entry id). A repository counts as changed when it
    /// was added, removed, or its `.git` directory was rescanned (its
    /// `git_dir_scan_id` differs). Each change records the work-directory
    /// path and the repository's previous state (`None` for additions).
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        Ordering::Less => {
                            // Present only in the new snapshot: repository added.
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        Ordering::Equal => {
                            // Present in both: changed only if the .git
                            // directory was rescanned since the old snapshot.
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repository_entries
                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
                                        .cloned();
                                    changes.push((
                                        entry.path.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        Ordering::Greater => {
                            // Present only in the old snapshot: repository removed.
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repository_entries
                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
                                    .cloned();
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                (Some((entry_id, _)), None) => {
                    // Old side exhausted: remaining new repositories are additions.
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                (None, Some((entry_id, _))) => {
                    // New side exhausted: remaining old repositories are removals.
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repository_entries
                            .get(&RepositoryWorkDirectory(entry.path.clone()))
                            .cloned();
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Clones a peeked `(&K, &V)` pair into owned values.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
840
841 pub fn scan_complete(&self) -> impl Future<Output = ()> {
842 let mut is_scanning_rx = self.is_scanning.1.clone();
843 async move {
844 let mut is_scanning = is_scanning_rx.borrow().clone();
845 while is_scanning {
846 if let Some(value) = is_scanning_rx.recv().await {
847 is_scanning = value;
848 } else {
849 break;
850 }
851 }
852 }
853 }
854
855 pub fn snapshot(&self) -> LocalSnapshot {
856 self.snapshot.clone()
857 }
858
859 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
860 proto::WorktreeMetadata {
861 id: self.id().to_proto(),
862 root_name: self.root_name().to_string(),
863 visible: self.visible,
864 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
865 }
866 }
867
    /// Reads the file at `path`, returning its `File` metadata, contents,
    /// and git index text (the diff base) when the file is in a repository.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // If the file belongs to a git repository, load its index text in
        // the background to use as the diff base.
        if let Some(repo) = snapshot.repository_for_path(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
922
923 pub fn save_buffer(
924 &self,
925 buffer_handle: ModelHandle<Buffer>,
926 path: Arc<Path>,
927 has_changed_file: bool,
928 cx: &mut ModelContext<Worktree>,
929 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
930 let handle = cx.handle();
931 let buffer = buffer_handle.read(cx);
932
933 let rpc = self.client.clone();
934 let buffer_id = buffer.remote_id();
935 let project_id = self.share.as_ref().map(|share| share.project_id);
936
937 let text = buffer.as_rope().clone();
938 let fingerprint = text.fingerprint();
939 let version = buffer.version();
940 let save = self.write_file(path, text, buffer.line_ending(), cx);
941
942 cx.as_mut().spawn(|mut cx| async move {
943 let entry = save.await?;
944
945 if has_changed_file {
946 let new_file = Arc::new(File {
947 entry_id: entry.id,
948 worktree: handle,
949 path: entry.path,
950 mtime: entry.mtime,
951 is_local: true,
952 is_deleted: false,
953 });
954
955 if let Some(project_id) = project_id {
956 rpc.send(proto::UpdateBufferFile {
957 project_id,
958 buffer_id,
959 file: Some(new_file.to_proto()),
960 })
961 .log_err();
962 }
963
964 buffer_handle.update(&mut cx, |buffer, cx| {
965 if has_changed_file {
966 buffer.file_updated(new_file, cx).detach();
967 }
968 });
969 }
970
971 if let Some(project_id) = project_id {
972 rpc.send(proto::BufferSaved {
973 project_id,
974 buffer_id,
975 version: serialize_version(&version),
976 mtime: Some(entry.mtime.into()),
977 fingerprint: serialize_fingerprint(fingerprint),
978 })?;
979 }
980
981 buffer_handle.update(&mut cx, |buffer, cx| {
982 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
983 });
984
985 Ok((version, fingerprint, entry.mtime))
986 })
987 }
988
989 pub fn create_entry(
990 &self,
991 path: impl Into<Arc<Path>>,
992 is_dir: bool,
993 cx: &mut ModelContext<Worktree>,
994 ) -> Task<Result<Entry>> {
995 let path = path.into();
996 let abs_path = self.absolutize(&path);
997 let fs = self.fs.clone();
998 let write = cx.background().spawn(async move {
999 if is_dir {
1000 fs.create_dir(&abs_path).await
1001 } else {
1002 fs.save(&abs_path, &Default::default(), Default::default())
1003 .await
1004 }
1005 });
1006
1007 cx.spawn(|this, mut cx| async move {
1008 write.await?;
1009 this.update(&mut cx, |this, cx| {
1010 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1011 })
1012 .await
1013 })
1014 }
1015
1016 pub fn write_file(
1017 &self,
1018 path: impl Into<Arc<Path>>,
1019 text: Rope,
1020 line_ending: LineEnding,
1021 cx: &mut ModelContext<Worktree>,
1022 ) -> Task<Result<Entry>> {
1023 let path = path.into();
1024 let abs_path = self.absolutize(&path);
1025 let fs = self.fs.clone();
1026 let write = cx
1027 .background()
1028 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1029
1030 cx.spawn(|this, mut cx| async move {
1031 write.await?;
1032 this.update(&mut cx, |this, cx| {
1033 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1034 })
1035 .await
1036 })
1037 }
1038
    /// Deletes the file or directory for `entry_id` from disk, then waits
    /// for the background scanner to process the removal.
    ///
    /// Returns `None` when no entry exists with the given id.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root; an empty entry path (no file name)
            // refers to the root itself.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path, and wait on the
            // barrier until the snapshot reflects the deletion.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
1081
1082 pub fn rename_entry(
1083 &self,
1084 entry_id: ProjectEntryId,
1085 new_path: impl Into<Arc<Path>>,
1086 cx: &mut ModelContext<Worktree>,
1087 ) -> Option<Task<Result<Entry>>> {
1088 let old_path = self.entry_for_id(entry_id)?.path.clone();
1089 let new_path = new_path.into();
1090 let abs_old_path = self.absolutize(&old_path);
1091 let abs_new_path = self.absolutize(&new_path);
1092 let fs = self.fs.clone();
1093 let rename = cx.background().spawn(async move {
1094 fs.rename(&abs_old_path, &abs_new_path, Default::default())
1095 .await
1096 });
1097
1098 Some(cx.spawn(|this, mut cx| async move {
1099 rename.await?;
1100 this.update(&mut cx, |this, cx| {
1101 this.as_local_mut()
1102 .unwrap()
1103 .refresh_entry(new_path.clone(), Some(old_path), cx)
1104 })
1105 .await
1106 }))
1107 }
1108
1109 pub fn copy_entry(
1110 &self,
1111 entry_id: ProjectEntryId,
1112 new_path: impl Into<Arc<Path>>,
1113 cx: &mut ModelContext<Worktree>,
1114 ) -> Option<Task<Result<Entry>>> {
1115 let old_path = self.entry_for_id(entry_id)?.path.clone();
1116 let new_path = new_path.into();
1117 let abs_old_path = self.absolutize(&old_path);
1118 let abs_new_path = self.absolutize(&new_path);
1119 let fs = self.fs.clone();
1120 let copy = cx.background().spawn(async move {
1121 copy_recursive(
1122 fs.as_ref(),
1123 &abs_old_path,
1124 &abs_new_path,
1125 Default::default(),
1126 )
1127 .await
1128 });
1129
1130 Some(cx.spawn(|this, mut cx| async move {
1131 copy.await?;
1132 this.update(&mut cx, |this, cx| {
1133 this.as_local_mut()
1134 .unwrap()
1135 .refresh_entry(new_path.clone(), None, cx)
1136 })
1137 .await
1138 }))
1139 }
1140
    /// Asks the background scanner to rescan `path` (and `old_path`, when
    /// the entry was moved), then returns the freshly scanned entry.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // An empty relative path (no file name) refers to the root.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // Wait on the barrier until the scanner has processed the paths.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1178
    /// Starts streaming snapshot updates for this worktree to `callback`.
    ///
    /// The first message is a full snapshot; subsequent messages are deltas,
    /// all split into chunks of at most `MAX_CHUNK_SIZE` entries. If the
    /// callback reports failure, streaming pauses until a value is observed
    /// on the share's `resume_updates` channel. The returned receiver is
    /// signalled once the queued snapshots have been fully sent (or
    /// immediately when the worktree is already being shared).
    pub fn observe_updates<F, Fut>(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
        callback: F,
    ) -> oneshot::Receiver<()>
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: Send + Future<Output = bool>,
    {
        // Tiny chunks in tests exercise the update-splitting logic.
        #[cfg(any(test, feature = "test-support"))]
        const MAX_CHUNK_SIZE: usize = 2;
        #[cfg(not(any(test, feature = "test-support")))]
        const MAX_CHUNK_SIZE: usize = 256;

        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: just nudge the existing task to resume.
            share_tx.send(()).ok();
            *share.resume_updates.borrow_mut() = ();
            return share_rx;
        }

        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
        let (snapshots_tx, mut snapshots_rx) =
            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
        // Seed the stream with the current snapshot and empty change sets.
        snapshots_tx
            .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
            .ok();

        let worktree_id = cx.model_id() as u64;
        let _maintain_remote_snapshot = cx.background().spawn(async move {
            let mut is_first = true;
            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
                let update;
                if is_first {
                    update = snapshot.build_initial_update(project_id, worktree_id);
                    is_first = false;
                } else {
                    update =
                        snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes);
                }

                for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                    // Discard any stale resume signal before sending.
                    let _ = resume_updates_rx.try_recv();
                    loop {
                        let result = callback(update.clone());
                        if result.await {
                            break;
                        } else {
                            log::info!("waiting to resume updates");
                            // Resume channel closed: sharing has ended.
                            if resume_updates_rx.next().await.is_none() {
                                return Some(());
                            }
                        }
                    }
                }
            }
            // Snapshot stream ended; signal that all updates were delivered.
            share_tx.send(()).ok();
            Some(())
        });

        self.share = Some(ShareState {
            project_id,
            snapshots_tx,
            resume_updates: resume_updates_tx,
            _maintain_remote_snapshot,
        });
        share_rx
    }
1249
    /// Begins sharing this worktree under `project_id`: replicates all
    /// existing diagnostic summaries to the server, then streams worktree
    /// updates. The returned task resolves once the initial updates have been
    /// flushed, or fails with "share ended" if sharing stops first.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let client = self.client.clone();

        // Send existing diagnostics up-front, bailing on the first send error.
        for (path, summaries) in &self.diagnostic_summaries {
            for (&server_id, summary) in summaries {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id: cx.model_id() as u64,
                    summary: Some(summary.to_proto(server_id, &path)),
                }) {
                    return Task::ready(Err(e));
                }
            }
        }

        let rx = self.observe_updates(project_id, cx, move |update| {
            client.request(update).map(|result| result.is_ok())
        });
        cx.foreground()
            .spawn(async move { rx.await.map_err(|_| anyhow!("share ended")) })
    }
1271
1272 pub fn unshare(&mut self) {
1273 self.share.take();
1274 }
1275
    /// Whether this worktree is currently being shared.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1279}
1280
1281impl RemoteWorktree {
    /// Returns an owned copy of this remote worktree's current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1285
    /// Marks this remote worktree as disconnected: dropping the update channel
    /// stops accepting further updates, and clearing the subscriptions drops
    /// their senders so pending `wait_for_snapshot` futures resolve with an
    /// error.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
1291
    /// Saves `buffer_handle` by asking the host to persist it, then applies
    /// the resulting save metadata (version, fingerprint, mtime) to the
    /// buffer locally.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            // Record the save on the buffer so it is no longer considered dirty.
            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1324
1325 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1326 if let Some(updates_tx) = &self.updates_tx {
1327 updates_tx
1328 .unbounded_send(update)
1329 .expect("consumer runs to completion");
1330 }
1331 }
1332
1333 fn observed_snapshot(&self, scan_id: usize) -> bool {
1334 self.completed_scan_id >= scan_id
1335 }
1336
    /// Returns a future that resolves once a scan with id at least `scan_id`
    /// has been observed.
    ///
    /// If the worktree has already disconnected, the sender is dropped
    /// immediately so the future resolves with a channel error.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            // Keep subscriptions sorted by scan id; both Ok and Err give a
            // valid insertion point.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1357
1358 pub fn update_diagnostic_summary(
1359 &mut self,
1360 path: Arc<Path>,
1361 summary: &proto::DiagnosticSummary,
1362 ) {
1363 let server_id = LanguageServerId(summary.language_server_id as usize);
1364 let summary = DiagnosticSummary {
1365 error_count: summary.error_count as usize,
1366 warning_count: summary.warning_count as usize,
1367 };
1368
1369 if summary.is_empty() {
1370 if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1371 summaries.remove(&server_id);
1372 if summaries.is_empty() {
1373 self.diagnostic_summaries.remove(&path);
1374 }
1375 }
1376 } else {
1377 self.diagnostic_summaries
1378 .entry(path)
1379 .or_default()
1380 .insert(server_id, summary);
1381 }
1382 }
1383
    /// Inserts an entry sent by the host once the snapshot for `scan_id` has
    /// been observed, keeping the foreground snapshot in sync with the
    /// background one.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                // Mutate the background snapshot, then mirror it in the
                // foreground copy.
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }
1402
    /// Deletes the entry with the given id once the snapshot for `scan_id`
    /// has been observed, keeping the foreground snapshot in sync with the
    /// background one.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1421}
1422
1423impl Snapshot {
    /// The id of this worktree.
    pub fn id(&self) -> WorktreeId {
        self.id
    }
1427
    /// Absolute path to this worktree's root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }
1431
    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
1435
    /// Converts a protocol entry and inserts it into both indices, evicting
    /// any previous entry that had the same id at a different path.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id previously pointed at another path, remove the stale
        // path-keyed entry before inserting the new one.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
1453
    /// Removes the entry with the given id along with all of its descendants,
    /// returning the removed entry's path.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and its descendants, unregistering
            // their ids as we go...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the remainder.
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }
1474
1475 #[cfg(any(test, feature = "test-support"))]
1476 pub fn status_for_file(&self, path: impl Into<PathBuf>) -> Option<GitFileStatus> {
1477 let path = path.into();
1478 self.entries_by_path
1479 .get(&PathKey(Arc::from(path)), &())
1480 .and_then(|entry| entry.git_status)
1481 }
1482
1483 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1484 let mut entries_by_path_edits = Vec::new();
1485 let mut entries_by_id_edits = Vec::new();
1486
1487 for entry_id in update.removed_entries {
1488 let entry_id = ProjectEntryId::from_proto(entry_id);
1489 entries_by_id_edits.push(Edit::Remove(entry_id));
1490 if let Some(entry) = self.entry_for_id(entry_id) {
1491 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1492 }
1493 }
1494
1495 for entry in update.updated_entries {
1496 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1497 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1498 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1499 }
1500 if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
1501 if old_entry.id != entry.id {
1502 entries_by_id_edits.push(Edit::Remove(old_entry.id));
1503 }
1504 }
1505 entries_by_id_edits.push(Edit::Insert(PathEntry {
1506 id: entry.id,
1507 path: entry.path.clone(),
1508 is_ignored: entry.is_ignored,
1509 scan_id: 0,
1510 }));
1511 entries_by_path_edits.push(Edit::Insert(entry));
1512 }
1513
1514 self.entries_by_path.edit(entries_by_path_edits, &());
1515 self.entries_by_id.edit(entries_by_id_edits, &());
1516
1517 update.removed_repositories.sort_unstable();
1518 self.repository_entries.retain(|_, entry| {
1519 if let Ok(_) = update
1520 .removed_repositories
1521 .binary_search(&entry.work_directory.to_proto())
1522 {
1523 false
1524 } else {
1525 true
1526 }
1527 });
1528
1529 for repository in update.updated_repositories {
1530 let work_directory_entry: WorkDirectoryEntry =
1531 ProjectEntryId::from_proto(repository.work_directory_id).into();
1532
1533 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1534 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1535 if self.repository_entries.get(&work_directory).is_some() {
1536 self.repository_entries.update(&work_directory, |repo| {
1537 repo.branch = repository.branch.map(Into::into);
1538 });
1539 } else {
1540 self.repository_entries.insert(
1541 work_directory,
1542 RepositoryEntry {
1543 work_directory: work_directory_entry,
1544 branch: repository.branch.map(Into::into),
1545 },
1546 )
1547 }
1548 } else {
1549 log::error!("no work directory entry for repository {:?}", repository)
1550 }
1551 }
1552
1553 self.scan_id = update.scan_id as usize;
1554 if update.is_last_update {
1555 self.completed_scan_id = update.scan_id as usize;
1556 }
1557
1558 Ok(())
1559 }
1560
    /// Total number of file entries, per the entry tree's summary.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }
1564
    /// Number of "visible" file entries, per the entry tree's summary.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }
1568
    /// Creates a traversal positioned at the `start_offset`-th entry, where
    /// the offset counts only entries matching the `include_dirs` /
    /// `include_ignored` filters.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }
1591
    /// Creates a traversal positioned at the first entry at or after `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }
1606
    /// Traverses file entries (directories excluded) starting at `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }
1610
    /// Traverses all entries from the beginning, directories included.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }
1614
1615 pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
1616 self.repository_entries
1617 .iter()
1618 .map(|(path, entry)| (&path.0, entry))
1619 }
1620
1621 /// Get the repository whose work directory contains the given path.
1622 pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
1623 self.repository_entries
1624 .get(&RepositoryWorkDirectory(path.into()))
1625 .cloned()
1626 }
1627
1628 /// Get the repository whose work directory contains the given path.
1629 pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1630 self.repository_and_work_directory_for_path(path)
1631 .map(|e| e.1)
1632 }
1633
1634 pub fn repository_and_work_directory_for_path(
1635 &self,
1636 path: &Path,
1637 ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
1638 self.repository_entries
1639 .iter()
1640 .filter(|(workdir_path, _)| path.starts_with(workdir_path))
1641 .last()
1642 .map(|(path, repo)| (path.clone(), repo.clone()))
1643 }
1644
    /// Given an ordered iterator of entries, returns an iterator of those entries,
    /// along with their containing git repository.
    ///
    /// Maintains a stack of the repositories containing the current entry;
    /// because the input is ordered by path, repositories are pushed as the
    /// traversal enters their work directories and popped when it leaves.
    pub fn entries_with_repositories<'a>(
        &'a self,
        entries: impl 'a + Iterator<Item = &'a Entry>,
    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
        let mut repositories = self.repositories().peekable();
        entries.map(move |entry| {
            // Pop repositories that no longer contain this entry.
            while let Some((repo_path, _)) = containing_repos.last() {
                if !entry.path.starts_with(repo_path) {
                    containing_repos.pop();
                } else {
                    break;
                }
            }
            // Push upcoming repositories whose work directory contains it.
            while let Some((repo_path, _)) = repositories.peek() {
                if entry.path.starts_with(repo_path) {
                    containing_repos.push(repositories.next().unwrap());
                } else {
                    break;
                }
            }
            // The innermost containing repository sits on top of the stack.
            let repo = containing_repos.last().map(|(_, repo)| *repo);
            (entry, repo)
        })
    }
1672
1673 pub fn statuses_for_directories(&self, paths: &[&Path]) -> Vec<GitFileStatus> {
1674 // ["/a/b", "a/b/c", "a/b/d", "j"]
1675
1676 // Path stack:
1677 // If path has descendents following it, push to stack: ["a/b"]
1678 // Figure out a/b/c
1679 // Figure out a/b/d
1680 // Once no more descendants, pop the stack:
1681 // Figure out a/b
1682 }
1683
1684 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1685 let empty_path = Path::new("");
1686 self.entries_by_path
1687 .cursor::<()>()
1688 .filter(move |entry| entry.path.as_ref() != empty_path)
1689 .map(|entry| &entry.path)
1690 }
1691
    /// Iterates over the direct children of the entry at `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Bias::Right positions the cursor just past the parent entry itself.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }
1705
    /// Iterates over all entries beneath `parent_path`, filtered by
    /// `include_dirs` / `include_ignored`.
    fn descendent_entries<'a>(
        &'a self,
        include_dirs: bool,
        include_ignored: bool,
        parent_path: &'a Path,
    ) -> DescendentEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
        let mut traversal = Traversal {
            cursor,
            include_dirs,
            include_ignored,
        };

        // If the cursor landed on an entry that the filters exclude, advance
        // to the first entry that is actually part of this traversal.
        if traversal.end_offset() == traversal.start_offset() {
            traversal.advance();
        }

        DescendentEntriesIter {
            traversal,
            parent_path,
        }
    }
1729
    /// The entry for the worktree root (the empty relative path), if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }
1733
    /// The display name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
1737
1738 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1739 self.repository_entries
1740 .get(&RepositoryWorkDirectory(Path::new("").into()))
1741 .map(|entry| entry.to_owned())
1742 }
1743
    /// Iterates over all repository entries in this snapshot.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repository_entries.values()
    }
1747
    /// The id of the most recently started scan reflected in this snapshot.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }
1751
1752 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1753 let path = path.as_ref();
1754 self.traverse_from_path(true, true, path)
1755 .entry()
1756 .and_then(|entry| {
1757 if entry.path.as_ref() == path {
1758 Some(entry)
1759 } else {
1760 None
1761 }
1762 })
1763 }
1764
    /// Looks up an entry by id, resolving it through the path index.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }
1769
1770 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1771 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1772 }
1773}
1774
1775impl LocalSnapshot {
    /// Returns the local (on-disk) repository state for the given repository
    /// entry, if its work directory is known.
    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }
1779
1780 pub(crate) fn repo_for_metadata(
1781 &self,
1782 path: &Path,
1783 ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1784 self.git_repositories
1785 .iter()
1786 .find(|(_, repo)| repo.in_dot_git(path))
1787 }
1788
    /// Builds a delta `UpdateWorktree` message describing the given entry and
    /// repository changes relative to the previously-sent state.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updated_repositories = Vec::new();
        let mut removed_repositories = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        for (work_dir_path, change) in repo_changes.iter() {
            let new_repo = self
                .repository_entries
                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
            // Compare old and new repository state to decide between an
            // incremental update, a full insert, or a removal.
            match (&change.old_repository, new_repo) {
                (Some(old_repo), Some(new_repo)) => {
                    updated_repositories.push(new_repo.build_update(old_repo));
                }
                (None, Some(new_repo)) => {
                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
                }
                (Some(old_repo), None) => {
                    removed_repositories.push(old_repo.work_directory.0.to_proto());
                }
                _ => {}
            }
        }

        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);
        removed_repositories.sort_unstable();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        // An id that appears in both lists was re-created; report it only as
        // updated.
        // TODO - optimize, knowing that removed_entries are sorted.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }
1848
1849 fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
1850 let mut updated_entries = self
1851 .entries_by_path
1852 .iter()
1853 .map(proto::Entry::from)
1854 .collect::<Vec<_>>();
1855 updated_entries.sort_unstable_by_key(|e| e.id);
1856
1857 let mut updated_repositories = self
1858 .repository_entries
1859 .values()
1860 .map(proto::RepositoryEntry::from)
1861 .collect::<Vec<_>>();
1862 updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
1863
1864 proto::UpdateWorktree {
1865 project_id,
1866 worktree_id,
1867 abs_path: self.abs_path().to_string_lossy().into(),
1868 root_name: self.root_name().to_string(),
1869 updated_entries,
1870 removed_entries: Vec::new(),
1871 scan_id: self.scan_id as u64,
1872 is_last_update: self.completed_scan_id == self.scan_id,
1873 updated_repositories,
1874 removed_repositories: Vec::new(),
1875 }
1876 }
1877
    /// Inserts `entry` into both snapshot indices.
    ///
    /// As side effects, (re)parses the file when it is a `.gitignore`, and
    /// preserves the previously-known kind for directories still pending a
    /// scan.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            // Parse the ignore file synchronously; `true` flags it as needing
            // to be applied.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Don't demote an already-known directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If the path was occupied by an entry with a different id, drop that
        // id from the id index.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
1923
1924 fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1925 let abs_path = self.abs_path.join(&parent_path);
1926 let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1927
1928 // Guard against repositories inside the repository metadata
1929 if work_dir
1930 .components()
1931 .find(|component| component.as_os_str() == *DOT_GIT)
1932 .is_some()
1933 {
1934 return None;
1935 };
1936
1937 let work_dir_id = self
1938 .entry_for_path(work_dir.clone())
1939 .map(|entry| entry.id)?;
1940
1941 if self.git_repositories.get(&work_dir_id).is_none() {
1942 let repo = fs.open_repo(abs_path.as_path())?;
1943 let work_directory = RepositoryWorkDirectory(work_dir.clone());
1944
1945 let repo_lock = repo.lock();
1946
1947 self.repository_entries.insert(
1948 work_directory.clone(),
1949 RepositoryEntry {
1950 work_directory: work_dir_id.into(),
1951 branch: repo_lock.branch_name().map(Into::into),
1952 },
1953 );
1954
1955 self.scan_statuses(repo_lock.deref(), &work_directory, &work_directory.0);
1956
1957 drop(repo_lock);
1958
1959 self.git_repositories.insert(
1960 work_dir_id,
1961 LocalRepositoryEntry {
1962 git_dir_scan_id: 0,
1963 repo_ptr: repo,
1964 git_dir_path: parent_path.clone(),
1965 },
1966 )
1967 }
1968
1969 Some(())
1970 }
1971
1972 fn scan_statuses(
1973 &mut self,
1974 repo_ptr: &dyn GitRepository,
1975 work_directory: &RepositoryWorkDirectory,
1976 path: &Path,
1977 ) {
1978 let mut edits = vec![];
1979 let prefer_repo = work_directory.0.deref() == path;
1980
1981 for path in self
1982 .descendent_entries(false, false, path)
1983 .map(|entry| &entry.path)
1984 {
1985 let Ok(repo_path) = path.strip_prefix(&work_directory.0) else {
1986 continue;
1987 };
1988
1989 self.set_git_status(
1990 &path,
1991 repo_ptr.status(&RepoPath(repo_path.into())),
1992 prefer_repo,
1993 &mut edits,
1994 );
1995 }
1996
1997 // let statuses = repo_ptr.statuses().unwrap_or_default();
1998 self.entries_by_path.edit(edits, &());
1999 }
2000
2001 fn set_git_status(
2002 &self,
2003 path: &Path,
2004 status: Option<GitFileStatus>,
2005 prefer_repo: bool,
2006 edits: &mut Vec<Edit<Entry>>,
2007 ) {
2008 for path in path.ancestors() {
2009 dbg!(path);
2010
2011 let search = edits.binary_search_by_key(&path, |edit| match edit {
2012 Edit::Insert(item) => &item.path,
2013 _ => unreachable!(),
2014 });
2015
2016 match search {
2017 Ok(idx) => match &mut edits[idx] {
2018 Edit::Insert(item) => {
2019 item.git_status = GitFileStatus::merge(item.git_status, status, prefer_repo)
2020 }
2021 _ => unreachable!(),
2022 },
2023 Err(idx) => {
2024 let Some(entry) = self.entry_for_path(path) else {
2025 continue;
2026 };
2027
2028 let mut entry = entry.clone();
2029 entry.git_status = dbg!(GitFileStatus::merge(
2030 dbg!(entry.git_status),
2031 status,
2032 prefer_repo
2033 ));
2034
2035 edits.insert(idx, Edit::Insert(entry))
2036 }
2037 }
2038 }
2039 }
2040
2041 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2042 let mut inodes = TreeSet::default();
2043 for ancestor in path.ancestors().skip(1) {
2044 if let Some(entry) = self.entry_for_path(ancestor) {
2045 inodes.insert(entry.inode);
2046 }
2047 }
2048 inodes
2049 }
2050
    /// Builds the stack of `.gitignore` files that apply to `abs_path`,
    /// applied from the outermost ancestor inward.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        // Pair each ancestor with its gitignore, if one is known.
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        // Apply outermost-first; if any ancestor directory is itself ignored,
        // everything beneath it is ignored too.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
2077}
2078
2079impl BackgroundScannerState {
2080 fn reuse_entry_id(&mut self, entry: &mut Entry) {
2081 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2082 entry.id = removed_entry_id;
2083 } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
2084 entry.id = existing_entry.id;
2085 }
2086 }
2087
    /// Inserts `entry` into the snapshot, first reusing an existing entry id
    /// where possible so ids stay stable across renames and rescans.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        self.reuse_entry_id(&mut entry);
        self.snapshot.insert_entry(entry, fs)
    }
2092
    /// Fills in the scanned contents of the directory at `parent_path`,
    /// marking it fully scanned and registering any `.gitignore` or `.git`
    /// metadata found inside it.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) = self
            .snapshot
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        // Only directories can be populated; a pending directory becomes a
        // fully-scanned one.
        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
            self.snapshot
                .ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        // A `.git` directory marks its parent as a repository work directory.
        if parent_path.file_name() == Some(&DOT_GIT) {
            self.snapshot.build_repo(parent_path, fs);
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.snapshot.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2152
    /// Removes the entry at `path` and all of its descendants from the
    /// snapshot, remembering removed ids (keyed by inode) so that renames can
    /// later reuse them.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
            // Keep entries before `path`, carve out `path` plus its
            // descendants, then keep the remainder.
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.snapshot.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Track the highest removed id per inode for potential reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // A removed `.gitignore` flags its directory for an ignore refresh.
        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
            if let Some((_, needs_update)) = self
                .snapshot
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *needs_update = true;
            }
        }
    }
2186}
2187
2188async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2189 let contents = fs.load(abs_path).await?;
2190 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2191 let mut builder = GitignoreBuilder::new(parent);
2192 for line in contents.lines() {
2193 builder.add_line(Some(abs_path.into()), line)?;
2194 }
2195 Ok(builder.build()?)
2196}
2197
2198impl WorktreeId {
2199 pub fn from_usize(handle_id: usize) -> Self {
2200 Self(handle_id)
2201 }
2202
2203 pub(crate) fn from_proto(id: u64) -> Self {
2204 Self(id as usize)
2205 }
2206
2207 pub fn to_proto(&self) -> u64 {
2208 self.0 as u64
2209 }
2210
2211 pub fn to_usize(&self) -> usize {
2212 self.0
2213 }
2214}
2215
2216impl fmt::Display for WorktreeId {
2217 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2218 self.0.fmt(f)
2219 }
2220}
2221
2222impl Deref for Worktree {
2223 type Target = Snapshot;
2224
2225 fn deref(&self) -> &Self::Target {
2226 match self {
2227 Worktree::Local(worktree) => &worktree.snapshot,
2228 Worktree::Remote(worktree) => &worktree.snapshot,
2229 }
2230 }
2231}
2232
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    // A local worktree transparently exposes its `LocalSnapshot`.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2240
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    // A remote worktree transparently exposes its `Snapshot`.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2248
impl fmt::Debug for LocalWorktree {
    // A local worktree debug-prints as its snapshot.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
2254
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Local wrappers that render the two entry indices compactly.
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl<'a> fmt::Debug for EntriesByPath<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // Rendered as a map from path to entry id.
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl<'a> fmt::Debug for EntriesById<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}
2282
/// A handle to a file within a worktree.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree this file belongs to.
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time.
    pub mtime: SystemTime,
    // Id of the worktree entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    // Whether this file lives in a local (as opposed to remote) worktree.
    pub(crate) is_local: bool,
    // Set when the underlying entry has been deleted from the worktree.
    pub(crate) is_deleted: bool,
}
2292
2293impl language::File for File {
2294 fn as_local(&self) -> Option<&dyn language::LocalFile> {
2295 if self.is_local {
2296 Some(self)
2297 } else {
2298 None
2299 }
2300 }
2301
2302 fn mtime(&self) -> SystemTime {
2303 self.mtime
2304 }
2305
2306 fn path(&self) -> &Arc<Path> {
2307 &self.path
2308 }
2309
2310 fn full_path(&self, cx: &AppContext) -> PathBuf {
2311 let mut full_path = PathBuf::new();
2312 let worktree = self.worktree.read(cx);
2313
2314 if worktree.is_visible() {
2315 full_path.push(worktree.root_name());
2316 } else {
2317 let path = worktree.abs_path();
2318
2319 if worktree.is_local() && path.starts_with(HOME.as_path()) {
2320 full_path.push("~");
2321 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2322 } else {
2323 full_path.push(path)
2324 }
2325 }
2326
2327 if self.path.components().next().is_some() {
2328 full_path.push(&self.path);
2329 }
2330
2331 full_path
2332 }
2333
2334 /// Returns the last component of this handle's absolute path. If this handle refers to the root
2335 /// of its worktree, then this method will return the name of the worktree itself.
2336 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2337 self.path
2338 .file_name()
2339 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2340 }
2341
2342 fn is_deleted(&self) -> bool {
2343 self.is_deleted
2344 }
2345
2346 fn as_any(&self) -> &dyn Any {
2347 self
2348 }
2349
2350 fn to_proto(&self) -> rpc::proto::File {
2351 rpc::proto::File {
2352 worktree_id: self.worktree.id() as u64,
2353 entry_id: self.entry_id.to_proto(),
2354 path: self.path.to_string_lossy().into(),
2355 mtime: Some(self.mtime.into()),
2356 is_deleted: self.is_deleted,
2357 }
2358 }
2359}
2360
2361impl language::LocalFile for File {
2362 fn abs_path(&self, cx: &AppContext) -> PathBuf {
2363 self.worktree
2364 .read(cx)
2365 .as_local()
2366 .unwrap()
2367 .abs_path
2368 .join(&self.path)
2369 }
2370
2371 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2372 let worktree = self.worktree.read(cx).as_local().unwrap();
2373 let abs_path = worktree.absolutize(&self.path);
2374 let fs = worktree.fs.clone();
2375 cx.background()
2376 .spawn(async move { fs.load(&abs_path).await })
2377 }
2378
2379 fn buffer_reloaded(
2380 &self,
2381 buffer_id: u64,
2382 version: &clock::Global,
2383 fingerprint: RopeFingerprint,
2384 line_ending: LineEnding,
2385 mtime: SystemTime,
2386 cx: &mut AppContext,
2387 ) {
2388 let worktree = self.worktree.read(cx).as_local().unwrap();
2389 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2390 worktree
2391 .client
2392 .send(proto::BufferReloaded {
2393 project_id,
2394 buffer_id,
2395 version: serialize_version(version),
2396 mtime: Some(mtime.into()),
2397 fingerprint: serialize_fingerprint(fingerprint),
2398 line_ending: serialize_line_ending(line_ending) as i32,
2399 })
2400 .log_err();
2401 }
2402 }
2403}
2404
2405impl File {
2406 pub fn from_proto(
2407 proto: rpc::proto::File,
2408 worktree: ModelHandle<Worktree>,
2409 cx: &AppContext,
2410 ) -> Result<Self> {
2411 let worktree_id = worktree
2412 .read(cx)
2413 .as_remote()
2414 .ok_or_else(|| anyhow!("not remote"))?
2415 .id();
2416
2417 if worktree_id.to_proto() != proto.worktree_id {
2418 return Err(anyhow!("worktree id does not match file"));
2419 }
2420
2421 Ok(Self {
2422 worktree,
2423 path: Path::new(&proto.path).into(),
2424 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2425 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2426 is_local: false,
2427 is_deleted: proto.is_deleted,
2428 })
2429 }
2430
2431 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2432 file.and_then(|f| f.as_any().downcast_ref())
2433 }
2434
2435 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2436 self.worktree.read(cx).id()
2437 }
2438
2439 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2440 if self.is_deleted {
2441 None
2442 } else {
2443 Some(self.entry_id)
2444 }
2445 }
2446}
2447
/// Counts of files in each git status category.
#[derive(Clone, Debug, PartialEq, Eq, Default)]
struct GitStatuses {
    /// Number of files with an `Added` status.
    added: usize,
    /// Number of files with a `Modified` status.
    modified: usize,
    /// Number of files with a `Conflict` status.
    conflict: usize,
}
2454
2455impl GitStatuses {
2456 fn status(&self) -> Option<GitFileStatus> {
2457 if self.conflict > 0 {
2458 Some(GitFileStatus::Conflict)
2459 } else if self.modified > 0 {
2460 Some(GitFileStatus::Modified)
2461 } else if self.added > 0 {
2462 Some(GitFileStatus::Added)
2463 } else {
2464 None
2465 }
2466 }
2467
2468 fn add_status(&self, status: Option<GitFileStatus>) {}
2469}
2470
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable, project-wide id of this entry.
    pub id: ProjectEntryId,
    /// Whether this is a file or a (possibly not-yet-scanned) directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode of the entry.
    pub inode: u64,
    /// Last-known modification time.
    pub mtime: SystemTime,
    /// Whether the entry is a symbolic link.
    pub is_symlink: bool,
    /// Whether the entry is excluded by gitignore rules.
    pub is_ignored: bool,
    /// Git status recorded for this entry during scanning, if any.
    pub git_status: Option<GitFileStatus>,
}
2482
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not yet
    /// been scanned.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, along with the character bag used for fuzzy path matching.
    File(CharBag),
}
2489
/// How a path changed, as reported in worktree update events.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
2505
/// Describes a change to a git repository tracked by the worktree.
pub struct GitRepositoryChange {
    /// The previous state of the repository, if it already existed.
    pub old_repository: Option<RepositoryEntry>,
}
2510
/// The set of entries that changed during a scan, with each change's kind.
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// The set of git repositories that changed during a scan.
pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
2513
2514impl Entry {
2515 fn new(
2516 path: Arc<Path>,
2517 metadata: &fs::Metadata,
2518 next_entry_id: &AtomicUsize,
2519 root_char_bag: CharBag,
2520 ) -> Self {
2521 Self {
2522 id: ProjectEntryId::new(next_entry_id),
2523 kind: if metadata.is_dir {
2524 EntryKind::PendingDir
2525 } else {
2526 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2527 },
2528 path,
2529 inode: metadata.inode,
2530 mtime: metadata.mtime,
2531 is_symlink: metadata.is_symlink,
2532 is_ignored: false,
2533 git_status: None,
2534 }
2535 }
2536
2537 pub fn is_dir(&self) -> bool {
2538 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2539 }
2540
2541 pub fn is_file(&self) -> bool {
2542 matches!(self.kind, EntryKind::File(_))
2543 }
2544
2545 pub fn git_status(&self) -> Option<GitFileStatus> {
2546 self.git_status /*.status() */
2547 }
2548}
2549
2550impl sum_tree::Item for Entry {
2551 type Summary = EntrySummary;
2552
2553 fn summary(&self) -> Self::Summary {
2554 let visible_count = if self.is_ignored { 0 } else { 1 };
2555 let file_count;
2556 let visible_file_count;
2557 if self.is_file() {
2558 file_count = 1;
2559 visible_file_count = visible_count;
2560 } else {
2561 file_count = 0;
2562 visible_file_count = 0;
2563 }
2564
2565 EntrySummary {
2566 max_path: self.path.clone(),
2567 count: 1,
2568 visible_count,
2569 file_count,
2570 visible_file_count,
2571 }
2572 }
2573}
2574
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and therefore ordered) by their relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2582
/// Aggregate data for a subtree of `Entry` items in `entries_by_path`.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// Greatest path contained in the summarized subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
    // Placeholders for future git status aggregation:
    // git_modified_count: usize,
    // git_added_count: usize,
    // git_conflict_count: usize,
}
2594
2595impl Default for EntrySummary {
2596 fn default() -> Self {
2597 Self {
2598 max_path: Arc::from(Path::new("")),
2599 count: 0,
2600 visible_count: 0,
2601 file_count: 0,
2602 visible_file_count: 0,
2603 }
2604 }
2605}
2606
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines an adjacent right-hand summary into this one: `rhs` covers
    /// entries to the right, so its `max_path` becomes the new maximum, and
    /// all counts are summed.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2618
/// An item in the `entries_by_id` tree, mapping an entry id back to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    /// Stable id of the project entry.
    id: ProjectEntryId,
    /// Path relative to the worktree root.
    path: Arc<Path>,
    /// Whether the entry is excluded by gitignore rules.
    is_ignored: bool,
    /// Scan id in which this entry was last updated.
    scan_id: usize,
}
2626
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// A single item's summary is just its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2634
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Path entries are keyed (and therefore ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2642
/// Aggregate data for a subtree of `PathEntry` items: the greatest id.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    /// Greatest entry id contained in the summarized subtree.
    max_id: ProjectEntryId,
}
2647
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The right-hand summary covers greater ids, so its max wins.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2655
/// Allows seeking through `entries_by_id` by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2661
/// Key type that orders worktree entries by their relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2664
2665impl Default for PathKey {
2666 fn default() -> Self {
2667 Self(Path::new("").into())
2668 }
2669}
2670
/// Allows seeking through `entries_by_path` by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2676
/// Scans the worktree on a background task, keeping the snapshot in sync with
/// the filesystem and reporting changes through `status_updates_tx`.
struct BackgroundScanner {
    /// Shared mutable scan state (current and previous snapshots, pending changes).
    state: Mutex<BackgroundScannerState>,
    /// Filesystem implementation used for all I/O.
    fs: Arc<dyn Fs>,
    /// Channel over which scan progress and snapshot updates are published.
    status_updates_tx: UnboundedSender<ScanState>,
    /// Executor used to parallelize directory scanning.
    executor: Arc<executor::Background>,
    /// Incoming requests to refresh specific paths; each carries a barrier
    /// that is released once the refresh has been reported.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// Source of fresh `ProjectEntryId`s.
    next_entry_id: Arc<AtomicUsize>,
    /// Current lifecycle phase of the scanner.
    phase: BackgroundScannerPhase,
}
2686
/// Lifecycle phase of the background scanner, consulted when classifying
/// filesystem changes.
#[derive(PartialEq)]
enum BackgroundScannerPhase {
    /// Performing the initial recursive scan of the worktree.
    InitialScan,
    /// Processing FS events that arrived while the initial scan was running.
    EventsReceivedDuringInitialScan,
    /// Steady state: processing FS events as they arrive.
    Events,
}
2693
2694impl BackgroundScanner {
2695 fn new(
2696 snapshot: LocalSnapshot,
2697 next_entry_id: Arc<AtomicUsize>,
2698 fs: Arc<dyn Fs>,
2699 status_updates_tx: UnboundedSender<ScanState>,
2700 executor: Arc<executor::Background>,
2701 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2702 ) -> Self {
2703 Self {
2704 fs,
2705 status_updates_tx,
2706 executor,
2707 refresh_requests_rx,
2708 next_entry_id,
2709 state: Mutex::new(BackgroundScannerState {
2710 prev_snapshot: snapshot.snapshot.clone(),
2711 snapshot,
2712 removed_entry_ids: Default::default(),
2713 changed_paths: Default::default(),
2714 }),
2715 phase: BackgroundScannerPhase::InitialScan,
2716 }
2717 }
2718
    /// Main loop of the background scanner: performs the initial recursive
    /// scan of the worktree, then processes filesystem events and refresh
    /// requests until the worktree is dropped.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = &self.state.lock().snapshot;
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.state
                    .lock()
                    .snapshot
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), false));
            }
        }
        {
            let mut state = self.state.lock();
            state.snapshot.scan_id += 1;
            ignore_stack = state
                .snapshot
                .ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor's gitignore excludes the root itself, mark the
            // root entry as ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    state.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut state = self.state.lock();
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        // Continue processing events until the worktree is dropped.
        self.phase = BackgroundScannerPhase::Events;
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths.clone(), barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Drain any event batches that are already ready so they
                    // can be processed together.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths.clone()).await;
                }
            }
        }
    }
2813
    /// Reloads the given paths, then sends a status update that releases
    /// `barrier`. Returns `false` when the status receiver has been dropped.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2818
    /// Handles a batch of filesystem events: reloads the affected entries,
    /// rescans any affected directories, refreshes ignore and git-repository
    /// state, and broadcasts a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        let paths = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await;
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        {
            let mut snapshot = &mut self.state.lock().snapshot;

            if let Some(paths) = paths {
                for path in paths {
                    self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
                }
            }

            // Drop git repository state for work directories whose `.git`
            // entry no longer exists in the snapshot.
            let mut git_repositories = mem::take(&mut snapshot.git_repositories);
            git_repositories.retain(|work_directory_id, _| {
                snapshot
                    .entry_for_id(*work_directory_id)
                    .map_or(false, |entry| {
                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                    })
            });
            snapshot.git_repositories = git_repositories;

            // Keep repository entries only for repositories that still exist.
            let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
            git_repository_entries.retain(|_, entry| {
                snapshot
                    .git_repositories
                    .get(&entry.work_directory.0)
                    .is_some()
            });
            snapshot.snapshot.repository_entries = git_repository_entries;
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        self.send_status_update(false, None);
    }
2861
    /// Drains the scan-job queue using one worker per CPU, interleaving path
    /// refresh requests and (when enabled) periodic progress updates.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Errors on the root itself are reported elsewhere.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2934
    /// Publishes the current snapshot, along with the set of paths changed
    /// since the previous update, to the worktree. Returns `false` when the
    /// receiving side has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut state = self.state.lock();
        // While still scanning, skip updates that would carry no changes.
        if state.changed_paths.is_empty() && scanning {
            return true;
        }

        let new_snapshot = state.snapshot.clone();
        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
        let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
        state.changed_paths.clear();

        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot: new_snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2955
    /// Scans a single directory: reads its children, applies any `.gitignore`
    /// found there, records the new entries in the snapshot, and enqueues scan
    /// jobs for subdirectories that have not been visited yet.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = &self.state.lock().snapshot;
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                self.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in the current directory.
                // Because `.gitignore` starts with a `.`, it sorts near the
                // beginning of the directory listing, so such entries should
                // rarely be numerous. Update the ignore stack associated with
                // any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // `new_jobs` parallels the directory entries in `new_entries`.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        {
            let mut state = self.state.lock();
            state.populate_dir(job.path.clone(), new_entries, new_ignore, self.fs.as_ref());
            // Record this directory as changed, keeping `changed_paths` sorted.
            if let Err(ix) = state.changed_paths.binary_search(&job.path) {
                state.changed_paths.insert(ix, job.path.clone());
            }
        }

        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
3082
    /// Refreshes the snapshot entries for the given absolute paths. When
    /// `scan_queue_tx` is provided, affected directories are re-scanned
    /// recursively. Returns the corresponding worktree-relative paths, or
    /// `None` if the worktree root could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sorting puts each parent before its descendants, so the dedup drops
        // any path contained within an earlier one.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Fetch metadata for all paths concurrently before taking the lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut state = self.state.lock();
        let snapshot = &mut state.snapshot;
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    state.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    state.insert_entry(fs_entry, self.fs.as_ref());

                    // Enqueue a recursive scan of directories, guarding
                    // against symlink cycles via the ancestor inode set.
                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = state.snapshot.ancestor_inodes_for_path(&path);
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {
                    // The path no longer exists; drop any git repository state
                    // whose work directory it named.
                    self.remove_repo_path(&path, &mut state.snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        util::extend_sorted(
            &mut state.changed_paths,
            event_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );

        Some(event_paths)
    }
3181
3182 fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
3183 if !path
3184 .components()
3185 .any(|component| component.as_os_str() == *DOT_GIT)
3186 {
3187 if let Some(repository) = snapshot.repository_for_work_directory(path) {
3188 let entry = repository.work_directory.0;
3189 snapshot.git_repositories.remove(&entry);
3190 snapshot
3191 .snapshot
3192 .repository_entries
3193 .remove(&RepositoryWorkDirectory(path.into()));
3194 return Some(());
3195 }
3196 }
3197
3198 // TODO statuses
3199 // Track when a .git is removed and iterate over the file system there
3200
3201 Some(())
3202 }
3203
    /// Refreshes git repository state for a changed file path. Paths inside a
    /// `.git` directory trigger an index reload and full status re-scan of the
    /// repository; other paths only re-scan statuses beneath themselves.
    /// Returns `None` when there is nothing to do.
    fn reload_repo_for_file_path(
        &self,
        path: &Path,
        snapshot: &mut LocalSnapshot,
        fs: &dyn Fs,
    ) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let (entry_id, repo_ptr) = {
                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
                    // No repository is tracked for this `.git` path yet; find
                    // the `.git` directory itself and initialize one.
                    let dot_git_dir = path.ancestors()
                        .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
                        .next()?;

                    snapshot.build_repo(dot_git_dir.into(), fs);
                    return None;
                };
                // Already refreshed during this scan; nothing more to do.
                if repo.git_dir_scan_id == scan_id {
                    return None;
                }

                (*entry_id, repo.repo_ptr.to_owned())
            };
            // NOTE: two paths lead here with the same outcome:
            // 1. `populate_dir` initializes the git repo.
            // 2. Sometimes we get a file event inside the `.git` dir before the
            //    repo has been initialized.
            // In both cases, we should end up with an initialized repo and a
            // full status scan.

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            let repo = repo_ptr.lock();
            repo.reload_index();
            let branch = repo.branch_name();

            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.git_dir_scan_id = scan_id;
            });

            snapshot
                .snapshot
                .repository_entries
                .update(&work_dir, |entry| {
                    entry.branch = branch.map(Into::into);
                });

            snapshot.scan_statuses(repo.deref(), &work_dir, &work_dir.0);
        } else {
            // Ignored files don't participate in git status tracking.
            if snapshot
                .entry_for_path(&path)
                .map(|entry| entry.is_ignored)
                .unwrap_or(false)
            {
                return None;
            }

            let repo = snapshot.repository_for_path(&path)?;
            let work_directory = &repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory.clone();
            let (local_repo, git_dir_scan_id) =
                snapshot.git_repositories.update(&work_dir_id, |entry| {
                    (entry.repo_ptr.clone(), entry.git_dir_scan_id)
                })?;

            // Short circuit if we've already scanned everything
            if git_dir_scan_id == scan_id {
                return None;
            }

            let repo_ptr = local_repo.lock();

            snapshot.scan_statuses(repo_ptr.deref(), &work_directory, path);
        }

        Some(())
    }
3287
    /// Re-evaluates ignore statuses after `.gitignore` files have changed:
    /// drops stale gitignores, then recursively updates the ignore state of
    /// all entries beneath each directory whose gitignore was modified.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.state.lock().snapshot.clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        let abs_path = snapshot.abs_path.clone();
        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
                if *needs_update {
                    *needs_update = false;
                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                        ignores_to_update.push(parent_abs_path.clone());
                    }
                }

                // Forget gitignores whose file no longer exists in the snapshot.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.state
                .lock()
                .snapshot
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of this path; the recursive update covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
3368
    /// Recomputes the ignore status of the direct children of one directory,
    /// queueing jobs for subdirectories, and applies any resulting edits to
    /// the snapshot's entry trees.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose ignore status flipped.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let state = &mut self.state.lock();
        for edit in &entries_by_path_edits {
            if let Edit::Insert(entry) = edit {
                // Keep `changed_paths` sorted while recording each change.
                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
                    state.changed_paths.insert(ix, entry.path.clone());
                }
            }
        }

        state
            .snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
3422
    /// Computes the set of entry changes between two snapshots, restricted to
    /// `event_paths` and their descendants, classifying each change based on
    /// the scanner's current phase.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> UpdatedEntriesSet {
        use BackgroundScannerPhase::*;
        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

        // Identify which paths have changed. Use the known set of changed
        // parent paths to optimize the search.
        let mut changes = Vec::new();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        old_paths.next(&());
        new_paths.next(&());
        for path in event_paths {
            let path = PathKey(path.clone());
            // Position both cursors at (or just before) the event path.
            if old_paths.item().map_or(false, |e| e.path < path.0) {
                old_paths.seek_forward(&path, Bias::Left, &());
            }
            if new_paths.item().map_or(false, |e| e.path < path.0) {
                new_paths.seek_forward(&path, Bias::Left, &());
            }

            // Merge-walk the two snapshots in path order over this event
            // path's subtree.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path and left its subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                // Present only in the old snapshot.
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if self.phase == EventsReceivedDuringInitialScan {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.push((
                                        new_entry.path.clone(),
                                        new_entry.id,
                                        AddedOrUpdated,
                                    ));
                                } else if old_entry.id != new_entry.id {
                                    // Same path but a different entry id:
                                    // report a removal followed by an add.
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
                                } else if old_entry != new_entry {
                                    changes.push((new_entry.path.clone(), new_entry.id, Updated));
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                // Present only in the new snapshot.
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    if self.phase == InitialScan {
                                        Loaded
                                    } else {
                                        Added
                                    },
                                ));
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.push((
                            new_entry.path.clone(),
                            new_entry.id,
                            if self.phase == InitialScan {
                                Loaded
                            } else {
                                Added
                            },
                        ));
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes.into()
    }
3520
3521 async fn progress_timer(&self, running: bool) {
3522 if !running {
3523 return futures::future::pending().await;
3524 }
3525
3526 #[cfg(any(test, feature = "test-support"))]
3527 if self.fs.is_fake() {
3528 return self.executor.simulate_random_delay().await;
3529 }
3530
3531 smol::Timer::after(Duration::from_millis(100)).await;
3532 }
3533}
3534
3535fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3536 let mut result = root_char_bag;
3537 result.extend(
3538 path.to_string_lossy()
3539 .chars()
3540 .map(|c| c.to_ascii_lowercase()),
3541 );
3542 result
3543}
3544
/// A unit of work for the background scanner: scan the directory at
/// `abs_path` (worktree-relative `path`), honoring `ignore_stack`.
struct ScanJob {
    abs_path: Arc<Path>,
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Channel on which jobs for this directory's subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    // NOTE(review): presumably the inodes of ancestor directories, used to
    // avoid re-entering a directory through a symlink cycle — confirm at the
    // scan site.
    ancestor_inodes: TreeSet<u64>,
}
3552
/// A unit of work for re-evaluating ignore statuses: refresh the entries
/// directly under `abs_path` and recurse into subdirectories via
/// `ignore_queue` (see `update_ignore_status`).
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Cloned into child jobs so each directory can enqueue its own subtree.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3558
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all pending FS events have been processed, so that
    /// subsequent assertions observe a settled worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3566
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the scanner to notice it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then remove it again and wait for that to be observed too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3605
/// A `sum_tree` dimension tracking how many entries precede a cursor
/// position, broken down by directory/file and visible/ignored status.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // The greatest entry path accumulated so far.
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
3614
3615impl<'a> TraversalProgress<'a> {
3616 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3617 match (include_ignored, include_dirs) {
3618 (true, true) => self.count,
3619 (true, false) => self.file_count,
3620 (false, true) => self.visible_count,
3621 (false, false) => self.visible_file_count,
3622 }
3623 }
3624}
3625
3626impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3627 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3628 self.max_path = summary.max_path.as_ref();
3629 self.count += summary.count;
3630 self.visible_count += summary.visible_count;
3631 self.file_count += summary.file_count;
3632 self.visible_file_count += summary.visible_file_count;
3633 }
3634}
3635
3636impl<'a> Default for TraversalProgress<'a> {
3637 fn default() -> Self {
3638 Self {
3639 max_path: Path::new(""),
3640 count: 0,
3641 visible_count: 0,
3642 file_count: 0,
3643 visible_file_count: 0,
3644 }
3645 }
3646}
3647
/// An iterator-like cursor over a snapshot's entries, optionally filtering
/// out directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3653
impl<'a> Traversal<'a> {
    /// Advances the cursor to the next entry that passes this traversal's
    /// dir/ignored filters, by seeking to the current filtered offset + 1.
    /// Returns whether the cursor moved.
    pub fn advance(&mut self) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + 1,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skips the current entry's entire subtree, repeating until the cursor
    /// lands on an entry that passes the filters. Returns false if the end
    /// of the tree is reached first.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek past every path that has `entry.path` as a prefix.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// Returns the entry at the cursor, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of filter-matching entries strictly before the current entry.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }

    /// Number of filter-matching entries up to and including the current one.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_dirs, self.include_ignored)
    }
}
3701
3702impl<'a> Iterator for Traversal<'a> {
3703 type Item = &'a Entry;
3704
3705 fn next(&mut self) -> Option<Self::Item> {
3706 if let Some(item) = self.entry() {
3707 self.advance();
3708 Some(item)
3709 } else {
3710 None
3711 }
3712 }
3713}
3714
/// A seek target for `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path itself.
    Path(&'a Path),
    // Seek just past the given path's entire subtree.
    PathSuccessor(&'a Path),
    // Seek to the n-th entry matching the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3725
3726impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3727 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3728 match self {
3729 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3730 TraversalTarget::PathSuccessor(path) => {
3731 if !cursor_location.max_path.starts_with(path) {
3732 Ordering::Equal
3733 } else {
3734 Ordering::Greater
3735 }
3736 }
3737 TraversalTarget::Count {
3738 count,
3739 include_dirs,
3740 include_ignored,
3741 } => Ord::cmp(
3742 count,
3743 &cursor_location.count(*include_dirs, *include_ignored),
3744 ),
3745 }
3746 }
3747}
3748
/// Iterates over the direct children of `parent_path`, skipping each
/// child's own subtree.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3753
3754impl<'a> Iterator for ChildEntriesIter<'a> {
3755 type Item = &'a Entry;
3756
3757 fn next(&mut self) -> Option<Self::Item> {
3758 if let Some(item) = self.traversal.entry() {
3759 if item.path.starts_with(&self.parent_path) {
3760 self.traversal.advance_to_sibling();
3761 return Some(item);
3762 }
3763 }
3764 None
3765 }
3766}
3767
/// Iterates over every entry beneath `parent_path`, descending into
/// subtrees (unlike `ChildEntriesIter`, which skips them).
struct DescendentEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3772
3773impl<'a> Iterator for DescendentEntriesIter<'a> {
3774 type Item = &'a Entry;
3775
3776 fn next(&mut self) -> Option<Self::Item> {
3777 if let Some(item) = self.traversal.entry() {
3778 if item.path.starts_with(&self.parent_path) {
3779 self.traversal.advance();
3780 return Some(item);
3781 }
3782 }
3783 None
3784 }
3785}
3786
/// Serializes a worktree entry into its protobuf representation.
impl<'a> From<&'a Entry> for proto::Entry {
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // The path is converted lossily; non-UTF-8 components are replaced.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
            git_status: entry.git_status.map(|status| status.to_proto()),
        }
    }
}
3801
3802impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3803 type Error = anyhow::Error;
3804
3805 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3806 if let Some(mtime) = entry.mtime {
3807 let kind = if entry.is_dir {
3808 EntryKind::Dir
3809 } else {
3810 let mut char_bag = *root_char_bag;
3811 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3812 EntryKind::File(char_bag)
3813 };
3814 let path: Arc<Path> = PathBuf::from(entry.path).into();
3815 Ok(Entry {
3816 id: ProjectEntryId::from_proto(entry.id),
3817 kind,
3818 path,
3819 inode: entry.inode,
3820 mtime: mtime.into(),
3821 is_symlink: entry.is_symlink,
3822 is_ignored: entry.is_ignored,
3823 git_status: GitFileStatus::from_proto(entry.git_status),
3824 })
3825 } else {
3826 Err(anyhow!(
3827 "missing mtime in remote worktree entry {:?}",
3828 entry.path
3829 ))
3830 }
3831 }
3832}
3833
3834#[cfg(test)]
3835mod tests {
3836 use super::*;
3837 use fs::{FakeFs, RealFs};
3838 use gpui::{executor::Deterministic, TestAppContext};
3839 use pretty_assertions::assert_eq;
3840 use rand::prelude::*;
3841 use serde_json::json;
3842 use std::{env, fmt::Write};
3843 use util::{http::FakeHttpClient, test::temp_tree};
3844
    // Verifies basic traversal of a scanned worktree: the gitignored entry
    // "a/b" is hidden from `entries(false)` but visible via `entries(true)`.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3902
    // Verifies `descendent_entries` filtering: directory inclusion, ignored
    // inclusion, and that empty directories yield no file descendants.
    #[gpui::test]
    async fn test_descendent_entries(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "a": "",
                "b": {
                    "c": {
                        "d": ""
                    },
                    "e": {}
                },
                "f": "",
                "g": {
                    "h": {}
                },
                "i": {
                    "j": {
                        "k": ""
                    },
                    "l": {

                    }
                },
                ".gitignore": "i/j\n",
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Files only, no ignored entries.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("b"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("b/c/d"),]
            );
            // Include directories as well.
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("b"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new("b"),
                    Path::new("b/c"),
                    Path::new("b/c/d"),
                    Path::new("b/e"),
                ]
            );

            // "g" contains only empty directories, so there are no files.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("g"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                Vec::<PathBuf>::new()
            );
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("g"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("g"), Path::new("g/h"),]
            );

            // "i/j" is gitignored: hidden unless ignored entries are included.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                Vec::<PathBuf>::new()
            );
            assert_eq!(
                tree.descendent_entries(false, true, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("i/j/k")]
            );
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("i"), Path::new("i/l"),]
            );
        })
    }
4001
    // Verifies that symlinks forming a cycle ("lib/a/lib" -> "..") are listed
    // as entries but not followed into infinite recursion, and that renaming
    // one of them is picked up by a rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back at its grandparent, creating a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Rename one symlink and confirm the snapshot reflects the new name.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
4081
    // Verifies that gitignore rules — both from an ancestor directory outside
    // the worktree and from within it — are applied on the initial scan and
    // on rescans triggered by creating new files.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        // .gitignores are handled explicitly by Zed and do not use the git
        // machinery that the git_tests module checks
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Initial scan: ancestor and local ignore rules are both honored.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create new files matching (and not matching) the ignore rules and
        // check that a rescan classifies them correctly.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
4162
    // Verifies that files written through the worktree appear as entries,
    // with ignore status derived from the surrounding .gitignore rules.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        // Write one file in a tracked directory and one in an ignored one.
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4216
    // Verifies that a directory created while the initial scan is still in
    // progress shows up in the tree, and that the remote-update stream keeps
    // an observing snapshot identical to the local one.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Mirror every update into a second snapshot, as a remote peer would.
        let snapshot1 = tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let snapshot = Arc::new(Mutex::new(tree.snapshot()));
            let _ = tree.observe_updates(0, cx, {
                let snapshot = snapshot.clone();
                move |update| {
                    snapshot.lock().apply_remote_update(update).unwrap();
                    async { true }
                }
            });
            snapshot
        });

        // Create "a/e" before waiting for the scan to complete.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // The mirrored snapshot must match the final local snapshot exactly.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        assert_eq!(
            snapshot1.lock().entries(true).collect::<Vec<_>>(),
            snapshot2.entries(true).collect::<Vec<_>>()
        );
    }
4277
    // Randomized test: performs worktree mutations while the initial scan is
    // running, then checks snapshot invariants and that replaying the update
    // stream onto any intermediate snapshot reproduces the final state.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Both knobs are overridable from the environment for soak testing.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let mut snapshots =
            vec![worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot())];
        let updates = Arc::new(Mutex::new(Vec::new()));
        worktree.update(cx, |tree, cx| {
            check_worktree_change_events(tree, cx);

            // Capture the full update stream for replay below.
            let _ = tree.as_local_mut().unwrap().observe_updates(0, cx, {
                let updates = updates.clone();
                move |update| {
                    updates.lock().push(update);
                    async { true }
                }
            });
        });

        // Mutate the worktree while its initial scan is still in flight.
        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            if rng.gen_bool(0.6) {
                snapshots
                    .push(worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot()));
            }
        }

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        cx.foreground().run_until_parked();

        let final_snapshot = worktree.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            tree.snapshot.check_invariants();
            tree.snapshot()
        });

        // Replaying the relevant updates onto each intermediate snapshot must
        // converge on the final snapshot.
        for (i, snapshot) in snapshots.into_iter().enumerate().rev() {
            let mut updated_snapshot = snapshot.clone();
            for update in updates.lock().iter() {
                if update.scan_id >= updated_snapshot.scan_id() as u64 {
                    updated_snapshot
                        .apply_remote_update(update.clone())
                        .unwrap();
                }
            }

            assert_eq!(
                updated_snapshot.entries(true).collect::<Vec<_>>(),
                final_snapshot.entries(true).collect::<Vec<_>>(),
                "wrong updates after snapshot {i}: {snapshot:#?} {updates:#?}",
            );
        }
    }
4371
    // Randomized test: after the initial scan, interleaves worktree and raw
    // FS mutations with partially-flushed FS events, then checks that the
    // settled snapshot matches a fresh scan and that update replay converges.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Both knobs are overridable from the environment for soak testing.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Capture the full update stream for replay below.
        let updates = Arc::new(Mutex::new(Vec::new()));
        worktree.update(cx, |tree, cx| {
            check_worktree_change_events(tree, cx);

            let _ = tree.as_local_mut().unwrap().observe_updates(0, cx, {
                let updates = updates.clone();
                move |update| {
                    updates.lock().push(update);
                    async { true }
                }
            });
        });

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // Pause event delivery so mutations and their FS events can be
        // interleaved in arbitrary orders.
        fs.as_fake().pause_events();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if rng.gen_bool(0.2) {
                worktree
                    .update(cx, |worktree, cx| {
                        randomly_mutate_worktree(worktree, &mut rng, cx)
                    })
                    .await
                    .log_err();
            } else {
                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            }

            // Sometimes deliver only a random prefix of the buffered events.
            let buffered_event_count = fs.as_fake().buffered_event_count();
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len);
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Deliver everything that remains and let the scanner settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX);
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A brand-new worktree scanning the same FS must agree with the
        // incrementally-maintained snapshot.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(
                snapshot.entries_without_ids(true),
                new_snapshot.entries_without_ids(true)
            );
        }

        // Replaying the relevant updates onto each stored snapshot must
        // converge on the settled snapshot.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate().rev() {
            for update in updates.lock().iter() {
                if update.scan_id >= prev_snapshot.scan_id() as u64 {
                    prev_snapshot.apply_remote_update(update.clone()).unwrap();
                }
            }

            assert_eq!(
                prev_snapshot.entries(true).collect::<Vec<_>>(),
                snapshot.entries(true).collect::<Vec<_>>(),
                "wrong updates after snapshot {i}: {updates:#?}",
            );
        }
    }
4494
    // The worktree's `UpdatedEntries` event can be used to follow along with
    // all changes to the worktree's snapshot.
    //
    // This helper subscribes to the worktree's own events and replays each
    // reported change onto a locally-maintained entry list, asserting after
    // every event that the list matches the worktree's actual entries.
    fn check_worktree_change_events(tree: &mut Worktree, cx: &mut ModelContext<Worktree>) {
        let mut entries = tree.entries(true).cloned().collect::<Vec<_>>();
        cx.subscribe(&cx.handle(), move |tree, _, event, _| {
            if let Event::UpdatedEntries(changes) = event {
                for (path, _, change_type) in changes.iter() {
                    let entry = tree.entry_for_path(&path).cloned();
                    // Either the existing index of `path` or its insertion point.
                    let ix = match entries.binary_search_by_key(&path, |e| &e.path) {
                        Ok(ix) | Err(ix) => ix,
                    };
                    match change_type {
                        PathChange::Loaded => entries.insert(ix, entry.unwrap()),
                        PathChange::Added => entries.insert(ix, entry.unwrap()),
                        PathChange::Removed => drop(entries.remove(ix)),
                        PathChange::Updated => {
                            let entry = entry.unwrap();
                            let existing_entry = entries.get_mut(ix).unwrap();
                            assert_eq!(existing_entry.path, entry.path);
                            *existing_entry = entry;
                        }
                        PathChange::AddedOrUpdated => {
                            // Replace in place when the path already exists,
                            // otherwise insert at the sorted position.
                            let entry = entry.unwrap();
                            if entries.get(ix).map(|e| &e.path) == Some(&entry.path) {
                                *entries.get_mut(ix).unwrap() = entry;
                            } else {
                                entries.insert(ix, entry);
                            }
                        }
                    }
                }

                let new_entries = tree.entries(true).cloned().collect::<Vec<_>>();
                assert_eq!(entries, new_entries, "incorrect changes: {:?}", changes);
            }
        })
        .detach();
    }
4533
    // Performs one random mutation through the worktree API: deleting,
    // renaming, creating, or overwriting an entry. Returns a task that
    // resolves when the mutation has been applied.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        log::info!("mutating worktree");
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3: delete the chosen entry (never the worktree root).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3: rename the entry under a random other directory. The
            // open-start `..=66` pattern covers 34..=66 because the previous
            // arm has already consumed 0..=33.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into its own subtree.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remainder: create a child under a directory, or overwrite a file.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4594
/// Applies one random mutation directly to the (fake) filesystem under
/// `root_path`: creates a file or directory, writes a `.gitignore`, or
/// renames/deletes an existing path. Drives the randomized scanning tests.
async fn randomly_mutate_fs(
    fs: &Arc<dyn Fs>,
    root_path: &Path,
    insertion_probability: f64,
    rng: &mut impl Rng,
) {
    log::info!("mutating fs");
    // Partition every existing path under the root into files and dirs.
    let mut files = Vec::new();
    let mut dirs = Vec::new();
    for path in fs.as_fake().paths() {
        if path.starts_with(root_path) {
            if fs.is_file(&path).await {
                files.push(path);
            } else {
                dirs.push(path);
            }
        }
    }

    // Insert a new entry when the tree is essentially empty (only the root
    // dir exists) or with the configured probability.
    if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
        let path = dirs.choose(rng).unwrap();
        let new_path = path.join(gen_name(rng));

        if rng.gen() {
            log::info!(
                "creating dir {:?}",
                new_path.strip_prefix(root_path).unwrap()
            );
            fs.create_dir(&new_path).await.unwrap();
        } else {
            log::info!(
                "creating file {:?}",
                new_path.strip_prefix(root_path).unwrap()
            );
            fs.create_file(&new_path, Default::default()).await.unwrap();
        }
    } else if rng.gen_bool(0.05) {
        // 5% of the time: write a `.gitignore` into a random directory that
        // ignores a random subset of the paths beneath it.
        let ignore_dir_path = dirs.choose(rng).unwrap();
        let ignore_path = ignore_dir_path.join(&*GITIGNORE);

        let subdirs = dirs
            .iter()
            .filter(|d| d.starts_with(&ignore_dir_path))
            .cloned()
            .collect::<Vec<_>>();
        let subfiles = files
            .iter()
            .filter(|d| d.starts_with(&ignore_dir_path))
            .cloned()
            .collect::<Vec<_>>();
        let files_to_ignore = {
            let len = rng.gen_range(0..=subfiles.len());
            subfiles.choose_multiple(rng, len)
        };
        let dirs_to_ignore = {
            // NOTE(review): exclusive range here (vs. inclusive for files
            // above), so at least one subdirectory is always left
            // un-ignored — confirm whether the asymmetry is intentional.
            let len = rng.gen_range(0..subdirs.len());
            subdirs.choose_multiple(rng, len)
        };

        // One ignore rule per line, relative to the gitignore's directory.
        let mut ignore_contents = String::new();
        for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
            writeln!(
                ignore_contents,
                "{}",
                path_to_ignore
                    .strip_prefix(&ignore_dir_path)
                    .unwrap()
                    .to_str()
                    .unwrap()
            )
            .unwrap();
        }
        log::info!(
            "creating gitignore {:?} with contents:\n{}",
            ignore_path.strip_prefix(&root_path).unwrap(),
            ignore_contents
        );
        fs.save(
            &ignore_path,
            &ignore_contents.as_str().into(),
            Default::default(),
        )
        .await
        .unwrap();
    } else {
        // Otherwise: rename or delete a random existing file or directory.
        // NOTE(review): `dirs[1..]` appears to exclude the root directory
        // from being renamed/deleted — confirm `paths()` yields the root
        // first.
        let old_path = {
            let file_path = files.choose(rng);
            let dir_path = dirs[1..].choose(rng);
            file_path.into_iter().chain(dir_path).choose(rng).unwrap()
        };

        let is_rename = rng.gen();
        if is_rename {
            // Pick a destination that is not inside the path being moved.
            let new_path_parent = dirs
                .iter()
                .filter(|d| !d.starts_with(old_path))
                .choose(rng)
                .unwrap();

            // Sometimes overwrite an existing directory by removing it first
            // and moving the old path into its place.
            let overwrite_existing_dir =
                !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
            let new_path = if overwrite_existing_dir {
                fs.remove_dir(
                    &new_path_parent,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
                new_path_parent.to_path_buf()
            } else {
                new_path_parent.join(gen_name(rng))
            };

            log::info!(
                "renaming {:?} to {}{:?}",
                old_path.strip_prefix(&root_path).unwrap(),
                if overwrite_existing_dir {
                    "overwrite "
                } else {
                    ""
                },
                new_path.strip_prefix(&root_path).unwrap()
            );
            fs.rename(
                &old_path,
                &new_path,
                fs::RenameOptions {
                    overwrite: true,
                    ignore_if_exists: true,
                },
            )
            .await
            .unwrap();
        } else if fs.is_file(&old_path).await {
            log::info!(
                "deleting file {:?}",
                old_path.strip_prefix(&root_path).unwrap()
            );
            fs.remove_file(old_path, Default::default()).await.unwrap();
        } else {
            log::info!(
                "deleting dir {:?}",
                old_path.strip_prefix(&root_path).unwrap()
            );
            fs.remove_dir(
                &old_path,
                RemoveOptions {
                    recursive: true,
                    ignore_if_not_exists: true,
                },
            )
            .await
            .unwrap();
        }
    }
}
4754
4755 fn gen_name(rng: &mut impl Rng) -> String {
4756 (0..6)
4757 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4758 .map(char::from)
4759 .collect()
4760 }
4761
impl LocalSnapshot {
    /// Asserts the snapshot's internal consistency; used by the randomized
    /// tests after every batch of mutations.
    fn check_invariants(&self) {
        // `entries_by_path` (ordered by path) and `entries_by_id` must
        // contain exactly the same (path, id) pairs.
        assert_eq!(
            self.entries_by_path
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        // The file traversals must visit exactly the file entries — and the
        // second traversal only the non-ignored ones — in the same order as
        // the raw path-ordered cursor.
        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>() {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if !entry.is_ignored {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        // Both traversals must be exhausted — no extra files.
        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // Walk the tree via `child_entries`, inserting each entry's children
        // at the previous stack top.
        // NOTE(review): despite the `bfs_paths` name, this insertion scheme
        // yields a depth-first, path-ordered walk (it is asserted equal to
        // the flat cursor below) — the name looks like a misnomer; confirm.
        let mut bfs_paths = Vec::new();
        let mut stack = vec![Path::new("")];
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>()
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        // The public `entries` traversal must agree with the raw cursor.
        let dfs_paths_via_traversal = self
            .entries(true)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        // Every tracked gitignore must correspond to an existing directory
        // entry that actually contains a `.gitignore` entry.
        for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
            let ignore_parent_path =
                ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
            assert!(self.entry_for_path(&ignore_parent_path).is_some());
            assert!(self
                .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                .is_some());
        }
    }

    /// Returns (path, inode, is_ignored) for every entry, sorted by path;
    /// ignored entries are filtered out unless `include_ignored` is set.
    /// Test helper for comparing snapshots without relying on entry ids.
    fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
        let mut paths = Vec::new();
        for entry in self.entries_by_path.cursor::<()>() {
            if include_ignored || !entry.is_ignored {
                paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
            }
        }
        paths.sort_by(|a, b| a.0.cmp(b.0));
        paths
    }
}
4836
4837 mod git_tests {
4838 use super::*;
4839 use pretty_assertions::assert_eq;
4840
/// Verifies that renaming a git work directory on disk updates the
/// repository's tracked work-directory path and that per-file git statuses
/// follow the files to their new paths.
#[gpui::test]
async fn test_rename_work_directory(cx: &mut TestAppContext) {
    let root = temp_tree(json!({
        "projects": {
            "project1": {
                "a": "",
                "b": "",
            }
        },

    }));
    let root_path = root.path();

    let http_client = FakeHttpClient::with_404_response();
    let client = cx.read(|cx| Client::new(http_client, cx));
    let tree = Worktree::local(
        client,
        root_path,
        true,
        Arc::new(RealFs),
        Default::default(),
        &mut cx.to_async(),
    )
    .await
    .unwrap();

    // Commit "a", then modify it on disk; "b" is never staged, so it should
    // show as Added while "a" shows as Modified.
    let repo = git_init(&root_path.join("projects/project1"));
    git_add("a", &repo);
    git_commit("init", &repo);
    std::fs::write(root_path.join("projects/project1/a"), "aa").ok();

    cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
        .await;

    tree.flush_fs_events(cx).await;

    // Initial state: repository rooted at project1, with the expected
    // per-file statuses.
    cx.read(|cx| {
        let tree = tree.read(cx);
        let (work_dir, _) = tree.repositories().next().unwrap();
        assert_eq!(work_dir.as_ref(), Path::new("projects/project1"));
        assert_eq!(
            tree.status_for_file(Path::new("projects/project1/a")),
            Some(GitFileStatus::Modified)
        );
        assert_eq!(
            tree.status_for_file(Path::new("projects/project1/b")),
            Some(GitFileStatus::Added)
        );
    });

    // Rename the entire work directory on disk.
    std::fs::rename(
        root_path.join("projects/project1"),
        root_path.join("projects/project2"),
    )
    .ok();
    tree.flush_fs_events(cx).await;

    // The repository and all file statuses should now be reported under the
    // renamed directory.
    cx.read(|cx| {
        let tree = tree.read(cx);
        let (work_dir, _) = tree.repositories().next().unwrap();
        assert_eq!(work_dir.as_ref(), Path::new("projects/project2"));
        assert_eq!(
            tree.status_for_file(Path::new("projects/project2/a")),
            Some(GitFileStatus::Modified)
        );
        assert_eq!(
            tree.status_for_file(Path::new("projects/project2/b")),
            Some(GitFileStatus::Added)
        );
    });
}
4912
/// Verifies that files are associated with the nearest enclosing git
/// repository (including nested repositories), that repository update events
/// are emitted for `.git` changes, and that removing `.git` detaches files
/// from the repository.
#[gpui::test]
async fn test_git_repository_for_path(cx: &mut TestAppContext) {
    // Two nested repositories: dir1/.git and dir1/deps/dep1/.git; c.txt is
    // outside any repository.
    let root = temp_tree(json!({
        "c.txt": "",
        "dir1": {
            ".git": {},
            "deps": {
                "dep1": {
                    ".git": {},
                    "src": {
                        "a.txt": ""
                    }
                }
            },
            "src": {
                "b.txt": ""
            }
        },
    }));

    let http_client = FakeHttpClient::with_404_response();
    let client = cx.read(|cx| Client::new(http_client, cx));
    let tree = Worktree::local(
        client,
        root.path(),
        true,
        Arc::new(RealFs),
        Default::default(),
        &mut cx.to_async(),
    )
    .await
    .unwrap();

    cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
        .await;
    tree.flush_fs_events(cx).await;

    tree.read_with(cx, |tree, _cx| {
        let tree = tree.as_local().unwrap();

        // A file outside any repository has no associated repository.
        assert!(tree.repository_for_path("c.txt".as_ref()).is_none());

        // b.txt belongs to the outer repository rooted at dir1.
        let entry = tree.repository_for_path("dir1/src/b.txt".as_ref()).unwrap();
        assert_eq!(
            entry
                .work_directory(tree)
                .map(|directory| directory.as_ref().to_owned()),
            Some(Path::new("dir1").to_owned())
        );

        // a.txt belongs to the nested repository, not the outer one.
        let entry = tree
            .repository_for_path("dir1/deps/dep1/src/a.txt".as_ref())
            .unwrap();
        assert_eq!(
            entry
                .work_directory(tree)
                .map(|directory| directory.as_ref().to_owned()),
            Some(Path::new("dir1/deps/dep1").to_owned())
        );

        // Bulk mapping of entries to repositories must agree with the
        // per-path lookups above.
        let entries = tree.files(false, 0);

        let paths_with_repos = tree
            .entries_with_repositories(entries)
            .map(|(entry, repo)| {
                (
                    entry.path.as_ref(),
                    repo.and_then(|repo| {
                        repo.work_directory(&tree)
                            .map(|work_directory| work_directory.0.to_path_buf())
                    }),
                )
            })
            .collect::<Vec<_>>();

        assert_eq!(
            paths_with_repos,
            &[
                (Path::new("c.txt"), None),
                (
                    Path::new("dir1/deps/dep1/src/a.txt"),
                    Some(Path::new("dir1/deps/dep1").into())
                ),
                (Path::new("dir1/src/b.txt"), Some(Path::new("dir1").into())),
            ]
        );
    });

    // Collect UpdatedGitRepositories events for the rest of the test.
    let repo_update_events = Arc::new(Mutex::new(vec![]));
    tree.update(cx, |_, cx| {
        let repo_update_events = repo_update_events.clone();
        cx.subscribe(&tree, move |_, _, event, _| {
            if let Event::UpdatedGitRepositories(update) = event {
                repo_update_events.lock().push(update.clone());
            }
        })
        .detach();
    });

    // Touching a file inside .git should produce an update event for dir1.
    std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
    tree.flush_fs_events(cx).await;

    assert_eq!(
        repo_update_events.lock()[0]
            .iter()
            .map(|e| e.0.clone())
            .collect::<Vec<Arc<Path>>>(),
        vec![Path::new("dir1").into()]
    );

    // Deleting .git should detach files from the outer repository.
    std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
    tree.flush_fs_events(cx).await;

    tree.read_with(cx, |tree, _cx| {
        let tree = tree.as_local().unwrap();

        assert!(tree
            .repository_for_path("dir1/src/b.txt".as_ref())
            .is_none());
    });
}
5034
5035 // TODO: Stream statuses UPDATE THIS TO CHECK BUBBLIBG BEHAVIOR
5036 #[gpui::test]
5037 async fn test_git_status(deterministic: Arc<Deterministic>, cx: &mut TestAppContext) {
5038 const IGNORE_RULE: &'static str = "**/target";
5039
5040 let root = temp_tree(json!({
5041 "project": {
5042 "a.txt": "a",
5043 "b.txt": "bb",
5044 "c": {
5045 "d": {
5046 "e.txt": "eee"
5047 }
5048 },
5049 "f.txt": "ffff",
5050 "target": {
5051 "build_file": "???"
5052 },
5053 ".gitignore": IGNORE_RULE
5054 },
5055
5056 }));
5057
5058 let http_client = FakeHttpClient::with_404_response();
5059 let client = cx.read(|cx| Client::new(http_client, cx));
5060 let tree = Worktree::local(
5061 client,
5062 root.path(),
5063 true,
5064 Arc::new(RealFs),
5065 Default::default(),
5066 &mut cx.to_async(),
5067 )
5068 .await
5069 .unwrap();
5070
5071 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
5072 .await;
5073
5074 const A_TXT: &'static str = "a.txt";
5075 const B_TXT: &'static str = "b.txt";
5076 const E_TXT: &'static str = "c/d/e.txt";
5077 const F_TXT: &'static str = "f.txt";
5078 const DOTGITIGNORE: &'static str = ".gitignore";
5079 const BUILD_FILE: &'static str = "target/build_file";
5080 let project_path: &Path = &Path::new("project");
5081
5082 let work_dir = root.path().join("project");
5083 let mut repo = git_init(work_dir.as_path());
5084 repo.add_ignore_rule(IGNORE_RULE).unwrap();
5085 git_add(Path::new(A_TXT), &repo);
5086 git_add(Path::new(E_TXT), &repo);
5087 git_add(Path::new(DOTGITIGNORE), &repo);
5088 git_commit("Initial commit", &repo);
5089
5090 tree.flush_fs_events(cx).await;
5091 deterministic.run_until_parked();
5092
5093 // Check that the right git state is observed on startup
5094 tree.read_with(cx, |tree, _cx| {
5095 let snapshot = tree.snapshot();
5096 assert_eq!(snapshot.repository_entries.iter().count(), 1);
5097 let (dir, _) = snapshot.repository_entries.iter().next().unwrap();
5098 assert_eq!(dir.0.as_ref(), Path::new("project"));
5099
5100 assert_eq!(
5101 snapshot.status_for_file(project_path.join(B_TXT)),
5102 Some(GitFileStatus::Added)
5103 );
5104 assert_eq!(
5105 snapshot.status_for_file(project_path.join(F_TXT)),
5106 Some(GitFileStatus::Added)
5107 );
5108
5109 dbg!(snapshot.entries(false).collect::<Vec<_>>());
5110
5111 // Check stateful bubbling works
5112 assert_eq!(
5113 snapshot.status_for_file(dbg!(project_path)),
5114 Some(GitFileStatus::Added)
5115 );
5116
5117 assert_eq!(snapshot.status_for_file(""), Some(GitFileStatus::Added));
5118 });
5119
5120 std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
5121
5122 tree.flush_fs_events(cx).await;
5123 deterministic.run_until_parked();
5124
5125 tree.read_with(cx, |tree, _cx| {
5126 let snapshot = tree.snapshot();
5127
5128 assert_eq!(
5129 snapshot.status_for_file(project_path.join(A_TXT)),
5130 Some(GitFileStatus::Modified)
5131 );
5132
5133 // Check stateful bubbling works, modified overrules added
5134 assert_eq!(
5135 snapshot.status_for_file(project_path),
5136 Some(GitFileStatus::Modified)
5137 );
5138
5139 assert_eq!(snapshot.status_for_file(""), Some(GitFileStatus::Modified));
5140 });
5141
5142 git_add(Path::new(A_TXT), &repo);
5143 git_add(Path::new(B_TXT), &repo);
5144 git_commit("Committing modified and added", &repo);
5145 tree.flush_fs_events(cx).await;
5146 deterministic.run_until_parked();
5147
5148 dbg!(git_status(&repo));
5149 // Check that repo only changes are tracked
5150 tree.read_with(cx, |tree, _cx| {
5151 let snapshot = tree.snapshot();
5152
5153 assert_eq!(
5154 snapshot.status_for_file(project_path.join(F_TXT)),
5155 Some(GitFileStatus::Added)
5156 );
5157
5158 dbg!(snapshot.entries(false).collect::<Vec<_>>());
5159
5160 assert_eq!(snapshot.status_for_file(project_path.join(B_TXT)), None);
5161 assert_eq!(snapshot.status_for_file(project_path.join(A_TXT)), None);
5162
5163 // Check bubbling
5164 assert_eq!(
5165 snapshot.status_for_file(project_path),
5166 Some(GitFileStatus::Added)
5167 );
5168
5169 assert_eq!(snapshot.status_for_file(""), Some(GitFileStatus::Added));
5170 });
5171
5172 git_reset(0, &repo);
5173 git_remove_index(Path::new(B_TXT), &repo);
5174 git_stash(&mut repo);
5175 std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
5176 std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
5177 tree.flush_fs_events(cx).await;
5178 deterministic.run_until_parked();
5179
5180 // Check that more complex repo changes are tracked
5181 tree.read_with(cx, |tree, _cx| {
5182 let snapshot = tree.snapshot();
5183
5184 assert_eq!(snapshot.status_for_file(project_path.join(A_TXT)), None);
5185 assert_eq!(
5186 snapshot.status_for_file(project_path.join(B_TXT)),
5187 Some(GitFileStatus::Added)
5188 );
5189 assert_eq!(
5190 snapshot.status_for_file(project_path.join(E_TXT)),
5191 Some(GitFileStatus::Modified)
5192 );
5193
5194 // Check status bubbling
5195 assert_eq!(
5196 snapshot.status_for_file(project_path.join(Path::new(E_TXT).parent().unwrap())),
5197 Some(GitFileStatus::Modified)
5198 );
5199 assert_eq!(
5200 snapshot.status_for_file(
5201 project_path.join(Path::new(E_TXT).parent().unwrap().parent().unwrap())
5202 ),
5203 Some(GitFileStatus::Modified)
5204 );
5205 assert_eq!(
5206 snapshot.status_for_file(project_path),
5207 Some(GitFileStatus::Modified)
5208 );
5209
5210 assert_eq!(
5211 snapshot.status_for_file(project_path.join(F_TXT)),
5212 Some(GitFileStatus::Added)
5213 );
5214 });
5215
5216 std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
5217 std::fs::remove_dir_all(work_dir.join("c")).unwrap();
5218 std::fs::write(
5219 work_dir.join(DOTGITIGNORE),
5220 [IGNORE_RULE, "f.txt"].join("\n"),
5221 )
5222 .unwrap();
5223
5224 git_add(Path::new(DOTGITIGNORE), &repo);
5225 git_commit("Committing modified git ignore", &repo);
5226
5227 tree.flush_fs_events(cx).await;
5228 deterministic.run_until_parked();
5229
5230 let mut renamed_dir_name = "first_directory/second_directory";
5231 const RENAMED_FILE: &'static str = "rf.txt";
5232
5233 std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
5234 std::fs::write(
5235 work_dir.join(renamed_dir_name).join(RENAMED_FILE),
5236 "new-contents",
5237 )
5238 .unwrap();
5239
5240 tree.flush_fs_events(cx).await;
5241 deterministic.run_until_parked();
5242
5243 tree.read_with(cx, |tree, _cx| {
5244 let snapshot = tree.snapshot();
5245 assert_eq!(
5246 snapshot
5247 .status_for_file(&project_path.join(renamed_dir_name).join(RENAMED_FILE)),
5248 Some(GitFileStatus::Added)
5249 );
5250 });
5251
5252 renamed_dir_name = "new_first_directory/second_directory";
5253
5254 std::fs::rename(
5255 work_dir.join("first_directory"),
5256 work_dir.join("new_first_directory"),
5257 )
5258 .unwrap();
5259
5260 tree.flush_fs_events(cx).await;
5261 deterministic.run_until_parked();
5262
5263 tree.read_with(cx, |tree, _cx| {
5264 let snapshot = tree.snapshot();
5265
5266 assert_eq!(
5267 snapshot.status_for_file(
5268 project_path
5269 .join(Path::new(renamed_dir_name))
5270 .join(RENAMED_FILE)
5271 ),
5272 Some(GitFileStatus::Added)
5273 );
5274 });
5275 }
5276
5277 #[track_caller]
5278 fn git_init(path: &Path) -> git2::Repository {
5279 git2::Repository::init(path).expect("Failed to initialize git repository")
5280 }
5281
5282 #[track_caller]
5283 fn git_add<P: AsRef<Path>>(path: P, repo: &git2::Repository) {
5284 let path = path.as_ref();
5285 let mut index = repo.index().expect("Failed to get index");
5286 index.add_path(path).expect("Failed to add a.txt");
5287 index.write().expect("Failed to write index");
5288 }
5289
5290 #[track_caller]
5291 fn git_remove_index(path: &Path, repo: &git2::Repository) {
5292 let mut index = repo.index().expect("Failed to get index");
5293 index.remove_path(path).expect("Failed to add a.txt");
5294 index.write().expect("Failed to write index");
5295 }
5296
5297 #[track_caller]
5298 fn git_commit(msg: &'static str, repo: &git2::Repository) {
5299 use git2::Signature;
5300
5301 let signature = Signature::now("test", "test@zed.dev").unwrap();
5302 let oid = repo.index().unwrap().write_tree().unwrap();
5303 let tree = repo.find_tree(oid).unwrap();
5304 if let Some(head) = repo.head().ok() {
5305 let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
5306
5307 let parent_commit = parent_obj.as_commit().unwrap();
5308
5309 repo.commit(
5310 Some("HEAD"),
5311 &signature,
5312 &signature,
5313 msg,
5314 &tree,
5315 &[parent_commit],
5316 )
5317 .expect("Failed to commit with parent");
5318 } else {
5319 repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
5320 .expect("Failed to commit");
5321 }
5322 }
5323
5324 #[track_caller]
5325 fn git_stash(repo: &mut git2::Repository) {
5326 use git2::Signature;
5327
5328 let signature = Signature::now("test", "test@zed.dev").unwrap();
5329 repo.stash_save(&signature, "N/A", None)
5330 .expect("Failed to stash");
5331 }
5332
5333 #[track_caller]
5334 fn git_reset(offset: usize, repo: &git2::Repository) {
5335 let head = repo.head().expect("Couldn't get repo head");
5336 let object = head.peel(git2::ObjectType::Commit).unwrap();
5337 let commit = object.as_commit().unwrap();
5338 let new_head = commit
5339 .parents()
5340 .inspect(|parnet| {
5341 parnet.message();
5342 })
5343 .skip(offset)
5344 .next()
5345 .expect("Not enough history");
5346 repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
5347 .expect("Could not reset");
5348 }
5349
5350 #[allow(dead_code)]
5351 #[track_caller]
5352 fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
5353 repo.statuses(None)
5354 .unwrap()
5355 .iter()
5356 .map(|status| (status.path().unwrap().to_string(), status.status()))
5357 .collect()
5358 }
5359 }
5360}