1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{
10 repository::{GitFileStatus, GitRepository, RepoPath},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 FutureExt, Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use sha2::digest::typenum::private::IsLessPrivate;
40use smol::channel::{self, Sender};
41use std::{
42 any::Any,
43 cmp::{self, Ordering},
44 convert::TryFrom,
45 ffi::OsStr,
46 fmt,
47 future::Future,
48 mem,
49 ops::{Deref, DerefMut},
50 path::{Path, PathBuf},
51 pin::Pin,
52 sync::{
53 atomic::{AtomicUsize, Ordering::SeqCst},
54 Arc,
55 },
56 time::{Duration, SystemTime},
57};
58use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
59use util::{paths::HOME, ResultExt};
60
/// Identifies a worktree within a project. Wraps a `usize` which, for local
/// worktrees, appears to come from the gpui model id — TODO confirm this is
/// stable across the worktree's lifetime.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
63
/// A directory tree tracked by the project: either rooted on the local file
/// system (`Local`) or replicated from a collaborator over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
68
/// A worktree backed by the local file system, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Sends batches of absolute paths to the background scanner for
    // rescanning; the barrier sender is dropped when the rescan finishes,
    // which unblocks any task awaiting the paired receiver.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch channel indicating whether a file-system scan is in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    // Keeps the background scanner alive for as long as this worktree exists.
    _background_scanner_task: Task<()>,
    // Present while this worktree is being shared with collaborators.
    share: Option<ShareState>,
    // Full diagnostics per file path. The inner `Vec` is kept sorted by
    // `LanguageServerId` (see `update_diagnostics`, which binary-searches it).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    // Error/warning counts per file path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
87
/// A replica of a worktree hosted by a collaborator. Updates arrive over RPC,
/// are applied to `background_snapshot` off the main thread, and are then
/// copied into `snapshot` on the main thread (see `Worktree::remote`).
pub struct RemoteWorktree {
    snapshot: Snapshot,
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` presumably once the host disconnects and updates stop flowing —
    // TODO confirm against the code that takes this sender.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Tasks waiting for a given scan id to be observed; resolved in order as
    // snapshots advance (see the loop in `Worktree::remote`).
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
100
/// A point-in-time, clonable view of a worktree's entries and git
/// repositories. Cloning is what allows the background scanner and the
/// foreground model to each hold a consistent copy.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root on the host's file system.
    abs_path: Arc<Path>,
    // The root directory's (or file's) name, used for fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
123
/// A git repository discovered within the worktree, identified by the project
/// entry of its working directory.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    // Name of the currently checked-out branch, if known.
    pub(crate) branch: Option<Arc<str>>,
}
129
130impl RepositoryEntry {
131 pub fn branch(&self) -> Option<Arc<str>> {
132 self.branch.clone()
133 }
134
135 pub fn work_directory_id(&self) -> ProjectEntryId {
136 *self.work_directory
137 }
138
139 pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
140 snapshot
141 .entry_for_id(self.work_directory_id())
142 .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
143 }
144
145 pub fn build_update(&self, _: &Self) -> proto::RepositoryEntry {
146 proto::RepositoryEntry {
147 work_directory_id: self.work_directory_id().to_proto(),
148 branch: self.branch.as_ref().map(|str| str.to_string()),
149 }
150 }
151}
152
153impl From<&RepositoryEntry> for proto::RepositoryEntry {
154 fn from(value: &RepositoryEntry) -> Self {
155 proto::RepositoryEntry {
156 work_directory_id: value.work_directory.to_proto(),
157 branch: value.branch.as_ref().map(|str| str.to_string()),
158 }
159 }
160}
161
/// This path corresponds to the 'content path' (the folder that contains the .git)
/// It is stored relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
165
166impl Default for RepositoryWorkDirectory {
167 fn default() -> Self {
168 RepositoryWorkDirectory(Arc::from(Path::new("")))
169 }
170}
171
172impl AsRef<Path> for RepositoryWorkDirectory {
173 fn as_ref(&self) -> &Path {
174 self.0.as_ref()
175 }
176}
177
/// Newtype over the project entry id of a git repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
180
181impl WorkDirectoryEntry {
182 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
183 worktree.entry_for_id(self.0).and_then(|entry| {
184 path.strip_prefix(&entry.path)
185 .ok()
186 .map(move |path| path.into())
187 })
188 }
189}
190
// Allows a `WorkDirectoryEntry` to be used wherever a `ProjectEntryId` is
// expected (e.g. `*repo.work_directory`).
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
198
199impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
200 fn from(value: ProjectEntryId) -> Self {
201 WorkDirectoryEntry(value)
202 }
203}
204
/// A `Snapshot` augmented with local-only state: gitignore files and git
/// repositories discovered on disk.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
}
215
/// Mutable state owned by the background scanner while it processes
/// file-system events.
pub struct BackgroundScannerState {
    snapshot: LocalSnapshot,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Paths touched during the current update, used to report changes.
    changed_paths: Vec<Arc<Path>>,
    // The snapshot as of the previously reported update, used for diffing.
    prev_snapshot: Snapshot,
}
226
/// Local-only state for a git repository: a handle to the repository plus
/// bookkeeping for detecting when its `.git` contents change.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    // Scan id at which the `.git` directory was last observed to change.
    pub(crate) git_dir_scan_id: usize,
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
235
236impl LocalRepositoryEntry {
237 // Note that this path should be relative to the worktree root.
238 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
239 path.starts_with(self.git_dir_path.as_ref())
240 }
241}
242
// Expose all of `Snapshot`'s methods directly on `LocalSnapshot`.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
250
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
256
/// Messages sent from the background scanner to the `LocalWorktree` model.
enum ScanState {
    // A scan has begun.
    Started,
    // A scan produced a new snapshot; `scanning` indicates whether more
    // updates for the same scan are still coming. Dropping `barrier`
    // releases any task that requested this scan and is awaiting it.
    Updated {
        snapshot: LocalSnapshot,
        changes: UpdatedEntriesSet,
        barrier: Option<barrier::Sender>,
        scanning: bool,
    },
}
266
/// State held while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    // Snapshots (plus their entry/repo diffs) queued for serialization and
    // transmission to remote peers.
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    // Signaled to resume sending updates after a send failure/backpressure.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
274
/// Events emitted by a `Worktree` model.
pub enum Event {
    UpdatedEntries(UpdatedEntriesSet),
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
}
279
// Registers `Worktree` as a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
283
impl Worktree {
    /// Creates a worktree rooted at `path` on the local file system, seeds it
    /// with the root entry, and spawns a background scanner that keeps its
    /// snapshot in sync with file-system events.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            // scan_id starts at 1 with completed_scan_id 0, so the worktree
            // is considered "not yet fully scanned" until the first scan ends.
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry (if the path exists) so
            // the worktree isn't empty before the first scan completes.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner into this model,
            // maintaining the `is_scanning` watch and applying new snapshots.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, changes, cx);
                                // Dropping the barrier releases whoever
                                // requested this scan.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Run the scanner itself on the background thread pool, watching
            // the root with a 100ms debounce.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        next_entry_id,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a replica of a remote worktree from the metadata sent by its
    /// host. Updates received over RPC are applied to a background snapshot
    /// off the main thread, then mirrored into the foreground snapshot.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming updates to the background snapshot off the main
            // thread; failures are logged but do not stop the stream.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Mirror the background snapshot into the foreground model after
            // each update and resolve any subscriptions whose scan id has now
            // been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Arc::from([])));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// Returns the local variant, or `None` if this worktree is remote.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the remote variant, or `None` if this worktree is local.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of `as_local`.
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of `as_remote`.
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// Returns a clone of the current snapshot (without local-only state).
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// Local worktrees are always replica 0 (the host).
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over all diagnostic summaries, flattened to
    /// (path, language server id, summary) triples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// The absolute path of this worktree's root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
576
577impl LocalWorktree {
578 pub fn contains_abs_path(&self, path: &Path) -> bool {
579 path.starts_with(&self.abs_path)
580 }
581
582 fn absolutize(&self, path: &Path) -> PathBuf {
583 if path.file_name().is_some() {
584 self.abs_path.join(path)
585 } else {
586 self.abs_path.to_path_buf()
587 }
588 }
589
    /// Loads the file at `path` from disk and builds a `Buffer` model for it,
    /// using the file's git index text (if any) as the diff base.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Construct the text buffer on the background pool; this can be
            // expensive for large files.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|_| Buffer::build(text_buffer, diff_base, Some(Arc::new(file)))))
        })
    }
608
609 pub fn diagnostics_for_path(
610 &self,
611 path: &Path,
612 ) -> Vec<(
613 LanguageServerId,
614 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
615 )> {
616 self.diagnostics.get(path).cloned().unwrap_or_default()
617 }
618
    /// Removes all diagnostics produced by `server_id`, dropping any paths
    /// whose diagnostics become empty, and notifies collaborators (when
    /// shared) by sending zeroed summaries for the affected paths.
    pub fn clear_diagnostics_for_language_server(
        &mut self,
        server_id: LanguageServerId,
        _: &mut ModelContext<Worktree>,
    ) {
        let worktree_id = self.id().to_proto();
        self.diagnostic_summaries
            .retain(|path, summaries_by_server_id| {
                if summaries_by_server_id.remove(&server_id).is_some() {
                    // This path had a summary from this server: tell peers it
                    // now has zero errors/warnings for that server.
                    if let Some(share) = self.share.as_ref() {
                        self.client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id: share.project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path: path.to_string_lossy().to_string(),
                                    language_server_id: server_id.0 as u64,
                                    error_count: 0,
                                    warning_count: 0,
                                }),
                            })
                            .log_err();
                    }
                    // Keep the path only if other servers still report on it.
                    !summaries_by_server_id.is_empty()
                } else {
                    true
                }
            });

        // The per-path diagnostics vectors are sorted by server id, so a
        // binary search locates this server's entry.
        self.diagnostics.retain(|_, diagnostics_by_server_id| {
            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                diagnostics_by_server_id.remove(ix);
                !diagnostics_by_server_id.is_empty()
            } else {
                true
            }
        });
    }
657
    /// Replaces the diagnostics reported by `server_id` for `worktree_path`,
    /// updating the cached summaries and broadcasting the new summary to
    /// collaborators when the worktree is shared. Returns `true` if anything
    /// changed (i.e. either the old or the new summary is non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: remove its entry and,
            // if the path has no diagnostics from any server, drop the path.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // Insert or replace this server's diagnostics, keeping the vector
            // sorted by server id so binary search stays valid.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
717
    /// Installs a new snapshot produced by the background scanner, forwards
    /// it (with its diffs) to collaborators when shared, and emits change
    /// events for entries and git repositories.
    fn set_snapshot(
        &mut self,
        new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut ModelContext<Worktree>,
    ) {
        // Diff the repositories before replacing the snapshot — the old
        // snapshot is needed to compute what changed.
        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);

        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((
                    self.snapshot.clone(),
                    entry_changes.clone(),
                    repo_changes.clone(),
                ))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }
746
    /// Computes which git repositories were added, removed, or changed
    /// between two snapshots by merge-joining the two `git_repositories`
    /// maps, which iterate in ascending entry-id order.
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                // Both iterators still have entries: compare their ids.
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        // Present only in the new snapshot: repository added.
                        Ordering::Less => {
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        // Present in both: changed only if its .git contents
                        // were rescanned since the old snapshot.
                        Ordering::Equal => {
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repository_entries
                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
                                        .cloned();
                                    changes.push((
                                        entry.path.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        // Present only in the old snapshot: repository removed.
                        Ordering::Greater => {
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repository_entries
                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
                                    .cloned();
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                // Only new entries remain: all additions.
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                // Only old entries remain: all removals.
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repository_entries
                            .get(&RepositoryWorkDirectory(entry.path.clone()))
                            .cloned();
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Helper to turn a peeked `(&K, &V)` into an owned `(K, V)`.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
841
842 pub fn scan_complete(&self) -> impl Future<Output = ()> {
843 let mut is_scanning_rx = self.is_scanning.1.clone();
844 async move {
845 let mut is_scanning = is_scanning_rx.borrow().clone();
846 while is_scanning {
847 if let Some(value) = is_scanning_rx.recv().await {
848 is_scanning = value;
849 } else {
850 break;
851 }
852 }
853 }
854 }
855
    /// Returns a clone of the current local snapshot, including git and
    /// gitignore state.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
859
    /// Builds the protobuf metadata used to announce this worktree to peers.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
868
    /// Reads the file at `path` from disk, returning its `File` handle, its
    /// text, and — when the file is inside a git repository — the text stored
    /// for it in the git index (used as the diff base).
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // If the file lives inside a known git repository, kick off a
        // background read of its index text concurrently with the file load.
        if let Some(repo) = snapshot.repository_for_path(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
923
924 pub fn save_buffer(
925 &self,
926 buffer_handle: ModelHandle<Buffer>,
927 path: Arc<Path>,
928 has_changed_file: bool,
929 cx: &mut ModelContext<Worktree>,
930 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
931 let handle = cx.handle();
932 let buffer = buffer_handle.read(cx);
933
934 let rpc = self.client.clone();
935 let buffer_id = buffer.remote_id();
936 let project_id = self.share.as_ref().map(|share| share.project_id);
937
938 let text = buffer.as_rope().clone();
939 let fingerprint = text.fingerprint();
940 let version = buffer.version();
941 let save = self.write_file(path, text, buffer.line_ending(), cx);
942
943 cx.as_mut().spawn(|mut cx| async move {
944 let entry = save.await?;
945
946 if has_changed_file {
947 let new_file = Arc::new(File {
948 entry_id: entry.id,
949 worktree: handle,
950 path: entry.path,
951 mtime: entry.mtime,
952 is_local: true,
953 is_deleted: false,
954 });
955
956 if let Some(project_id) = project_id {
957 rpc.send(proto::UpdateBufferFile {
958 project_id,
959 buffer_id,
960 file: Some(new_file.to_proto()),
961 })
962 .log_err();
963 }
964
965 buffer_handle.update(&mut cx, |buffer, cx| {
966 if has_changed_file {
967 buffer.file_updated(new_file, cx).detach();
968 }
969 });
970 }
971
972 if let Some(project_id) = project_id {
973 rpc.send(proto::BufferSaved {
974 project_id,
975 buffer_id,
976 version: serialize_version(&version),
977 mtime: Some(entry.mtime.into()),
978 fingerprint: serialize_fingerprint(fingerprint),
979 })?;
980 }
981
982 buffer_handle.update(&mut cx, |buffer, cx| {
983 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
984 });
985
986 Ok((version, fingerprint, entry.mtime))
987 })
988 }
989
990 pub fn create_entry(
991 &self,
992 path: impl Into<Arc<Path>>,
993 is_dir: bool,
994 cx: &mut ModelContext<Worktree>,
995 ) -> Task<Result<Entry>> {
996 let path = path.into();
997 let abs_path = self.absolutize(&path);
998 let fs = self.fs.clone();
999 let write = cx.background().spawn(async move {
1000 if is_dir {
1001 fs.create_dir(&abs_path).await
1002 } else {
1003 fs.save(&abs_path, &Default::default(), Default::default())
1004 .await
1005 }
1006 });
1007
1008 cx.spawn(|this, mut cx| async move {
1009 write.await?;
1010 this.update(&mut cx, |this, cx| {
1011 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1012 })
1013 .await
1014 })
1015 }
1016
1017 pub fn write_file(
1018 &self,
1019 path: impl Into<Arc<Path>>,
1020 text: Rope,
1021 line_ending: LineEnding,
1022 cx: &mut ModelContext<Worktree>,
1023 ) -> Task<Result<Entry>> {
1024 let path = path.into();
1025 let abs_path = self.absolutize(&path);
1026 let fs = self.fs.clone();
1027 let write = cx
1028 .background()
1029 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1030
1031 cx.spawn(|this, mut cx| async move {
1032 write.await?;
1033 this.update(&mut cx, |this, cx| {
1034 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1035 })
1036 .await
1037 })
1038 }
1039
    /// Deletes the file or directory for `entry_id` from disk, then asks the
    /// background scanner to rescan the deleted path and waits for it to
    /// finish. Returns `None` if the entry no longer exists in the snapshot.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root so the path sent to the scanner matches
            // the paths it observes from filesystem events.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // The barrier resolves once the scanner has processed the path,
            // so the snapshot is up to date when this task completes.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
1082
1083 pub fn rename_entry(
1084 &self,
1085 entry_id: ProjectEntryId,
1086 new_path: impl Into<Arc<Path>>,
1087 cx: &mut ModelContext<Worktree>,
1088 ) -> Option<Task<Result<Entry>>> {
1089 let old_path = self.entry_for_id(entry_id)?.path.clone();
1090 let new_path = new_path.into();
1091 let abs_old_path = self.absolutize(&old_path);
1092 let abs_new_path = self.absolutize(&new_path);
1093 let fs = self.fs.clone();
1094 let rename = cx.background().spawn(async move {
1095 fs.rename(&abs_old_path, &abs_new_path, Default::default())
1096 .await
1097 });
1098
1099 Some(cx.spawn(|this, mut cx| async move {
1100 rename.await?;
1101 this.update(&mut cx, |this, cx| {
1102 this.as_local_mut()
1103 .unwrap()
1104 .refresh_entry(new_path.clone(), Some(old_path), cx)
1105 })
1106 .await
1107 }))
1108 }
1109
1110 pub fn copy_entry(
1111 &self,
1112 entry_id: ProjectEntryId,
1113 new_path: impl Into<Arc<Path>>,
1114 cx: &mut ModelContext<Worktree>,
1115 ) -> Option<Task<Result<Entry>>> {
1116 let old_path = self.entry_for_id(entry_id)?.path.clone();
1117 let new_path = new_path.into();
1118 let abs_old_path = self.absolutize(&old_path);
1119 let abs_new_path = self.absolutize(&new_path);
1120 let fs = self.fs.clone();
1121 let copy = cx.background().spawn(async move {
1122 copy_recursive(
1123 fs.as_ref(),
1124 &abs_old_path,
1125 &abs_new_path,
1126 Default::default(),
1127 )
1128 .await
1129 });
1130
1131 Some(cx.spawn(|this, mut cx| async move {
1132 copy.await?;
1133 this.update(&mut cx, |this, cx| {
1134 this.as_local_mut()
1135 .unwrap()
1136 .refresh_entry(new_path.clone(), None, cx)
1137 })
1138 .await
1139 }))
1140 }
1141
    /// Asks the background scanner to rescan `path` (and, for renames, the
    /// previous `old_path`), waits for the scan to complete, and returns the
    /// freshly scanned entry. Fails if the worktree is dropped or the path
    /// does not exist after the scan.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            // Canonicalize the root so the paths handed to the scanner match
            // the paths it observes from filesystem events.
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed the paths.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1179
    /// Begins streaming worktree updates to `callback`, retrying chunks when
    /// the callback reports failure until a resume signal arrives. Returns a
    /// receiver that resolves once the initial snapshot has been fully sent.
    pub fn observe_updates<F, Fut>(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
        callback: F,
    ) -> oneshot::Receiver<()>
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: Send + Future<Output = bool>,
    {
        // Tiny chunks in tests so the chunk-splitting logic is exercised.
        #[cfg(any(test, feature = "test-support"))]
        const MAX_CHUNK_SIZE: usize = 2;
        #[cfg(not(any(test, feature = "test-support")))]
        const MAX_CHUNK_SIZE: usize = 256;

        let (share_tx, share_rx) = oneshot::channel();

        // Already observing: just nudge the existing task to resume sending.
        if let Some(share) = self.share.as_mut() {
            share_tx.send(()).ok();
            *share.resume_updates.borrow_mut() = ();
            return share_rx;
        }

        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
        let (snapshots_tx, mut snapshots_rx) =
            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
        // Seed the stream with the current snapshot (and empty change sets)
        // so the first message sent is a full initial update.
        snapshots_tx
            .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
            .ok();

        let worktree_id = cx.model_id() as u64;
        let _maintain_remote_snapshot = cx.background().spawn(async move {
            let mut is_first = true;
            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
                let update;
                if is_first {
                    update = snapshot.build_initial_update(project_id, worktree_id);
                    is_first = false;
                } else {
                    update =
                        snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes);
                }

                // Large updates are split into bounded chunks.
                for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                    // Discard any stale resume signal before sending this chunk.
                    let _ = resume_updates_rx.try_recv();
                    loop {
                        let result = callback(update.clone());
                        if result.await {
                            break;
                        } else {
                            // The callback failed (e.g. connection dropped); wait
                            // for a resume signal, then retry the same chunk.
                            log::info!("waiting to resume updates");
                            if resume_updates_rx.next().await.is_none() {
                                return Some(());
                            }
                        }
                    }
                }
            }
            // All snapshots consumed: signal that sharing completed.
            share_tx.send(()).ok();
            Some(())
        });

        self.share = Some(ShareState {
            project_id,
            snapshots_tx,
            resume_updates: resume_updates_tx,
            _maintain_remote_snapshot,
        });
        share_rx
    }
1250
1251 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1252 let client = self.client.clone();
1253
1254 for (path, summaries) in &self.diagnostic_summaries {
1255 for (&server_id, summary) in summaries {
1256 if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1257 project_id,
1258 worktree_id: cx.model_id() as u64,
1259 summary: Some(summary.to_proto(server_id, &path)),
1260 }) {
1261 return Task::ready(Err(e));
1262 }
1263 }
1264 }
1265
1266 let rx = self.observe_updates(project_id, cx, move |update| {
1267 client.request(update).map(|result| result.is_ok())
1268 });
1269 cx.foreground()
1270 .spawn(async move { rx.await.map_err(|_| anyhow!("share ended")) })
1271 }
1272
1273 pub fn unshare(&mut self) {
1274 self.share.take();
1275 }
1276
    /// Whether this worktree is currently being shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1280}
1281
1282impl RemoteWorktree {
1283 fn snapshot(&self) -> Snapshot {
1284 self.snapshot.clone()
1285 }
1286
1287 pub fn disconnected_from_host(&mut self) {
1288 self.updates_tx.take();
1289 self.snapshot_subscriptions.clear();
1290 self.disconnected = true;
1291 }
1292
    /// Asks the host to save the given buffer, then records the resulting
    /// version, fingerprint, and mtime on the buffer and returns them.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        // Capture the buffer's identity and version before going async.
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            // Inform the buffer that the save completed so it can clear its
            // dirty state.
            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1325
1326 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1327 if let Some(updates_tx) = &self.updates_tx {
1328 updates_tx
1329 .unbounded_send(update)
1330 .expect("consumer runs to completion");
1331 }
1332 }
1333
1334 fn observed_snapshot(&self, scan_id: usize) -> bool {
1335 self.completed_scan_id >= scan_id
1336 }
1337
    /// Returns a future that resolves once a scan with at least the given id
    /// has been applied, or errors if the worktree disconnects first.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already observed: resolve immediately.
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes the returned future yield an error.
            drop(tx);
        } else {
            // Insert the subscription, keeping the list sorted by scan id so
            // it can be drained in order as scans complete.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1358
1359 pub fn update_diagnostic_summary(
1360 &mut self,
1361 path: Arc<Path>,
1362 summary: &proto::DiagnosticSummary,
1363 ) {
1364 let server_id = LanguageServerId(summary.language_server_id as usize);
1365 let summary = DiagnosticSummary {
1366 error_count: summary.error_count as usize,
1367 warning_count: summary.warning_count as usize,
1368 };
1369
1370 if summary.is_empty() {
1371 if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1372 summaries.remove(&server_id);
1373 if summaries.is_empty() {
1374 self.diagnostic_summaries.remove(&path);
1375 }
1376 }
1377 } else {
1378 self.diagnostic_summaries
1379 .entry(path)
1380 .or_default()
1381 .insert(server_id, summary);
1382 }
1383 }
1384
    /// Inserts an entry sent by the host, first waiting until this replica's
    /// snapshot has caught up to `scan_id`.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                // Apply to the background snapshot, then publish the result
                // as the foreground snapshot.
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }
1403
    /// Deletes an entry on behalf of the host, first waiting until this
    /// replica's snapshot has caught up to `scan_id`.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                // Apply to the background snapshot, then publish the result
                // as the foreground snapshot.
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1422}
1423
1424impl Snapshot {
    /// The stable id of this worktree.
    pub fn id(&self) -> WorktreeId {
        self.id
    }
1428
    /// Absolute path of the worktree root on the host's filesystem.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }
1432
1433 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1434 self.entries_by_id.get(&entry_id, &()).is_some()
1435 }
1436
    /// Inserts (or replaces) an entry deserialized from the host, keeping the
    /// by-id and by-path indices consistent with each other.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id previously mapped to a different path, drop the stale
        // path record.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
1454
    /// Removes the entry with the given id along with all of its descendants,
    /// returning the removed entry's path.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and its descendants, dropping them
            // from the id index as we go...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the remaining suffix.
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }
1475
1476 #[cfg(any(test, feature = "test-support"))]
1477 pub fn status_for_file(&self, path: impl Into<PathBuf>) -> Option<GitFileStatus> {
1478 let path = path.into();
1479 self.entries_by_path
1480 .get(&PathKey(Arc::from(path)), &())
1481 .and_then(|entry| entry.git_status)
1482 }
1483
1484 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1485 let mut entries_by_path_edits = Vec::new();
1486 let mut entries_by_id_edits = Vec::new();
1487
1488 for entry_id in update.removed_entries {
1489 let entry_id = ProjectEntryId::from_proto(entry_id);
1490 entries_by_id_edits.push(Edit::Remove(entry_id));
1491 if let Some(entry) = self.entry_for_id(entry_id) {
1492 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1493 }
1494 }
1495
1496 for entry in update.updated_entries {
1497 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1498 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1499 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1500 }
1501 if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
1502 if old_entry.id != entry.id {
1503 entries_by_id_edits.push(Edit::Remove(old_entry.id));
1504 }
1505 }
1506 entries_by_id_edits.push(Edit::Insert(PathEntry {
1507 id: entry.id,
1508 path: entry.path.clone(),
1509 is_ignored: entry.is_ignored,
1510 scan_id: 0,
1511 }));
1512 entries_by_path_edits.push(Edit::Insert(entry));
1513 }
1514
1515 self.entries_by_path.edit(entries_by_path_edits, &());
1516 self.entries_by_id.edit(entries_by_id_edits, &());
1517
1518 update.removed_repositories.sort_unstable();
1519 self.repository_entries.retain(|_, entry| {
1520 if let Ok(_) = update
1521 .removed_repositories
1522 .binary_search(&entry.work_directory.to_proto())
1523 {
1524 false
1525 } else {
1526 true
1527 }
1528 });
1529
1530 for repository in update.updated_repositories {
1531 let work_directory_entry: WorkDirectoryEntry =
1532 ProjectEntryId::from_proto(repository.work_directory_id).into();
1533
1534 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1535 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1536 if self.repository_entries.get(&work_directory).is_some() {
1537 self.repository_entries.update(&work_directory, |repo| {
1538 repo.branch = repository.branch.map(Into::into);
1539 });
1540 } else {
1541 self.repository_entries.insert(
1542 work_directory,
1543 RepositoryEntry {
1544 work_directory: work_directory_entry,
1545 branch: repository.branch.map(Into::into),
1546 },
1547 )
1548 }
1549 } else {
1550 log::error!("no work directory entry for repository {:?}", repository)
1551 }
1552 }
1553
1554 self.scan_id = update.scan_id as usize;
1555 if update.is_last_update {
1556 self.completed_scan_id = update.scan_id as usize;
1557 }
1558
1559 Ok(())
1560 }
1561
    /// Total number of file entries (including ignored files).
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }
1565
    /// Number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }
1569
1570 fn traverse_from_offset(
1571 &self,
1572 include_dirs: bool,
1573 include_ignored: bool,
1574 start_offset: usize,
1575 ) -> Traversal {
1576 let mut cursor = self.entries_by_path.cursor();
1577 cursor.seek(
1578 &TraversalTarget::Count {
1579 count: start_offset,
1580 include_dirs,
1581 include_ignored,
1582 },
1583 Bias::Right,
1584 &(),
1585 );
1586 Traversal {
1587 cursor,
1588 include_dirs,
1589 include_ignored,
1590 }
1591 }
1592
    /// Returns a traversal positioned at the given path.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }
1607
    /// Iterates over file entries (no directories), starting at `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }
1611
    /// Iterates over all entries, directories included, from the beginning.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }
1615
    /// Iterates over all repositories, yielding each work-directory path and
    /// its repository entry, in path order.
    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
        self.repository_entries
            .iter()
            .map(|(path, entry)| (&path.0, entry))
    }
1621
    /// Get the repository whose work directory is exactly the given path
    /// (this is a keyed lookup, not a containment search — for containment
    /// use `repository_for_path`).
    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
        self.repository_entries
            .get(&RepositoryWorkDirectory(path.into()))
            .cloned()
    }
1628
1629 /// Get the repository whose work directory contains the given path.
1630 pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1631 self.repository_and_work_directory_for_path(path)
1632 .map(|e| e.1)
1633 }
1634
1635 pub fn repository_and_work_directory_for_path(
1636 &self,
1637 path: &Path,
1638 ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
1639 self.repository_entries
1640 .iter()
1641 .filter(|(workdir_path, _)| path.starts_with(workdir_path))
1642 .last()
1643 .map(|(path, repo)| (path.clone(), repo.clone()))
1644 }
1645
    /// Given an ordered iterator of entries, returns an iterator of those entries,
    /// along with their containing git repository.
    ///
    /// Relies on `entries` arriving in traversal (path) order, so a stack of
    /// enclosing repositories can be maintained in a single pass.
    pub fn entries_with_repositories<'a>(
        &'a self,
        entries: impl 'a + Iterator<Item = &'a Entry>,
    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
        let mut repositories = self.repositories().peekable();
        entries.map(move |entry| {
            // Pop repositories that no longer contain the current entry.
            while let Some((repo_path, _)) = containing_repos.last() {
                if !entry.path.starts_with(repo_path) {
                    containing_repos.pop();
                } else {
                    break;
                }
            }
            // Push upcoming repositories whose work dir contains the entry.
            while let Some((repo_path, _)) = repositories.peek() {
                if entry.path.starts_with(repo_path) {
                    containing_repos.push(repositories.next().unwrap());
                } else {
                    break;
                }
            }
            // The innermost containing repository sits on top of the stack.
            let repo = containing_repos.last().map(|(_, repo)| *repo);
            (entry, repo)
        })
    }
1673
    /// Computes an aggregate git status for each of the given directories.
    ///
    /// NOTE(review): unimplemented — calling this panics via `todo!()`. The
    /// comments below sketch the intended stack-based traversal.
    pub fn statuses_for_directories(&self, paths: &[&Path]) -> Vec<GitFileStatus> {
        todo!();
        // ["/a/b", "a/b/c", "a/b/d", "j"]

        // Path stack:
        // If path has descendents following it, push to stack: ["a/b"]
        // Figure out a/b/c
        // Figure out a/b/d
        // Once no more descendants, pop the stack:
        // Figure out a/b
    }
1685
1686 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1687 let empty_path = Path::new("");
1688 self.entries_by_path
1689 .cursor::<()>()
1690 .filter(move |entry| entry.path.as_ref() != empty_path)
1691 .map(|entry| &entry.path)
1692 }
1693
1694 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1695 let mut cursor = self.entries_by_path.cursor();
1696 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1697 let traversal = Traversal {
1698 cursor,
1699 include_dirs: true,
1700 include_ignored: true,
1701 };
1702 ChildEntriesIter {
1703 traversal,
1704 parent_path,
1705 }
1706 }
1707
    /// Iterates over all entries beneath `parent_path` that match the
    /// include flags.
    fn descendent_entries<'a>(
        &'a self,
        include_dirs: bool,
        include_ignored: bool,
        parent_path: &'a Path,
    ) -> DescendentEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
        let mut traversal = Traversal {
            cursor,
            include_dirs,
            include_ignored,
        };

        // NOTE(review): presumably the seek can land on an entry the filters
        // exclude (zero-width position), so advance to the first entry the
        // traversal would actually yield — confirm against `Traversal`.
        if traversal.end_offset() == traversal.start_offset() {
            traversal.advance();
        }

        DescendentEntriesIter {
            traversal,
            parent_path,
        }
    }
1731
    /// The entry for the worktree root itself (empty relative path).
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }
1735
    /// The file name of the worktree root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
1739
1740 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1741 self.repository_entries
1742 .get(&RepositoryWorkDirectory(Path::new("").into()))
1743 .map(|entry| entry.to_owned())
1744 }
1745
    /// Iterates over all repository entries known to this snapshot.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repository_entries.values()
    }
1749
    /// Id of the most recent scan incorporated into this snapshot.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }
1753
1754 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1755 let path = path.as_ref();
1756 self.traverse_from_path(true, true, path)
1757 .entry()
1758 .and_then(|entry| {
1759 if entry.path.as_ref() == path {
1760 Some(entry)
1761 } else {
1762 None
1763 }
1764 })
1765 }
1766
    /// Looks up an entry by id, resolving it through the by-id index and
    /// then the by-path index.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }
1771
    /// The filesystem inode of the entry at the given path, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
1775}
1776
1777impl LocalSnapshot {
    /// The local (on-disk) repository state corresponding to the given
    /// repository entry, keyed by its work-directory entry id.
    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }
1781
    /// Finds the repository whose `.git` directory contains the given path,
    /// returning its work-directory entry id and local repository state.
    pub(crate) fn repo_for_metadata(
        &self,
        path: &Path,
    ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
        self.git_repositories
            .iter()
            .find(|(_, repo)| repo.in_dot_git(path))
    }
1790
    /// Builds an incremental update message describing the given entry and
    /// repository changes for transmission to remote collaborators.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updated_repositories = Vec::new();
        let mut removed_repositories = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        for (work_dir_path, change) in repo_changes.iter() {
            let new_repo = self
                .repository_entries
                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
            match (&change.old_repository, new_repo) {
                (Some(old_repo), Some(new_repo)) => {
                    // Repository still exists: send only what changed.
                    updated_repositories.push(new_repo.build_update(old_repo));
                }
                (None, Some(new_repo)) => {
                    // Newly-discovered repository: send it in full.
                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
                }
                (Some(old_repo), None) => {
                    removed_repositories.push(old_repo.work_directory.0.to_proto());
                }
                _ => {}
            }
        }

        // Receivers rely on these collections being sorted by id.
        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);
        removed_repositories.sort_unstable();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        // An entry both removed and re-created in this batch should only be
        // reported as updated.
        // TODO - optimize, knowing that removed_entries are sorted.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }
1850
1851 fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
1852 let mut updated_entries = self
1853 .entries_by_path
1854 .iter()
1855 .map(proto::Entry::from)
1856 .collect::<Vec<_>>();
1857 updated_entries.sort_unstable_by_key(|e| e.id);
1858
1859 let mut updated_repositories = self
1860 .repository_entries
1861 .values()
1862 .map(proto::RepositoryEntry::from)
1863 .collect::<Vec<_>>();
1864 updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
1865
1866 proto::UpdateWorktree {
1867 project_id,
1868 worktree_id,
1869 abs_path: self.abs_path().to_string_lossy().into(),
1870 root_name: self.root_name().to_string(),
1871 updated_entries,
1872 removed_entries: Vec::new(),
1873 scan_id: self.scan_id as u64,
1874 is_last_update: self.completed_scan_id == self.scan_id,
1875 updated_repositories,
1876 removed_repositories: Vec::new(),
1877 }
1878 }
1879
    /// Inserts `entry` into this snapshot, keeping the gitignore cache and
    /// both entry indices up to date. Returns the entry as inserted.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        // A new or changed .gitignore invalidates the cached ignore state for
        // its parent directory, so (re)load it eagerly.
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Don't demote a directory that was already fully scanned back to the
        // pending state.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If this path was previously occupied by a different entry, drop the
        // stale record from the id index so the indices stay consistent.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
1925
1926 fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1927 let abs_path = self.abs_path.join(&parent_path);
1928 let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1929
1930 // Guard against repositories inside the repository metadata
1931 if work_dir
1932 .components()
1933 .find(|component| component.as_os_str() == *DOT_GIT)
1934 .is_some()
1935 {
1936 return None;
1937 };
1938
1939 let work_dir_id = self
1940 .entry_for_path(work_dir.clone())
1941 .map(|entry| entry.id)?;
1942
1943 if self.git_repositories.get(&work_dir_id).is_none() {
1944 let repo = fs.open_repo(abs_path.as_path())?;
1945 let work_directory = RepositoryWorkDirectory(work_dir.clone());
1946
1947 let repo_lock = repo.lock();
1948
1949 self.repository_entries.insert(
1950 work_directory.clone(),
1951 RepositoryEntry {
1952 work_directory: work_dir_id.into(),
1953 branch: repo_lock.branch_name().map(Into::into),
1954 },
1955 );
1956
1957 self.scan_statuses(repo_lock.deref(), &work_directory, &work_directory.0);
1958
1959 drop(repo_lock);
1960
1961 self.git_repositories.insert(
1962 work_dir_id,
1963 LocalRepositoryEntry {
1964 git_dir_scan_id: 0,
1965 repo_ptr: repo,
1966 git_dir_path: parent_path.clone(),
1967 },
1968 )
1969 }
1970
1971 Some(())
1972 }
1973
1974 fn scan_statuses(
1975 &mut self,
1976 repo_ptr: &dyn GitRepository,
1977 work_directory: &RepositoryWorkDirectory,
1978 path: &Path,
1979 ) {
1980 let mut edits = vec![];
1981 for mut entry in self.descendent_entries(false, false, path).cloned() {
1982 let Ok(repo_path) = entry.path.strip_prefix(&work_directory.0) else {
1983 continue;
1984 };
1985 let git_file_status = repo_ptr.status(&RepoPath(repo_path.into()));
1986 let status = git_file_status;
1987 entry.git_status = status;
1988 edits.push(Edit::Insert(entry));
1989 }
1990
1991 self.entries_by_path.edit(edits, &());
1992 }
1993
1994 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1995 let mut inodes = TreeSet::default();
1996 for ancestor in path.ancestors().skip(1) {
1997 if let Some(entry) = self.entry_for_path(ancestor) {
1998 inodes.insert(entry.inode);
1999 }
2000 }
2001 inodes
2002 }
2003
2004 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2005 let mut new_ignores = Vec::new();
2006 for ancestor in abs_path.ancestors().skip(1) {
2007 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2008 new_ignores.push((ancestor, Some(ignore.clone())));
2009 } else {
2010 new_ignores.push((ancestor, None));
2011 }
2012 }
2013
2014 let mut ignore_stack = IgnoreStack::none();
2015 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2016 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2017 ignore_stack = IgnoreStack::all();
2018 break;
2019 } else if let Some(ignore) = ignore {
2020 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2021 }
2022 }
2023
2024 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2025 ignore_stack = IgnoreStack::all();
2026 }
2027
2028 ignore_stack
2029 }
2030}
2031
2032impl BackgroundScannerState {
    /// Reassigns a previously-used entry id to `entry` when possible, so that
    /// moved or re-created files keep a stable identity. An id recorded for a
    /// removed inode takes precedence over a live entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }
2040
    /// Inserts `entry` into the snapshot, first reusing an existing id for it
    /// when one is available.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        self.reuse_entry_id(&mut entry);
        self.snapshot.insert_entry(entry, fs)
    }
2045
    /// Records the scanned contents of the directory at `parent_path`:
    /// marks the directory itself as fully loaded, caches its gitignore (if
    /// any), and inserts its children into the snapshot.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) = self
            .snapshot
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            // The directory may have been deleted while its scan was queued.
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // The path no longer refers to a directory; nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
            self.snapshot
                .ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.snapshot.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // Populating a `.git` directory means a repository was discovered.
        if parent_path.file_name() == Some(&DOT_GIT) {
            self.snapshot.build_repo(parent_path, fs);
        }
    }
2105
    /// Removes the entry at `path` and all of its descendants from the
    /// snapshot, remembering their ids (keyed by inode) so quickly re-created
    /// files can keep a stable identity.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into: everything before `path`, the subtree
            // rooted at `path` (removed), and everything after it (kept).
            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.snapshot.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest id observed per inode so reuse is stable.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a .gitignore invalidates the cached ignore state of its
        // parent directory.
        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
            if let Some((_, needs_update)) = self
                .snapshot
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *needs_update = true;
            }
        }
    }
2139}
2140
2141async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2142 let contents = fs.load(abs_path).await?;
2143 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2144 let mut builder = GitignoreBuilder::new(parent);
2145 for line in contents.lines() {
2146 builder.add_line(Some(abs_path.into()), line)?;
2147 }
2148 Ok(builder.build()?)
2149}
2150
2151impl WorktreeId {
2152 pub fn from_usize(handle_id: usize) -> Self {
2153 Self(handle_id)
2154 }
2155
2156 pub(crate) fn from_proto(id: u64) -> Self {
2157 Self(id as usize)
2158 }
2159
2160 pub fn to_proto(&self) -> u64 {
2161 self.0 as u64
2162 }
2163
2164 pub fn to_usize(&self) -> usize {
2165 self.0
2166 }
2167}
2168
2169impl fmt::Display for WorktreeId {
2170 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2171 self.0.fmt(f)
2172 }
2173}
2174
2175impl Deref for Worktree {
2176 type Target = Snapshot;
2177
2178 fn deref(&self) -> &Self::Target {
2179 match self {
2180 Worktree::Local(worktree) => &worktree.snapshot,
2181 Worktree::Remote(worktree) => &worktree.snapshot,
2182 }
2183 }
2184}
2185
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    // Expose the snapshot's methods directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2193
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    // Expose the snapshot's methods directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2201
2202impl fmt::Debug for LocalWorktree {
2203 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2204 self.snapshot.fmt(f)
2205 }
2206}
2207
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Wrappers that render the two SumTree indices compactly, instead of
        // dumping the trees' internal structure.
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl<'a> fmt::Debug for EntriesByPath<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // Render as a path -> id map.
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl<'a> fmt::Debug for EntriesById<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}
2235
/// A handle to a file that belongs to a [`Worktree`], as exposed to the
/// language layer.
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time of the file on disk.
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    pub(crate) is_local: bool,
    /// True once the underlying entry has been removed from the worktree.
    pub(crate) is_deleted: bool,
}
2245
impl language::File for File {
    // Only files backed by a local worktree expose local-file operations.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a display path: the worktree name (or its abbreviated absolute
    /// path, for invisible worktrees) followed by the file's relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            // Abbreviate paths under the home directory with "~".
            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path denotes the worktree root itself.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission to collaborators.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
2313
2314impl language::LocalFile for File {
2315 fn abs_path(&self, cx: &AppContext) -> PathBuf {
2316 self.worktree
2317 .read(cx)
2318 .as_local()
2319 .unwrap()
2320 .abs_path
2321 .join(&self.path)
2322 }
2323
2324 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2325 let worktree = self.worktree.read(cx).as_local().unwrap();
2326 let abs_path = worktree.absolutize(&self.path);
2327 let fs = worktree.fs.clone();
2328 cx.background()
2329 .spawn(async move { fs.load(&abs_path).await })
2330 }
2331
2332 fn buffer_reloaded(
2333 &self,
2334 buffer_id: u64,
2335 version: &clock::Global,
2336 fingerprint: RopeFingerprint,
2337 line_ending: LineEnding,
2338 mtime: SystemTime,
2339 cx: &mut AppContext,
2340 ) {
2341 let worktree = self.worktree.read(cx).as_local().unwrap();
2342 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2343 worktree
2344 .client
2345 .send(proto::BufferReloaded {
2346 project_id,
2347 buffer_id,
2348 version: serialize_version(version),
2349 mtime: Some(mtime.into()),
2350 fingerprint: serialize_fingerprint(fingerprint),
2351 line_ending: serialize_line_ending(line_ending) as i32,
2352 })
2353 .log_err();
2354 }
2355 }
2356}
2357
2358impl File {
2359 pub fn from_proto(
2360 proto: rpc::proto::File,
2361 worktree: ModelHandle<Worktree>,
2362 cx: &AppContext,
2363 ) -> Result<Self> {
2364 let worktree_id = worktree
2365 .read(cx)
2366 .as_remote()
2367 .ok_or_else(|| anyhow!("not remote"))?
2368 .id();
2369
2370 if worktree_id.to_proto() != proto.worktree_id {
2371 return Err(anyhow!("worktree id does not match file"));
2372 }
2373
2374 Ok(Self {
2375 worktree,
2376 path: Path::new(&proto.path).into(),
2377 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2378 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2379 is_local: false,
2380 is_deleted: proto.is_deleted,
2381 })
2382 }
2383
2384 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2385 file.and_then(|f| f.as_any().downcast_ref())
2386 }
2387
2388 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2389 self.worktree.read(cx).id()
2390 }
2391
2392 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2393 if self.is_deleted {
2394 None
2395 } else {
2396 Some(self.entry_id)
2397 }
2398 }
2399}
2400
/// A single filesystem entry (file or directory) in a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry is excluded by a gitignore rule (its own or an ancestor's).
    pub is_ignored: bool,
    /// Git status, if the entry lies within a repository work directory.
    pub git_status: Option<GitFileStatus>,
}
2412
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children are not yet scanned.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying the character bag of its path for fuzzy matching.
    File(CharBag),
}
2419
/// How a path changed, as reported in an `UpdatedEntriesSet`.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
2435
/// A change to a git repository within the worktree.
pub struct GitRepositoryChange {
    /// The previous state of the repository, if it already existed.
    pub old_repository: Option<RepositoryEntry>,
}
2440
/// Batch of entry changes: each item is (relative path, entry id, kind of change).
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// Batch of repository changes, keyed by the repository's work-directory path.
pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
2443
2444impl Entry {
2445 fn new(
2446 path: Arc<Path>,
2447 metadata: &fs::Metadata,
2448 next_entry_id: &AtomicUsize,
2449 root_char_bag: CharBag,
2450 ) -> Self {
2451 Self {
2452 id: ProjectEntryId::new(next_entry_id),
2453 kind: if metadata.is_dir {
2454 EntryKind::PendingDir
2455 } else {
2456 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2457 },
2458 path,
2459 inode: metadata.inode,
2460 mtime: metadata.mtime,
2461 is_symlink: metadata.is_symlink,
2462 is_ignored: false,
2463 git_status: None,
2464 }
2465 }
2466
2467 pub fn is_dir(&self) -> bool {
2468 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2469 }
2470
2471 pub fn is_file(&self) -> bool {
2472 matches!(self.kind, EntryKind::File(_))
2473 }
2474
2475 pub fn git_status(&self) -> Option<GitFileStatus> {
2476 self.git_status /*.status() */
2477 }
2478}
2479
2480impl sum_tree::Item for Entry {
2481 type Summary = EntrySummary;
2482
2483 fn summary(&self) -> Self::Summary {
2484 let visible_count = if self.is_ignored { 0 } else { 1 };
2485 let file_count;
2486 let visible_file_count;
2487 if self.is_file() {
2488 file_count = 1;
2489 visible_file_count = visible_count;
2490 } else {
2491 file_count = 0;
2492 visible_file_count = 0;
2493 }
2494
2495 EntrySummary {
2496 max_path: self.path.clone(),
2497 count: 1,
2498 visible_count,
2499 file_count,
2500 visible_file_count,
2501 }
2502 }
2503}
2504
// Entries are keyed (and therefore ordered) by their relative path.
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2512
/// Sum-tree summary for `Entry`: aggregate counts plus the rightmost path.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// Greatest (rightmost) path in the summarized subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries not excluded by gitignore.
    visible_count: usize,
    /// Entries that are files.
    file_count: usize,
    /// Entries that are files and not excluded by gitignore.
    visible_file_count: usize,
    // Possible future aggregates for git status rollups:
    // git_modified_count: usize,
    // git_added_count: usize,
    // git_conflict_count: usize,
}
2524
// Manual impl because `Arc<Path>` does not implement `Default`.
impl Default for EntrySummary {
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2536
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    // Summaries combine left-to-right: counts add, and `max_path` takes
    // the right-hand side since entries are ordered by path.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2548
/// Record in the id-ordered index, mapping a `ProjectEntryId` back to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan during which this record was last updated.
    scan_id: usize,
}
2556
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // The only aggregate needed for id-ordered lookups is the maximum id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2564
// Path entries are keyed (and therefore ordered) by their entry id.
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    fn key(&self) -> Self::Key {
        self.id
    }
}
2572
/// Sum-tree summary for `PathEntry`: the greatest entry id in the subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2577
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    // Items are id-ordered, so the combined maximum is the right-hand side's.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2585
// Lets cursors over the id-ordered tree seek directly by `ProjectEntryId`.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2591
/// Newtype key for path-ordered sum trees.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2594
2595impl Default for PathKey {
2596 fn default() -> Self {
2597 Self(Path::new("").into())
2598 }
2599}
2600
// Lets cursors over the path-ordered tree seek directly by `PathKey`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2606
/// Scans a worktree's directory tree on background threads, keeping the
/// shared `state` up to date and streaming `ScanState` updates to the worktree.
struct BackgroundScanner {
    state: Mutex<BackgroundScannerState>,
    fs: Arc<dyn Fs>,
    /// Channel on which snapshots and change sets are reported to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the worktree to re-scan specific paths, each paired with
    /// a barrier that is released when the refresh has been reported.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    next_entry_id: Arc<AtomicUsize>,
    phase: BackgroundScannerPhase,
}
2616
/// Lifecycle stage of the scanner. The phase determines how precisely a
/// filesystem event can be classified when building change sets.
#[derive(PartialEq)]
enum BackgroundScannerPhase {
    /// Performing the initial recursive scan of the worktree.
    InitialScan,
    /// Handling FS events that arrived while the initial scan was running.
    EventsReceivedDuringInitialScan,
    /// Steady state: handling FS events as they arrive.
    Events,
}
2623
2624impl BackgroundScanner {
    /// Creates a scanner whose starting state is the given snapshot; the
    /// snapshot is also cloned as the baseline for computing change sets.
    fn new(
        snapshot: LocalSnapshot,
        next_entry_id: Arc<AtomicUsize>,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            next_entry_id,
            state: Mutex::new(BackgroundScannerState {
                prev_snapshot: snapshot.snapshot.clone(),
                snapshot,
                removed_entry_ids: Default::default(),
                changed_paths: Default::default(),
            }),
            phase: BackgroundScannerPhase::InitialScan,
        }
    }
2648
    /// Main loop of the scanner: performs the initial scan, then processes
    /// filesystem events and refresh requests until the worktree is dropped.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = &self.state.lock().snapshot;
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root, so gitignore files in ancestor
        // directories still apply to the worktree's contents.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.state
                    .lock()
                    .snapshot
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), false));
            }
        }
        {
            let mut state = self.state.lock();
            state.snapshot.scan_id += 1;
            ignore_stack = state
                .snapshot
                .ignore_stack_for_abs_path(&root_abs_path, true);
            // If the root itself is ignored, mark its entry accordingly.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    state.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut state = self.state.lock();
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Drain any further event batches that are already queued.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        // Continue processing events until the worktree is dropped.
        self.phase = BackgroundScannerPhase::Events;
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths.clone(), barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths.clone()).await;
                }
            }
        }
    }
2743
    /// Rescans the given paths and reports the result to the worktree,
    /// releasing `barrier` when done. Returns `false` when the status channel
    /// is closed (the worktree was dropped), signalling the scanner to stop.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2748
    /// Handles a batch of filesystem events: reloads the affected entries,
    /// recursively scans any new directories, refreshes ignore statuses and
    /// git repository state, then reports the update to the worktree.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        let paths = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await;
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        {
            let mut snapshot = &mut self.state.lock().snapshot;

            if let Some(paths) = paths {
                for path in paths {
                    self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
                }
            }

            // Drop repositories whose `.git` directory no longer exists.
            // `git_repositories` is taken out of the snapshot temporarily so
            // it can be filtered while still borrowing the snapshot.
            let mut git_repositories = mem::take(&mut snapshot.git_repositories);
            git_repositories.retain(|work_directory_id, _| {
                snapshot
                    .entry_for_id(*work_directory_id)
                    .map_or(false, |entry| {
                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                    })
            });
            snapshot.git_repositories = git_repositories;

            // Drop repository entries that no longer have a backing repository.
            let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
            git_repository_entries.retain(|_, entry| {
                snapshot
                    .git_repositories
                    .get(&entry.work_directory.0)
                    .is_some()
            });
            snapshot.snapshot.repository_entries = git_repository_entries;
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        self.send_status_update(false, None);
    }
2791
    /// Drains `scan_jobs_rx` using one worker per CPU, recursively scanning
    /// directories. Workers also service refresh requests and (optionally)
    /// send periodic progress updates to the worktree.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            // The worktree has been dropped; nothing to report to.
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent this update.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2864
    /// Computes the change set since the last update and sends it (with the
    /// current snapshot) to the worktree. Returns `false` if the receiving
    /// side has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut state = self.state.lock();
        // Skip no-op progress updates; final updates (scanning == false) are
        // always sent so barriers get released.
        if state.changed_paths.is_empty() && scanning {
            return true;
        }

        let new_snapshot = state.snapshot.clone();
        // Swap in the new baseline and diff against the previous one.
        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
        let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
        state.changed_paths.clear();

        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot: new_snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2885
    /// Scans a single directory: reads its children, builds entries for them,
    /// applies gitignore rules, records the results in the snapshot, and
    /// enqueues scan jobs for any child directories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per child directory, in entry order; `None` marks a
        // directory that is skipped (recursive symlink).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = &self.state.lock().snapshot;
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                self.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between the directory read and the stat.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.` and
                // sorts near the front of the directory listing, such entries should rarely be
                // numerous. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        {
            let mut state = self.state.lock();
            state.populate_dir(job.path.clone(), new_entries, new_ignore, self.fs.as_ref());
            // Record this directory as changed, keeping `changed_paths` sorted.
            if let Err(ix) = state.changed_paths.binary_search(&job.path) {
                state.changed_paths.insert(ix, job.path.clone());
            }
        }

        // Enqueue the child-directory jobs only after the state lock is released.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
3012
    /// Re-stats the given absolute paths and reconciles the snapshot with what
    /// is on disk, removing entries for deleted paths and inserting/updating
    /// entries for existing ones. If `scan_queue_tx` is provided, directories
    /// are enqueued for a recursive rescan. Returns the affected worktree-relative
    /// paths, or `None` if the root path could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sort so that a parent directory directly precedes its descendants,
        // then drop descendants of a path that is already in the list.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the state lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut state = self.state.lock();
        let snapshot = &mut state.snapshot;
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    state.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    state.insert_entry(fs_entry, self.fs.as_ref());

                    // When doing a recursive update, enqueue a scan of each
                    // directory (guarding against recursive symlinks).
                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = state.snapshot.ancestor_inodes_for_path(&path);
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {
                    // The path is gone; clean up any repository rooted there.
                    self.remove_repo_path(&path, &mut state.snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        // Merge the affected paths into the sorted set of changed paths.
        util::extend_sorted(
            &mut state.changed_paths,
            event_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );

        Some(event_paths)
    }
3111
3112 fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
3113 if !path
3114 .components()
3115 .any(|component| component.as_os_str() == *DOT_GIT)
3116 {
3117 if let Some(repository) = snapshot.repository_for_work_directory(path) {
3118 let entry = repository.work_directory.0;
3119 snapshot.git_repositories.remove(&entry);
3120 snapshot
3121 .snapshot
3122 .repository_entries
3123 .remove(&RepositoryWorkDirectory(path.into()));
3124 return Some(());
3125 }
3126 }
3127
3128 // TODO statuses
3129 // Track when a .git is removed and iterate over the file system there
3130
3131 Some(())
3132 }
3133
3134 fn reload_repo_for_file_path(
3135 &self,
3136 path: &Path,
3137 snapshot: &mut LocalSnapshot,
3138 fs: &dyn Fs,
3139 ) -> Option<()> {
3140 let scan_id = snapshot.scan_id;
3141
3142 if path
3143 .components()
3144 .any(|component| component.as_os_str() == *DOT_GIT)
3145 {
3146 let (entry_id, repo_ptr) = {
3147 let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
3148 let dot_git_dir = path.ancestors()
3149 .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
3150 .next()?;
3151
3152 snapshot.build_repo(dot_git_dir.into(), fs);
3153 return None;
3154 };
3155 if repo.git_dir_scan_id == scan_id {
3156 return None;
3157 }
3158
3159 (*entry_id, repo.repo_ptr.to_owned())
3160 };
3161 /*
3162 1. Populate dir, initializes the git repo
3163 2. Sometimes, we get a file event inside the .git repo, before it's initializaed
3164 In both cases, we should end up with an initialized repo and a full status scan
3165
3166 */
3167
3168 let work_dir = snapshot
3169 .entry_for_id(entry_id)
3170 .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;
3171
3172 let repo = repo_ptr.lock();
3173 repo.reload_index();
3174 let branch = repo.branch_name();
3175
3176 snapshot.git_repositories.update(&entry_id, |entry| {
3177 entry.git_dir_scan_id = scan_id;
3178 });
3179
3180 snapshot
3181 .snapshot
3182 .repository_entries
3183 .update(&work_dir, |entry| {
3184 entry.branch = branch.map(Into::into);
3185 });
3186
3187 snapshot.scan_statuses(repo.deref(), &work_dir, &work_dir.0);
3188 } else {
3189 if snapshot
3190 .entry_for_path(&path)
3191 .map(|entry| entry.is_ignored)
3192 .unwrap_or(false)
3193 {
3194 return None;
3195 }
3196
3197 let repo = snapshot.repository_for_path(&path)?;
3198 let work_directory = &repo.work_directory(snapshot)?;
3199 let work_dir_id = repo.work_directory.clone();
3200 let (local_repo, git_dir_scan_id) =
3201 snapshot.git_repositories.update(&work_dir_id, |entry| {
3202 (entry.repo_ptr.clone(), entry.git_dir_scan_id)
3203 })?;
3204
3205 // Short circuit if we've already scanned everything
3206 if git_dir_scan_id == scan_id {
3207 return None;
3208 }
3209
3210 let repo_ptr = local_repo.lock();
3211
3212 snapshot.scan_statuses(repo_ptr.deref(), &work_directory, path);
3213 }
3214
3215 Some(())
3216 }
3217
    /// Recomputes ignore statuses after `.gitignore` files change: drops
    /// ignores whose files were deleted, then recursively re-evaluates the
    /// ignore status of all entries beneath each changed ignore file, using
    /// one worker per CPU.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.state.lock().snapshot.clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        let abs_path = snapshot.abs_path.clone();
        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
                if *needs_update {
                    *needs_update = false;
                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                        ignores_to_update.push(parent_abs_path.clone());
                    }
                }

                // Drop ignore records whose `.gitignore` file no longer exists.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local clone and the shared state.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.state
                .lock()
                .snapshot
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants: updating an ancestor already covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
3298
    /// Re-evaluates the ignore status of the direct children of a single
    /// directory, queueing a job for each child directory, and writes any
    /// status changes back into the shared state.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own `.gitignore`, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose ignore status actually flipped.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let state = &mut self.state.lock();
        // Record the changed paths (kept sorted) so the next status update
        // reports them.
        for edit in &entries_by_path_edits {
            if let Edit::Insert(entry) = edit {
                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
                    state.changed_paths.insert(ix, entry.path.clone());
                }
            }
        }

        state
            .snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
3352
    /// Computes the set of entry changes between two snapshots, restricted to
    /// the given event paths and their descendants.
    ///
    /// Both snapshots' entry trees are walked with cursors that are advanced
    /// in lockstep, so the comparison only visits entries at or below the
    /// reported paths rather than diffing the entire tree.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> UpdatedEntriesSet {
        use BackgroundScannerPhase::*;
        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

        // Identify which paths have changed. Use the known set of changed
        // parent paths to optimize the search.
        let mut changes = Vec::new();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        old_paths.next(&());
        new_paths.next(&());
        for path in event_paths {
            let path = PathKey(path.clone());
            // Skip each cursor forward to the event path if it is still behind it.
            if old_paths.item().map_or(false, |e| e.path < path.0) {
                old_paths.seek_forward(&path, Bias::Left, &());
            }
            if new_paths.item().map_or(false, |e| e.path < path.0) {
                new_paths.seek_forward(&path, Bias::Left, &());
            }

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event path
                        // and out of its subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                // Present only in the old snapshot: removed.
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if self.phase == EventsReceivedDuringInitialScan {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.push((
                                        new_entry.path.clone(),
                                        new_entry.id,
                                        AddedOrUpdated,
                                    ));
                                } else if old_entry.id != new_entry.id {
                                    // Same path, different entry id: report the old
                                    // entry's removal and the new entry's addition.
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
                                } else if old_entry != new_entry {
                                    changes.push((new_entry.path.clone(), new_entry.id, Updated));
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                // Present only in the new snapshot: added, or merely
                                // loaded if the initial scan is still in progress.
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    if self.phase == InitialScan {
                                        Loaded
                                    } else {
                                        Added
                                    },
                                ));
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.push((
                            new_entry.path.clone(),
                            new_entry.id,
                            if self.phase == InitialScan {
                                Loaded
                            } else {
                                Added
                            },
                        ));
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes.into()
    }
3450
    /// Waits for the interval between scan-progress notifications.
    ///
    /// When `running` is false, this returns a future that never resolves, so
    /// a `select!` over this timer never fires on the progress branch. Under a
    /// fake filesystem in tests, a random simulated delay is used instead of a
    /// real timer so different interleavings get exercised.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
3463}
3464
3465fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3466 let mut result = root_char_bag;
3467 result.extend(
3468 path.to_string_lossy()
3469 .chars()
3470 .map(|c| c.to_ascii_lowercase()),
3471 );
3472 result
3473}
3474
/// A unit of work for the background scanner: scan the directory at
/// `abs_path`, enqueueing further jobs for its subdirectories onto
/// `scan_queue`.
struct ScanJob {
    abs_path: Arc<Path>,
    // The directory's path relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules in effect for this directory's contents.
    ignore_stack: Arc<IgnoreStack>,
    scan_queue: Sender<ScanJob>,
    // Inodes of the ancestor directories — presumably used to avoid
    // re-descending into directories already on the current path (e.g. via
    // circular symlinks; see `test_circular_symlinks`). TODO confirm.
    ancestor_inodes: TreeSet<u64>,
}
3482
/// A unit of work for recomputing ignore statuses of the entries beneath
/// `abs_path` after a gitignore change; child directories are re-enqueued
/// onto `ignore_queue`.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    // Gitignore rules in effect at `abs_path`.
    ignore_stack: Arc<IgnoreStack>,
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3488
/// Test-support extension methods for worktree handles.
pub trait WorktreeHandle {
    /// Waits until all pending FS events for the worktree's directory have
    /// been observed and processed by the worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3496
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait until the worktree observes it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then remove it again and wait for the removal to be observed.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any scan these events triggered to complete.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3535
/// A `sum_tree` dimension accumulating entry counts (total and filtered)
/// up to `max_path` while traversing a snapshot's entries.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // The rightmost path summarized so far.
    max_path: &'a Path,
    // All entries.
    count: usize,
    // Entries that are not gitignored.
    visible_count: usize,
    // Non-directory entries, including ignored ones.
    file_count: usize,
    // Non-directory entries that are not gitignored.
    visible_file_count: usize,
}
3544
3545impl<'a> TraversalProgress<'a> {
3546 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3547 match (include_ignored, include_dirs) {
3548 (true, true) => self.count,
3549 (true, false) => self.file_count,
3550 (false, true) => self.visible_count,
3551 (false, false) => self.visible_file_count,
3552 }
3553 }
3554}
3555
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Folds a subtree summary into the running totals; `max_path` tracks the
    // rightmost path covered by the summaries seen so far.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3565
3566impl<'a> Default for TraversalProgress<'a> {
3567 fn default() -> Self {
3568 Self {
3569 max_path: Path::new(""),
3570 count: 0,
3571 visible_count: 0,
3572 file_count: 0,
3573 visible_file_count: 0,
3574 }
3575 }
3576}
3577
/// An ordered, filtered cursor over a snapshot's entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether gitignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
3583
impl<'a> Traversal<'a> {
    /// Advances to the next entry that passes this traversal's
    /// `include_dirs`/`include_ignored` filters, returning the result of the
    /// underlying cursor seek.
    pub fn advance(&mut self) -> bool {
        // Seek to the position whose filtered count is one past the current
        // entry's end — i.e. the next entry matching the filters.
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + 1,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skips the current entry's descendants, stopping at the next entry
    /// outside its subtree that matches the filters. Returns false if the
    /// traversal is exhausted first.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek past every path that starts with the current entry's path.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned at, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Filtered index at which the current entry starts.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }

    /// Filtered index just past the current entry.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_dirs, self.include_ignored)
    }
}
3631
3632impl<'a> Iterator for Traversal<'a> {
3633 type Item = &'a Entry;
3634
3635 fn next(&mut self) -> Option<Self::Item> {
3636 if let Some(item) = self.entry() {
3637 self.advance();
3638 Some(item)
3639 } else {
3640 None
3641 }
3642 }
3643}
3644
/// A position to seek to within a worktree traversal.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// An exact path.
    Path(&'a Path),
    /// The first entry after the given path that is not one of its descendants.
    PathSuccessor(&'a Path),
    /// The n'th entry matching the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3655
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Plain lexicographic comparison against the rightmost path seen.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            // Report Greater while the cursor is still inside `path`'s
            // subtree, and Equal as soon as it has left it, so a forward seek
            // stops at the first entry that is not a descendant of `path`.
            TraversalTarget::PathSuccessor(path) => {
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            // Compare the target count against the filtered entry count
            // accumulated so far.
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3678
/// Iterator over the direct children of `parent_path`, skipping deeper
/// descendants by advancing sibling-to-sibling.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3683
3684impl<'a> Iterator for ChildEntriesIter<'a> {
3685 type Item = &'a Entry;
3686
3687 fn next(&mut self) -> Option<Self::Item> {
3688 if let Some(item) = self.traversal.entry() {
3689 if item.path.starts_with(&self.parent_path) {
3690 self.traversal.advance_to_sibling();
3691 return Some(item);
3692 }
3693 }
3694 None
3695 }
3696}
3697
/// Iterator over all entries beneath `parent_path`, in traversal order.
struct DescendentEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3702
3703impl<'a> Iterator for DescendentEntriesIter<'a> {
3704 type Item = &'a Entry;
3705
3706 fn next(&mut self) -> Option<Self::Item> {
3707 if let Some(item) = self.traversal.entry() {
3708 if item.path.starts_with(&self.parent_path) {
3709 self.traversal.advance();
3710 return Some(item);
3711 }
3712 }
3713 None
3714 }
3715}
3716
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes a worktree entry for transmission to remote collaborators.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths travel over the wire as (lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
            git_status: entry.git_status.map(|status| status.to_proto()),
        }
    }
}
3731
3732impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3733 type Error = anyhow::Error;
3734
3735 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3736 if let Some(mtime) = entry.mtime {
3737 let kind = if entry.is_dir {
3738 EntryKind::Dir
3739 } else {
3740 let mut char_bag = *root_char_bag;
3741 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3742 EntryKind::File(char_bag)
3743 };
3744 let path: Arc<Path> = PathBuf::from(entry.path).into();
3745 Ok(Entry {
3746 id: ProjectEntryId::from_proto(entry.id),
3747 kind,
3748 path,
3749 inode: entry.inode,
3750 mtime: mtime.into(),
3751 is_symlink: entry.is_symlink,
3752 is_ignored: entry.is_ignored,
3753 git_status: GitFileStatus::from_proto(entry.git_status),
3754 })
3755 } else {
3756 Err(anyhow!(
3757 "missing mtime in remote worktree entry {:?}",
3758 entry.path
3759 ))
3760 }
3761 }
3762}
3763
3764#[cfg(test)]
3765mod tests {
3766 use super::*;
3767 use fs::{FakeFs, RealFs};
3768 use gpui::{executor::Deterministic, TestAppContext};
3769 use pretty_assertions::assert_eq;
3770 use rand::prelude::*;
3771 use serde_json::json;
3772 use std::{env, fmt::Write};
3773 use util::{http::FakeHttpClient, test::temp_tree};
3774
    // Verifies that worktree traversal respects .gitignore: ignored entries
    // are omitted by `entries(false)` and included by `entries(true)`.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // "a/b" is gitignored, so it is skipped here.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // With ignored entries included, "a/b" appears as well.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3832
    // Verifies `descendent_entries` filtering: directories vs files only,
    // and inclusion/exclusion of gitignored subtrees.
    #[gpui::test]
    async fn test_descendent_entries(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "a": "",
                "b": {
                    "c": {
                        "d": ""
                    },
                    "e": {}
                },
                "f": "",
                "g": {
                    "h": {}
                },
                "i": {
                    "j": {
                        "k": ""
                    },
                    "l": {

                    }
                },
                ".gitignore": "i/j\n",
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Files only (no dirs, no ignored) beneath "b".
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("b"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("b/c/d"),]
            );
            // Including directories also yields "b" itself and its subdirectories.
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("b"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new("b"),
                    Path::new("b/c"),
                    Path::new("b/c/d"),
                    Path::new("b/e"),
                ]
            );

            // "g" contains only directories, so a files-only traversal is empty.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("g"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                Vec::<PathBuf>::new()
            );
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("g"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("g"), Path::new("g/h"),]
            );

            // "i/j" is gitignored: its file "i/j/k" shows up only when
            // ignored entries are included.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                Vec::<PathBuf>::new()
            );
            assert_eq!(
                tree.descendent_entries(false, true, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("i/j/k")]
            );
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("i"), Path::new("i/l"),]
            );
        })
    }
3931
    // Verifies that symlinks pointing back up the tree ("lib/a/lib" -> "..")
    // don't make the scanner recurse forever: the symlink entries themselves
    // appear in the snapshot, but no entries are produced beneath them.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink resolves to "/root/lib", creating a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks should be picked up by a rescan.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
4011
    // Verifies that ignore statuses are applied on the initial scan and kept
    // up to date on rescans, including rules inherited from a .gitignore in an
    // ancestor directory outside the worktree root.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        // .gitignores are handled explicitly by Zed and do not use the git
        // machinery that the git_tests module checks
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Statuses after the initial scan.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the initial scan get the same treatment.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
4092
    // Verifies that files written through the worktree get entries with the
    // correct ignore status, including files written into ignored directories.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        // Write one file into a tracked directory and one into an ignored one.
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4146
    // Verifies that creating a directory while the initial scan is still in
    // progress produces remote updates that keep an observing snapshot in
    // sync with the worktree's final state.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Mirror every update the worktree broadcasts into a second snapshot.
        let snapshot1 = tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let snapshot = Arc::new(Mutex::new(tree.snapshot()));
            let _ = tree.observe_updates(0, cx, {
                let snapshot = snapshot.clone();
                move |update| {
                    snapshot.lock().apply_remote_update(update).unwrap();
                    async { true }
                }
            });
            snapshot
        });

        // Create an entry before the scan has completed.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // The mirrored snapshot must match the worktree's final snapshot.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        assert_eq!(
            snapshot1.lock().entries(true).collect::<Vec<_>>(),
            snapshot2.entries(true).collect::<Vec<_>>()
        );
    }
4207
    // Randomized test: perform worktree mutations while the initial scan is
    // running, then check that replaying the broadcast updates onto any
    // intermediate snapshot reproduces the final snapshot.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Both knobs are overridable via environment variables.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Record snapshots as we go, and collect every broadcast update.
        let mut snapshots =
            vec![worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot())];
        let updates = Arc::new(Mutex::new(Vec::new()));
        worktree.update(cx, |tree, cx| {
            check_worktree_change_events(tree, cx);

            let _ = tree.as_local_mut().unwrap().observe_updates(0, cx, {
                let updates = updates.clone();
                move |update| {
                    updates.lock().push(update);
                    async { true }
                }
            });
        });

        // Mutate the worktree while the initial scan is still in progress.
        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            if rng.gen_bool(0.6) {
                snapshots
                    .push(worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot()));
            }
        }

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        cx.foreground().run_until_parked();

        let final_snapshot = worktree.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            tree.snapshot.check_invariants();
            tree.snapshot()
        });

        // Replaying the relevant updates onto each recorded snapshot must
        // reproduce the final snapshot exactly.
        for (i, snapshot) in snapshots.into_iter().enumerate().rev() {
            let mut updated_snapshot = snapshot.clone();
            for update in updates.lock().iter() {
                if update.scan_id >= updated_snapshot.scan_id() as u64 {
                    updated_snapshot
                        .apply_remote_update(update.clone())
                        .unwrap();
                }
            }

            assert_eq!(
                updated_snapshot.entries(true).collect::<Vec<_>>(),
                final_snapshot.entries(true).collect::<Vec<_>>(),
                "wrong updates after snapshot {i}: {snapshot:#?} {updates:#?}",
            );
        }
    }
4301
    // Randomized test: mutate the filesystem (and worktree) while delivering
    // FS events in randomly sized batches, then check that the final snapshot
    // matches a from-scratch scan and that replaying broadcast updates onto
    // earlier snapshots reproduces it.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Both knobs are overridable via environment variables.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Collect every broadcast update for later replay.
        let updates = Arc::new(Mutex::new(Vec::new()));
        worktree.update(cx, |tree, cx| {
            check_worktree_change_events(tree, cx);

            let _ = tree.as_local_mut().unwrap().observe_updates(0, cx, {
                let updates = updates.clone();
                move |update| {
                    updates.lock().push(update);
                    async { true }
                }
            });
        });

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // Buffer FS events so they can be released in random batches.
        fs.as_fake().pause_events();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if rng.gen_bool(0.2) {
                worktree
                    .update(cx, |worktree, cx| {
                        randomly_mutate_worktree(worktree, &mut rng, cx)
                    })
                    .await
                    .log_err();
            } else {
                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            }

            // Either release a random prefix of the buffered events, or keep
            // mutating the filesystem.
            let buffered_event_count = fs.as_fake().buffered_event_count();
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len);
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Release all remaining events and let the worktree settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX);
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A freshly scanned worktree must agree with the incrementally
        // maintained one.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(
                snapshot.entries_without_ids(true),
                new_snapshot.entries_without_ids(true)
            );
        }

        // Replaying the relevant updates onto each stored snapshot must
        // reproduce the final snapshot exactly.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate().rev() {
            for update in updates.lock().iter() {
                if update.scan_id >= prev_snapshot.scan_id() as u64 {
                    prev_snapshot.apply_remote_update(update.clone()).unwrap();
                }
            }

            assert_eq!(
                prev_snapshot.entries(true).collect::<Vec<_>>(),
                snapshot.entries(true).collect::<Vec<_>>(),
                "wrong updates after snapshot {i}: {updates:#?}",
            );
        }
    }
4424
    // The worktree's `UpdatedEntries` event can be used to follow along with
    // all changes to the worktree's snapshot.
    //
    // This helper subscribes to that event and incrementally applies each
    // reported change to a locally maintained copy of the entry list,
    // asserting after every event that the copy matches the tree's entries.
    fn check_worktree_change_events(tree: &mut Worktree, cx: &mut ModelContext<Worktree>) {
        let mut entries = tree.entries(true).cloned().collect::<Vec<_>>();
        cx.subscribe(&cx.handle(), move |tree, _, event, _| {
            if let Event::UpdatedEntries(changes) = event {
                for (path, _, change_type) in changes.iter() {
                    let entry = tree.entry_for_path(&path).cloned();
                    // Position of the path in the sorted entry list — either
                    // its current index or its insertion point.
                    let ix = match entries.binary_search_by_key(&path, |e| &e.path) {
                        Ok(ix) | Err(ix) => ix,
                    };
                    match change_type {
                        PathChange::Loaded => entries.insert(ix, entry.unwrap()),
                        PathChange::Added => entries.insert(ix, entry.unwrap()),
                        PathChange::Removed => drop(entries.remove(ix)),
                        PathChange::Updated => {
                            let entry = entry.unwrap();
                            let existing_entry = entries.get_mut(ix).unwrap();
                            assert_eq!(existing_entry.path, entry.path);
                            *existing_entry = entry;
                        }
                        PathChange::AddedOrUpdated => {
                            // May refer to an existing entry or a brand new one.
                            let entry = entry.unwrap();
                            if entries.get(ix).map(|e| &e.path) == Some(&entry.path) {
                                *entries.get_mut(ix).unwrap() = entry;
                            } else {
                                entries.insert(ix, entry);
                            }
                        }
                    }
                }

                let new_entries = tree.entries(true).cloned().collect::<Vec<_>>();
                assert_eq!(entries, new_entries, "incorrect changes: {:?}", changes);
            }
        })
        .detach();
    }
4463
    /// Applies one random mutation to the worktree — delete, rename, or
    /// create/overwrite an entry, each roughly a third of the time — and
    /// returns the task performing it.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        log::info!("mutating worktree");
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // Delete the entry (never the root).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // Rename the entry into a randomly chosen parent directory.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Don't move an entry into its own subtree.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Otherwise: create a child under a directory, or overwrite a file.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4524
/// Applies one random mutation to the fake filesystem under `root_path`:
/// creates a new file/dir, writes a `.gitignore`, or renames/deletes an
/// existing entry. Used by the randomized worktree tests.
///
/// * `insertion_probability` - chance of taking the "create new entry" branch;
///   creation is forced when the root dir is the only entry and no files exist.
async fn randomly_mutate_fs(
    fs: &Arc<dyn Fs>,
    root_path: &Path,
    insertion_probability: f64,
    rng: &mut impl Rng,
) {
    log::info!("mutating fs");
    // Partition every existing path under the root into files vs. directories.
    let mut files = Vec::new();
    let mut dirs = Vec::new();
    for path in fs.as_fake().paths() {
        if path.starts_with(root_path) {
            if fs.is_file(&path).await {
                files.push(path);
            } else {
                dirs.push(path);
            }
        }
    }

    // Branch 1: create a new file or directory under a random existing dir.
    if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
        let path = dirs.choose(rng).unwrap();
        let new_path = path.join(gen_name(rng));

        if rng.gen() {
            log::info!(
                "creating dir {:?}",
                new_path.strip_prefix(root_path).unwrap()
            );
            fs.create_dir(&new_path).await.unwrap();
        } else {
            log::info!(
                "creating file {:?}",
                new_path.strip_prefix(root_path).unwrap()
            );
            fs.create_file(&new_path, Default::default()).await.unwrap();
        }
    // Branch 2 (rare): write a `.gitignore` in a random dir, ignoring a random
    // subset of the files/dirs beneath it.
    } else if rng.gen_bool(0.05) {
        let ignore_dir_path = dirs.choose(rng).unwrap();
        let ignore_path = ignore_dir_path.join(&*GITIGNORE);

        // Note: `starts_with` matches the ignore dir itself too, so `subdirs`
        // is never empty (it always contains `ignore_dir_path`).
        let subdirs = dirs
            .iter()
            .filter(|d| d.starts_with(&ignore_dir_path))
            .cloned()
            .collect::<Vec<_>>();
        let subfiles = files
            .iter()
            .filter(|d| d.starts_with(&ignore_dir_path))
            .cloned()
            .collect::<Vec<_>>();
        let files_to_ignore = {
            let len = rng.gen_range(0..=subfiles.len());
            subfiles.choose_multiple(rng, len)
        };
        // NOTE(review): exclusive range here vs. inclusive above — presumably
        // so at least one subdir (e.g. the ignore dir itself) can stay
        // unignored; confirm this asymmetry is intentional.
        let dirs_to_ignore = {
            let len = rng.gen_range(0..subdirs.len());
            subdirs.choose_multiple(rng, len)
        };

        // Each ignored path is written relative to the dir holding the ignore file.
        let mut ignore_contents = String::new();
        for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
            writeln!(
                ignore_contents,
                "{}",
                path_to_ignore
                    .strip_prefix(&ignore_dir_path)
                    .unwrap()
                    .to_str()
                    .unwrap()
            )
            .unwrap();
        }
        log::info!(
            "creating gitignore {:?} with contents:\n{}",
            ignore_path.strip_prefix(&root_path).unwrap(),
            ignore_contents
        );
        fs.save(
            &ignore_path,
            &ignore_contents.as_str().into(),
            Default::default(),
        )
        .await
        .unwrap();
    // Branch 3: rename or delete an existing entry.
    } else {
        let old_path = {
            let file_path = files.choose(rng);
            // `dirs[1..]` skips the root dir itself so it is never renamed/deleted.
            let dir_path = dirs[1..].choose(rng);
            file_path.into_iter().chain(dir_path).choose(rng).unwrap()
        };

        let is_rename = rng.gen();
        if is_rename {
            // Pick a destination parent that is not inside the entry being moved.
            let new_path_parent = dirs
                .iter()
                .filter(|d| !d.starts_with(old_path))
                .choose(rng)
                .unwrap();

            // Occasionally replace an existing dir entirely: remove it first,
            // then rename the old entry to the removed dir's path.
            let overwrite_existing_dir =
                !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
            let new_path = if overwrite_existing_dir {
                fs.remove_dir(
                    &new_path_parent,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
                new_path_parent.to_path_buf()
            } else {
                new_path_parent.join(gen_name(rng))
            };

            log::info!(
                "renaming {:?} to {}{:?}",
                old_path.strip_prefix(&root_path).unwrap(),
                if overwrite_existing_dir {
                    "overwrite "
                } else {
                    ""
                },
                new_path.strip_prefix(&root_path).unwrap()
            );
            fs.rename(
                &old_path,
                &new_path,
                fs::RenameOptions {
                    overwrite: true,
                    ignore_if_exists: true,
                },
            )
            .await
            .unwrap();
        } else if fs.is_file(&old_path).await {
            log::info!(
                "deleting file {:?}",
                old_path.strip_prefix(&root_path).unwrap()
            );
            fs.remove_file(old_path, Default::default()).await.unwrap();
        } else {
            log::info!(
                "deleting dir {:?}",
                old_path.strip_prefix(&root_path).unwrap()
            );
            fs.remove_dir(
                &old_path,
                RemoveOptions {
                    recursive: true,
                    ignore_if_not_exists: true,
                },
            )
            .await
            .unwrap();
        }
    }
}
4684
4685 fn gen_name(rng: &mut impl Rng) -> String {
4686 (0..6)
4687 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4688 .map(char::from)
4689 .collect()
4690 }
4691
impl LocalSnapshot {
    /// Asserts internal consistency of the snapshot. Intended for use in
    /// tests; panics on the first violated invariant.
    fn check_invariants(&self) {
        // The two entry indexes must contain exactly the same (path, id) pairs.
        assert_eq!(
            self.entries_by_path
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        // `files(...)` iterators must visit exactly the file entries of
        // `entries_by_path`, in the same order, with ignored files excluded
        // from the visible iterator.
        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>() {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if !entry.is_ignored {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // Walk the tree via an explicit stack from the root. Children are
        // inserted in order at the old stack top (`ix`) and popped from the
        // end, so this should yield the same order as the sorted cursor below.
        let mut bfs_paths = Vec::new();
        let mut stack = vec![Path::new("")];
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>()
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        // The public `entries` traversal must agree with the raw cursor order.
        let dfs_paths_via_traversal = self
            .entries(true)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        // Every tracked gitignore must still correspond to an existing parent
        // dir entry plus a `.gitignore` entry inside it.
        for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
            let ignore_parent_path =
                ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
            assert!(self.entry_for_path(&ignore_parent_path).is_some());
            assert!(self
                .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                .is_some());
        }
    }

    /// Returns `(path, inode, is_ignored)` for each entry, sorted by path.
    /// Ignored entries are filtered out unless `include_ignored` is set.
    fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
        let mut paths = Vec::new();
        for entry in self.entries_by_path.cursor::<()>() {
            if include_ignored || !entry.is_ignored {
                paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
            }
        }
        paths.sort_by(|a, b| a.0.cmp(b.0));
        paths
    }
}
4766
4767 mod git_tests {
4768 use super::*;
4769 use pretty_assertions::assert_eq;
4770
// Verifies that git statuses survive renaming a repository's work directory:
// statuses reported under "projects/project1" must reappear unchanged under
// "projects/project2" after the rename.
#[gpui::test]
async fn test_rename_work_directory(cx: &mut TestAppContext) {
    let root = temp_tree(json!({
        "projects": {
            "project1": {
                "a": "",
                "b": "",
            }
        },

    }));
    let root_path = root.path();

    let http_client = FakeHttpClient::with_404_response();
    let client = cx.read(|cx| Client::new(http_client, cx));
    let tree = Worktree::local(
        client,
        root_path,
        true,
        Arc::new(RealFs),
        Default::default(),
        &mut cx.to_async(),
    )
    .await
    .unwrap();

    // Set up a repo where "a" is committed (then modified on disk) and "b"
    // exists only in the working tree.
    let repo = git_init(&root_path.join("projects/project1"));
    git_add("a", &repo);
    git_commit("init", &repo);
    std::fs::write(root_path.join("projects/project1/a"), "aa").ok();

    cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
        .await;

    tree.flush_fs_events(cx).await;

    cx.read(|cx| {
        let tree = tree.read(cx);
        let (work_dir, _) = tree.repositories().next().unwrap();
        assert_eq!(work_dir.as_ref(), Path::new("projects/project1"));
        assert_eq!(
            tree.status_for_file(Path::new("projects/project1/a")),
            Some(GitFileStatus::Modified)
        );
        assert_eq!(
            tree.status_for_file(Path::new("projects/project1/b")),
            Some(GitFileStatus::Added)
        );
    });

    // Rename the whole work directory and check the statuses moved with it.
    std::fs::rename(
        root_path.join("projects/project1"),
        root_path.join("projects/project2"),
    )
    .ok();
    tree.flush_fs_events(cx).await;

    cx.read(|cx| {
        let tree = tree.read(cx);
        let (work_dir, _) = tree.repositories().next().unwrap();
        assert_eq!(work_dir.as_ref(), Path::new("projects/project2"));
        assert_eq!(
            tree.status_for_file(Path::new("projects/project2/a")),
            Some(GitFileStatus::Modified)
        );
        assert_eq!(
            tree.status_for_file(Path::new("projects/project2/b")),
            Some(GitFileStatus::Added)
        );
    });
}
4842
// Verifies repository discovery: files map to the nearest enclosing `.git`
// (including nested repos under "deps"), repository update events fire when a
// `.git` dir changes, and removing `.git` unregisters the repository.
#[gpui::test]
async fn test_git_repository_for_path(cx: &mut TestAppContext) {
    let root = temp_tree(json!({
        "c.txt": "",
        "dir1": {
            ".git": {},
            "deps": {
                "dep1": {
                    ".git": {},
                    "src": {
                        "a.txt": ""
                    }
                }
            },
            "src": {
                "b.txt": ""
            }
        },
    }));

    let http_client = FakeHttpClient::with_404_response();
    let client = cx.read(|cx| Client::new(http_client, cx));
    let tree = Worktree::local(
        client,
        root.path(),
        true,
        Arc::new(RealFs),
        Default::default(),
        &mut cx.to_async(),
    )
    .await
    .unwrap();

    cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
        .await;
    tree.flush_fs_events(cx).await;

    tree.read_with(cx, |tree, _cx| {
        let tree = tree.as_local().unwrap();

        // "c.txt" sits outside any repository.
        assert!(tree.repository_for_path("c.txt".as_ref()).is_none());

        let entry = tree.repository_for_path("dir1/src/b.txt".as_ref()).unwrap();
        assert_eq!(
            entry
                .work_directory(tree)
                .map(|directory| directory.as_ref().to_owned()),
            Some(Path::new("dir1").to_owned())
        );

        // A file inside the nested repo resolves to the inner work directory,
        // not the outer "dir1" repo.
        let entry = tree
            .repository_for_path("dir1/deps/dep1/src/a.txt".as_ref())
            .unwrap();
        assert_eq!(
            entry
                .work_directory(tree)
                .map(|directory| directory.as_ref().to_owned()),
            Some(Path::new("dir1/deps/dep1").to_owned())
        );

        let entries = tree.files(false, 0);

        // Check the bulk (entry, repo) pairing API agrees with the lookups above.
        let paths_with_repos = tree
            .entries_with_repositories(entries)
            .map(|(entry, repo)| {
                (
                    entry.path.as_ref(),
                    repo.and_then(|repo| {
                        repo.work_directory(&tree)
                            .map(|work_directory| work_directory.0.to_path_buf())
                    }),
                )
            })
            .collect::<Vec<_>>();

        assert_eq!(
            paths_with_repos,
            &[
                (Path::new("c.txt"), None),
                (
                    Path::new("dir1/deps/dep1/src/a.txt"),
                    Some(Path::new("dir1/deps/dep1").into())
                ),
                (Path::new("dir1/src/b.txt"), Some(Path::new("dir1").into())),
            ]
        );
    });

    // Record repository-update events so we can assert on them below.
    let repo_update_events = Arc::new(Mutex::new(vec![]));
    tree.update(cx, |_, cx| {
        let repo_update_events = repo_update_events.clone();
        cx.subscribe(&tree, move |_, _, event, _| {
            if let Event::UpdatedGitRepositories(update) = event {
                repo_update_events.lock().push(update.clone());
            }
        })
        .detach();
    });

    // Touching a file inside `.git` must emit an update for the "dir1" repo.
    std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
    tree.flush_fs_events(cx).await;

    assert_eq!(
        repo_update_events.lock()[0]
            .iter()
            .map(|e| e.0.clone())
            .collect::<Vec<Arc<Path>>>(),
        vec![Path::new("dir1").into()]
    );

    // Deleting `.git` must unregister the repository.
    std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
    tree.flush_fs_events(cx).await;

    tree.read_with(cx, |tree, _cx| {
        let tree = tree.as_local().unwrap();

        assert!(tree
            .repository_for_path("dir1/src/b.txt".as_ref())
            .is_none());
    });
}
4964
// Walks a repository through commits, resets, stashes, ignore-rule changes,
// and directory renames, asserting the worktree's per-file git statuses track
// each change.
// TODO: Stream statuses UPDATE THIS TO CHECK BUBBLING BEHAVIOR
#[gpui::test]
async fn test_git_status(deterministic: Arc<Deterministic>, cx: &mut TestAppContext) {
    const IGNORE_RULE: &'static str = "**/target";

    let root = temp_tree(json!({
        "project": {
            "a.txt": "a",
            "b.txt": "bb",
            "c": {
                "d": {
                    "e.txt": "eee"
                }
            },
            "f.txt": "ffff",
            "target": {
                "build_file": "???"
            },
            ".gitignore": IGNORE_RULE
        },

    }));

    let http_client = FakeHttpClient::with_404_response();
    let client = cx.read(|cx| Client::new(http_client, cx));
    let tree = Worktree::local(
        client,
        root.path(),
        true,
        Arc::new(RealFs),
        Default::default(),
        &mut cx.to_async(),
    )
    .await
    .unwrap();

    cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
        .await;

    const A_TXT: &'static str = "a.txt";
    const B_TXT: &'static str = "b.txt";
    const E_TXT: &'static str = "c/d/e.txt";
    const F_TXT: &'static str = "f.txt";
    const DOTGITIGNORE: &'static str = ".gitignore";
    const BUILD_FILE: &'static str = "target/build_file";
    let project_path: &Path = &Path::new("project");

    // Initial commit contains a.txt, e.txt and the .gitignore; b.txt and
    // f.txt remain untracked.
    let work_dir = root.path().join("project");
    let mut repo = git_init(work_dir.as_path());
    repo.add_ignore_rule(IGNORE_RULE).unwrap();
    git_add(Path::new(A_TXT), &repo);
    git_add(Path::new(E_TXT), &repo);
    git_add(Path::new(DOTGITIGNORE), &repo);
    git_commit("Initial commit", &repo);

    tree.flush_fs_events(cx).await;
    deterministic.run_until_parked();

    // Check that the right git state is observed on startup
    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        assert_eq!(snapshot.repository_entries.iter().count(), 1);
        let (dir, _) = snapshot.repository_entries.iter().next().unwrap();
        assert_eq!(dir.0.as_ref(), Path::new("project"));

        assert_eq!(
            snapshot.status_for_file(project_path.join(B_TXT)),
            Some(GitFileStatus::Added)
        );
        assert_eq!(
            snapshot.status_for_file(project_path.join(F_TXT)),
            Some(GitFileStatus::Added)
        );
    });

    // Modifying a committed file must surface as Modified.
    std::fs::write(work_dir.join(A_TXT), "aa").unwrap();

    tree.flush_fs_events(cx).await;
    deterministic.run_until_parked();

    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();

        assert_eq!(
            snapshot.status_for_file(project_path.join(A_TXT)),
            Some(GitFileStatus::Modified)
        );
    });

    git_add(Path::new(A_TXT), &repo);
    git_add(Path::new(B_TXT), &repo);
    git_commit("Committing modified and added", &repo);
    tree.flush_fs_events(cx).await;
    deterministic.run_until_parked();

    // Check that repo only changes are tracked
    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();

        assert_eq!(
            snapshot.status_for_file(project_path.join(F_TXT)),
            Some(GitFileStatus::Added)
        );

        // a.txt and b.txt are clean after the commit.
        assert_eq!(snapshot.status_for_file(project_path.join(B_TXT)), None);
        assert_eq!(snapshot.status_for_file(project_path.join(A_TXT)), None);
    });

    // Undo the last commit, unstage b.txt, stash, and dirty e.txt; the ignored
    // target/build_file write must never produce a status.
    git_reset(0, &repo);
    git_remove_index(Path::new(B_TXT), &repo);
    git_stash(&mut repo);
    std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
    std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
    tree.flush_fs_events(cx).await;
    deterministic.run_until_parked();

    // Check that more complex repo changes are tracked
    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();

        assert_eq!(snapshot.status_for_file(project_path.join(A_TXT)), None);
        assert_eq!(
            snapshot.status_for_file(project_path.join(B_TXT)),
            Some(GitFileStatus::Added)
        );
        assert_eq!(
            snapshot.status_for_file(project_path.join(E_TXT)),
            Some(GitFileStatus::Modified)
        );
    });

    // Remove files/dirs and extend the ignore rules via a committed .gitignore.
    std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
    std::fs::remove_dir_all(work_dir.join("c")).unwrap();
    std::fs::write(
        work_dir.join(DOTGITIGNORE),
        [IGNORE_RULE, "f.txt"].join("\n"),
    )
    .unwrap();

    git_add(Path::new(DOTGITIGNORE), &repo);
    git_commit("Committing modified git ignore", &repo);

    tree.flush_fs_events(cx).await;
    deterministic.run_until_parked();

    // A brand-new file in a brand-new nested directory must show as Added.
    let mut renamed_dir_name = "first_directory/second_directory";
    const RENAMED_FILE: &'static str = "rf.txt";

    std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
    std::fs::write(
        work_dir.join(renamed_dir_name).join(RENAMED_FILE),
        "new-contents",
    )
    .unwrap();

    tree.flush_fs_events(cx).await;
    deterministic.run_until_parked();

    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        assert_eq!(
            snapshot
                .status_for_file(&project_path.join(renamed_dir_name).join(RENAMED_FILE)),
            Some(GitFileStatus::Added)
        );
    });

    // Renaming the parent directory must carry the Added status to the new path.
    renamed_dir_name = "new_first_directory/second_directory";

    std::fs::rename(
        work_dir.join("first_directory"),
        work_dir.join("new_first_directory"),
    )
    .unwrap();

    tree.flush_fs_events(cx).await;
    deterministic.run_until_parked();

    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();

        assert_eq!(
            snapshot.status_for_file(
                project_path
                    .join(Path::new(renamed_dir_name))
                    .join(RENAMED_FILE)
            ),
            Some(GitFileStatus::Added)
        );
    });
}
5156
/// Creates an empty git repository at `path` (equivalent to `git init`).
#[track_caller]
fn git_init(path: &Path) -> git2::Repository {
    git2::Repository::init(path).expect("Failed to initialize git repository")
}
5161
5162 #[track_caller]
5163 fn git_add<P: AsRef<Path>>(path: P, repo: &git2::Repository) {
5164 let path = path.as_ref();
5165 let mut index = repo.index().expect("Failed to get index");
5166 index.add_path(path).expect("Failed to add a.txt");
5167 index.write().expect("Failed to write index");
5168 }
5169
5170 #[track_caller]
5171 fn git_remove_index(path: &Path, repo: &git2::Repository) {
5172 let mut index = repo.index().expect("Failed to get index");
5173 index.remove_path(path).expect("Failed to add a.txt");
5174 index.write().expect("Failed to write index");
5175 }
5176
5177 #[track_caller]
5178 fn git_commit(msg: &'static str, repo: &git2::Repository) {
5179 use git2::Signature;
5180
5181 let signature = Signature::now("test", "test@zed.dev").unwrap();
5182 let oid = repo.index().unwrap().write_tree().unwrap();
5183 let tree = repo.find_tree(oid).unwrap();
5184 if let Some(head) = repo.head().ok() {
5185 let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
5186
5187 let parent_commit = parent_obj.as_commit().unwrap();
5188
5189 repo.commit(
5190 Some("HEAD"),
5191 &signature,
5192 &signature,
5193 msg,
5194 &tree,
5195 &[parent_commit],
5196 )
5197 .expect("Failed to commit with parent");
5198 } else {
5199 repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
5200 .expect("Failed to commit");
5201 }
5202 }
5203
/// Stashes working-tree changes (equivalent to `git stash`). Takes `&mut`
/// because `git2::Repository::stash_save` requires it.
#[track_caller]
fn git_stash(repo: &mut git2::Repository) {
    use git2::Signature;

    let signature = Signature::now("test", "test@zed.dev").unwrap();
    repo.stash_save(&signature, "N/A", None)
        .expect("Failed to stash");
}
5212
5213 #[track_caller]
5214 fn git_reset(offset: usize, repo: &git2::Repository) {
5215 let head = repo.head().expect("Couldn't get repo head");
5216 let object = head.peel(git2::ObjectType::Commit).unwrap();
5217 let commit = object.as_commit().unwrap();
5218 let new_head = commit
5219 .parents()
5220 .inspect(|parnet| {
5221 parnet.message();
5222 })
5223 .skip(offset)
5224 .next()
5225 .expect("Not enough history");
5226 repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
5227 .expect("Could not reset");
5228 }
5229
5230 #[allow(dead_code)]
5231 #[track_caller]
5232 fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
5233 repo.statuses(None)
5234 .unwrap()
5235 .iter()
5236 .map(|status| (status.path().unwrap().to_string(), status.status()))
5237 .collect()
5238 }
5239 }
5240}