1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{
10 repository::{GitRepository, GitStatus, RepoPath},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use smol::channel::{self, Sender};
40use std::{
41 any::Any,
42 cmp::{self, Ordering},
43 convert::TryFrom,
44 ffi::OsStr,
45 fmt,
46 future::Future,
47 mem,
48 ops::{Deref, DerefMut},
49 path::{Path, PathBuf},
50 pin::Pin,
51 sync::{
52 atomic::{AtomicUsize, Ordering::SeqCst},
53 Arc,
54 },
55 time::{Duration, SystemTime},
56};
57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
58use util::{paths::HOME, ResultExt, TryFutureExt};
59
/// Identifies a worktree within a project. Local worktrees use the gpui
/// model id; remote worktrees use the id sent by the host (see
/// `Worktree::local` / `Worktree::remote`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A worktree is either backed by the local filesystem, or is a read-only
/// mirror of a worktree shared by a remote collaborator.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Requests asking the background scanner to rescan specific paths; the
    // barrier sender is dropped once those paths have been processed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch pair tracking whether a background scan is currently running.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present only while this worktree is shared with collaborators.
    share: Option<ShareState>,
    // Per-path diagnostics; each Vec is kept sorted by LanguageServerId
    // (see `update_diagnostics`, which maintains it via binary search).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
86
/// A worktree mirroring one shared by a collaborator. Updates stream in via
/// `updates_tx`, are applied to `background_snapshot` off the main thread,
/// and are then copied into `snapshot` (see `Worktree::remote`).
pub struct RemoteWorktree {
    snapshot: Snapshot,
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once the host has disconnected (see `disconnected_from_host`).
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Tasks waiting for a given scan id to be observed, ordered by scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
99
/// A point-in-time view of a worktree's entries and git repositories,
/// shared by local and remote worktrees. Cloned freely in this file (e.g.
/// to hand a copy to a background task).
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root on the host's filesystem.
    abs_path: Arc<Path>,
    // Root directory (or file) name, used for fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
122
123impl Snapshot {
124 pub fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
125 let mut max_len = 0;
126 let mut current_candidate = None;
127 for (work_directory, repo) in (&self.repository_entries).iter() {
128 if repo.contains(self, path) {
129 if work_directory.0.as_os_str().len() >= max_len {
130 current_candidate = Some(repo);
131 max_len = work_directory.0.as_os_str().len();
132 } else {
133 break;
134 }
135 }
136 }
137
138 current_candidate.map(|entry| entry.to_owned())
139 }
140}
141
/// A git repository as recorded in a [`Snapshot`]: its work directory,
/// current branch (if known), and per-path statuses.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    pub(crate) branch: Option<Arc<str>>,
    pub(crate) statuses: TreeMap<RepoPath, GitStatus>,
}
148
149impl RepositoryEntry {
150 pub fn branch(&self) -> Option<Arc<str>> {
151 self.branch.clone()
152 }
153
154 pub fn work_directory_id(&self) -> ProjectEntryId {
155 *self.work_directory
156 }
157
158 pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
159 snapshot
160 .entry_for_id(self.work_directory_id())
161 .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
162 }
163
164 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
165 self.work_directory.contains(snapshot, path)
166 }
167
168 pub fn status_for(&self, snapshot: &Snapshot, path: &Path) -> Option<GitStatus> {
169 self.work_directory
170 .relativize(snapshot, path)
171 .and_then(|repo_path| self.statuses.get(&repo_path))
172 .cloned()
173 }
174}
175
176impl From<&RepositoryEntry> for proto::RepositoryEntry {
177 fn from(value: &RepositoryEntry) -> Self {
178 proto::RepositoryEntry {
179 work_directory_id: value.work_directory.to_proto(),
180 branch: value.branch.as_ref().map(|str| str.to_string()),
181 // TODO: Status
182 removed_statuses: Default::default(),
183 updated_statuses: Default::default(),
184 }
185 }
186}
187
/// This path corresponds to the 'content path' (the folder that contains the .git)
/// and is relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
191
192impl Default for RepositoryWorkDirectory {
193 fn default() -> Self {
194 RepositoryWorkDirectory(Arc::from(Path::new("")))
195 }
196}
197
/// Allows a work directory to be used anywhere a `&Path` is accepted.
impl AsRef<Path> for RepositoryWorkDirectory {
    fn as_ref(&self) -> &Path {
        self.0.as_ref()
    }
}
203
/// The project entry id of a repository's work directory, newtyped so that
/// path containment/relativization helpers can hang off it.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
206
207impl WorkDirectoryEntry {
208 // Note that these paths should be relative to the worktree root.
209 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
210 snapshot
211 .entry_for_id(self.0)
212 .map(|entry| path.starts_with(&entry.path))
213 .unwrap_or(false)
214 }
215
216 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
217 worktree.entry_for_id(self.0).and_then(|entry| {
218 path.strip_prefix(&entry.path)
219 .ok()
220 .map(move |path| path.into())
221 })
222 }
223}
224
/// Expose the underlying `ProjectEntryId` directly (`*entry`).
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
232
233impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
234 fn from(value: ProjectEntryId) -> Self {
235 WorkDirectoryEntry(value)
236 }
237}
238
/// A local worktree's extended snapshot: the shared [`Snapshot`] (via
/// `Deref`) plus scanner bookkeeping.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    // Parsed gitignore files keyed by the absolute path of the directory
    // that contains them; the usize is presumably the scan id at which the
    // file was last loaded — TODO confirm against the scanner.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    // work_directory_id
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    // Maps some u64 key (NOTE(review): looks like an inode or similar
    // filesystem id — confirm) to the entry id it previously had, so ids
    // can be reused for entries that reappear.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
249
/// A git repository as tracked on the local filesystem, including a handle
/// to the repository itself.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    // Scan id at which this repository last changed (compared in
    // `LocalWorktree::changed_repos`).
    pub(crate) scan_id: usize,
    pub(crate) full_scan_id: usize,
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
259
260impl LocalRepositoryEntry {
261 // Note that this path should be relative to the worktree root.
262 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
263 path.starts_with(self.git_dir_path.as_ref())
264 }
265}
266
/// Let a `LocalSnapshot` be used wherever a plain `Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
274
/// Mutable access to the inner `Snapshot` for the scanner and mutators.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
280
/// Messages sent from the background scanner to the worktree model
/// (consumed in `Worktree::local`).
enum ScanState {
    Started,
    Updated {
        snapshot: LocalSnapshot,
        changes: HashMap<Arc<Path>, PathChange>,
        // Dropped once the update is applied, releasing any task awaiting
        // the corresponding path-change request.
        barrier: Option<barrier::Sender>,
        // Whether the scanner still has more work after this update.
        scanning: bool,
    },
}
290
/// State held while a local worktree is shared with collaborators
/// (created in `LocalWorktree::share`, dropped by `unshare`).
struct ShareState {
    project_id: u64,
    // Publishing a snapshot here wakes the maintenance task below, which
    // sends the diff to the server.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signalled to make the maintenance task retry after a failed send.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
297
/// Events emitted by a `Worktree` model.
pub enum Event {
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
}
302
/// Makes `Worktree` a gpui model entity that can emit [`Event`]s.
impl Entity for Worktree {
    type Event = Event;
}
306
307impl Worktree {
    /// Create a `Worktree::Local` rooted at `path`.
    ///
    /// Stats the root path up front, builds an initial snapshot, and spawns
    /// a background scanner that watches the filesystem and streams
    /// [`ScanState`] updates back into the model.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            // scan_id starts at 1 with completed_scan_id 0, so the initial
            // scan registers as "in progress" until the scanner reports.
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scanner states into the model: toggle `is_scanning`,
            // install updated snapshots, and emit change events. The weak
            // handle lets this loop terminate once the worktree is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier wakes any task that
                                // requested this batch of path changes.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Run the scanner itself on the background executor.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
420
    /// Create a `Worktree::Remote` mirroring a worktree shared by a
    /// collaborator in project `project_remote_id`.
    ///
    /// Updates arriving on the returned worktree's channel are applied to a
    /// background copy of the snapshot, which is then cloned back into the
    /// foreground model.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming updates to the background snapshot off the main
            // thread, signalling `snapshot_updated_rx` after each one.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Whenever the background snapshot changes, copy it into the
            // model and resolve subscriptions waiting on a scan id.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Subscriptions are ordered by scan id, so stop
                            // at the first one not yet observed.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        // Worktree dropped; stop observing updates.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
503
504 pub fn as_local(&self) -> Option<&LocalWorktree> {
505 if let Worktree::Local(worktree) = self {
506 Some(worktree)
507 } else {
508 None
509 }
510 }
511
512 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
513 if let Worktree::Remote(worktree) = self {
514 Some(worktree)
515 } else {
516 None
517 }
518 }
519
520 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
521 if let Worktree::Local(worktree) = self {
522 Some(worktree)
523 } else {
524 None
525 }
526 }
527
528 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
529 if let Worktree::Remote(worktree) = self {
530 Some(worktree)
531 } else {
532 None
533 }
534 }
535
536 pub fn is_local(&self) -> bool {
537 matches!(self, Worktree::Local(_))
538 }
539
540 pub fn is_remote(&self) -> bool {
541 !self.is_local()
542 }
543
    /// A plain [`Snapshot`] of the worktree's current state, regardless of
    /// local/remote variant.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
550
    /// The current scan id (see [`Snapshot::scan_id`]).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
557
    /// The latest fully-completed scan id (see [`Snapshot::completed_scan_id`]).
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
564
    /// Whether this worktree should be shown in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
571
    /// This client's replica id for the worktree; the local host is always
    /// replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
578
579 pub fn diagnostic_summaries(
580 &self,
581 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
582 match self {
583 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
584 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
585 }
586 .iter()
587 .flat_map(|(path, summaries)| {
588 summaries
589 .iter()
590 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
591 })
592 }
593
    /// The absolute path of the worktree root on the host's filesystem.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
600}
601
602impl LocalWorktree {
    /// Whether the absolute path `path` falls within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
606
607 fn absolutize(&self, path: &Path) -> PathBuf {
608 if path.file_name().is_some() {
609 self.abs_path.join(path)
610 } else {
611 self.abs_path.to_path_buf()
612 }
613 }
614
    /// Load the file at `path` into a new [`Buffer`] with remote id `id`,
    /// including its git diff base when available.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Constructing the text buffer can be expensive for large files,
            // so do it on the background executor.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
637
638 pub fn diagnostics_for_path(
639 &self,
640 path: &Path,
641 ) -> Vec<(
642 LanguageServerId,
643 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
644 )> {
645 self.diagnostics.get(path).cloned().unwrap_or_default()
646 }
647
    /// Replace the diagnostics produced by `server_id` for `worktree_path`.
    ///
    /// Keeps `diagnostic_summaries` and `diagnostics` in sync, notifies the
    /// collaboration server when the summary changed and the worktree is
    /// shared, and returns whether anything changed.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics left for this server: drop its entry, and the
            // whole path entry once no server reports diagnostics for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path Vec is kept sorted by server id so binary search
            // works both here and in the removal branch above.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
707
    /// Install a new snapshot produced by the background scanner, forward
    /// it to collaborators when sharing, and emit an event for any git
    /// repositories that changed.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos =
            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            // Publishing to the watch wakes the share maintenance task.
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
721
    /// Compute which git repositories differ between `old_repos` and
    /// `new_repos`, keyed by the path of the repository's entry.
    ///
    /// Both maps are ordered by entry id, so this is a linear merge walk:
    /// ids present on only one side are added/removed repositories; ids on
    /// both sides are reported only when their `scan_id` changed.
    fn changed_repos(
        &self,
        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
        let mut diff = HashMap::default();
        let mut old_repos = old_repos.iter().peekable();
        let mut new_repos = new_repos.iter().peekable();
        loop {
            match (old_repos.peek(), new_repos.peek()) {
                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
                    match Ord::cmp(old_entry_id, new_entry_id) {
                        Ordering::Less => {
                            // Present only in the old map: repository removed.
                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
                                diff.insert(entry.path.clone(), (*old_repo).clone());
                            }
                            old_repos.next();
                        }
                        Ordering::Equal => {
                            // Same repository; report only if it was rescanned.
                            if old_repo.scan_id != new_repo.scan_id {
                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                    diff.insert(entry.path.clone(), (*new_repo).clone());
                                }
                            }

                            old_repos.next();
                            new_repos.next();
                        }
                        Ordering::Greater => {
                            // Present only in the new map: repository added.
                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                diff.insert(entry.path.clone(), (*new_repo).clone());
                            }
                            new_repos.next();
                        }
                    }
                }
                (Some((old_entry_id, old_repo)), None) => {
                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
                        diff.insert(entry.path.clone(), (*old_repo).clone());
                    }
                    old_repos.next();
                }
                (None, Some((new_entry_id, new_repo))) => {
                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
                        diff.insert(entry.path.clone(), (*new_repo).clone());
                    }
                    new_repos.next();
                }
                (None, None) => break,
            }
        }
        diff
    }
775
776 pub fn scan_complete(&self) -> impl Future<Output = ()> {
777 let mut is_scanning_rx = self.is_scanning.1.clone();
778 async move {
779 let mut is_scanning = is_scanning_rx.borrow().clone();
780 while is_scanning {
781 if let Some(value) = is_scanning_rx.recv().await {
782 is_scanning = value;
783 } else {
784 break;
785 }
786 }
787 }
788 }
789
    /// A clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
793
    /// The metadata sent to collaborators when sharing this worktree.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
802
    /// Load the file at `path`, returning its [`File`] handle, contents,
    /// and git index text (to serve as a diff base) when the file lives in
    /// a tracked repository.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // If the file is inside a git repository, load its index text on
        // the background executor while the file itself is read.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
857
858 pub fn save_buffer(
859 &self,
860 buffer_handle: ModelHandle<Buffer>,
861 path: Arc<Path>,
862 has_changed_file: bool,
863 cx: &mut ModelContext<Worktree>,
864 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
865 let handle = cx.handle();
866 let buffer = buffer_handle.read(cx);
867
868 let rpc = self.client.clone();
869 let buffer_id = buffer.remote_id();
870 let project_id = self.share.as_ref().map(|share| share.project_id);
871
872 let text = buffer.as_rope().clone();
873 let fingerprint = text.fingerprint();
874 let version = buffer.version();
875 let save = self.write_file(path, text, buffer.line_ending(), cx);
876
877 cx.as_mut().spawn(|mut cx| async move {
878 let entry = save.await?;
879
880 if has_changed_file {
881 let new_file = Arc::new(File {
882 entry_id: entry.id,
883 worktree: handle,
884 path: entry.path,
885 mtime: entry.mtime,
886 is_local: true,
887 is_deleted: false,
888 });
889
890 if let Some(project_id) = project_id {
891 rpc.send(proto::UpdateBufferFile {
892 project_id,
893 buffer_id,
894 file: Some(new_file.to_proto()),
895 })
896 .log_err();
897 }
898
899 buffer_handle.update(&mut cx, |buffer, cx| {
900 if has_changed_file {
901 buffer.file_updated(new_file, cx).detach();
902 }
903 });
904 }
905
906 if let Some(project_id) = project_id {
907 rpc.send(proto::BufferSaved {
908 project_id,
909 buffer_id,
910 version: serialize_version(&version),
911 mtime: Some(entry.mtime.into()),
912 fingerprint: serialize_fingerprint(fingerprint),
913 })?;
914 }
915
916 buffer_handle.update(&mut cx, |buffer, cx| {
917 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
918 });
919
920 Ok((version, fingerprint, entry.mtime))
921 })
922 }
923
    /// Create a file or directory at `path` (relative to the worktree root)
    /// and return its entry once the scanner has observed it.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Create an empty file with default contents/line endings.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
950
    /// Write `text` to `path` (relative to the worktree root) with the
    /// given line endings, then return the refreshed entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
973
    /// Delete the entry with `entry_id` from disk (recursively for
    /// directories), then wait for the background scanner to observe the
    /// change. Returns `None` when the entry is not in the snapshot.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // An entry with no file name is the worktree root itself.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path; the barrier
            // resolves once it has done so.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
1016
    /// Rename the entry with `entry_id` to `new_path` (worktree-relative)
    /// and return its refreshed entry. Returns `None` when the entry is not
    /// in the snapshot.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            // Refresh both the new and old locations so the snapshot drops
            // the stale entry and picks up the renamed one.
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
1043
    /// Recursively copy the entry with `entry_id` to `new_path`
    /// (worktree-relative) and return the new entry. Returns `None` when
    /// the entry is not in the snapshot.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
1075
    /// Ask the background scanner to rescan `path` (and `old_path` when a
    /// rename is involved), wait for it to finish, then return the
    /// refreshed entry from the updated snapshot.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path with no file name denotes the worktree root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed `paths`.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1113
    /// Start (or resume) sharing this worktree with collaborators in
    /// `project_id`.
    ///
    /// Sends current diagnostic summaries immediately, then spawns a task
    /// that streams snapshot updates to the server in chunks, pausing after
    /// failed requests until updates are resumed. The returned task
    /// resolves once the first full snapshot has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and nudge the existing
            // maintenance task to resume sending updates.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first diff sends
                    // the complete worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Tiny chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Discard any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // First snapshot fully sent: resolve the share task.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1200
1201 pub fn unshare(&mut self) {
1202 self.share.take();
1203 }
1204
    /// Whether this worktree is currently being shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1208}
1209
impl RemoteWorktree {
    /// Returns a clone of the current foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks this worktree as disconnected: drops the update channel so no
    /// further remote updates are enqueued, and clears pending snapshot
    /// subscriptions (their receivers will observe a closed channel).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the given buffer, then applies the resulting
    /// version, fingerprint, and mtime to the local buffer.
    ///
    /// Resolves with `(version, fingerprint, mtime)`, or fails if the RPC
    /// fails or the response is missing an mtime.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Enqueues a worktree update received from the host. Updates arriving
    /// after `disconnected_from_host` are silently dropped.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot at least as new as `scan_id` has been fully applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once a snapshot for `scan_id` has been
    /// observed, or fails immediately if the worktree is disconnected.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` below return an error.
            drop(tx);
        } else {
            // Keep subscriptions sorted by scan id so they can be resolved in
            // order as snapshots arrive.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Applies a diagnostic summary for `path` received from the host. Empty
    /// summaries remove the server's entry (and the path's map when it
    /// becomes empty).
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry received from the host once the snapshot for
    /// `scan_id` has been observed, keeping the foreground snapshot in sync
    /// with the background one.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes the entry with the given id once the snapshot for `scan_id`
    /// has been observed.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1351
1352impl Snapshot {
1353 pub fn id(&self) -> WorktreeId {
1354 self.id
1355 }
1356
1357 pub fn abs_path(&self) -> &Arc<Path> {
1358 &self.abs_path
1359 }
1360
1361 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1362 self.entries_by_id.get(&entry_id, &()).is_some()
1363 }
1364
1365 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1366 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1367 let old_entry = self.entries_by_id.insert_or_replace(
1368 PathEntry {
1369 id: entry.id,
1370 path: entry.path.clone(),
1371 is_ignored: entry.is_ignored,
1372 scan_id: 0,
1373 },
1374 &(),
1375 );
1376 if let Some(old_entry) = old_entry {
1377 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1378 }
1379 self.entries_by_path.insert_or_replace(entry.clone(), &());
1380 Ok(entry)
1381 }
1382
1383 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1384 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1385 self.entries_by_path = {
1386 let mut cursor = self.entries_by_path.cursor();
1387 let mut new_entries_by_path =
1388 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1389 while let Some(entry) = cursor.item() {
1390 if entry.path.starts_with(&removed_entry.path) {
1391 self.entries_by_id.remove(&entry.id, &());
1392 cursor.next(&());
1393 } else {
1394 break;
1395 }
1396 }
1397 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1398 new_entries_by_path
1399 };
1400
1401 Some(removed_entry.path)
1402 }
1403
1404 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1405 let mut entries_by_path_edits = Vec::new();
1406 let mut entries_by_id_edits = Vec::new();
1407 for entry_id in update.removed_entries {
1408 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1409 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1410 entries_by_id_edits.push(Edit::Remove(entry.id));
1411 }
1412 }
1413
1414 for entry in update.updated_entries {
1415 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1416 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1417 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1418 }
1419 entries_by_id_edits.push(Edit::Insert(PathEntry {
1420 id: entry.id,
1421 path: entry.path.clone(),
1422 is_ignored: entry.is_ignored,
1423 scan_id: 0,
1424 }));
1425 entries_by_path_edits.push(Edit::Insert(entry));
1426 }
1427
1428 self.entries_by_path.edit(entries_by_path_edits, &());
1429 self.entries_by_id.edit(entries_by_id_edits, &());
1430
1431 update.removed_repositories.sort_unstable();
1432 self.repository_entries.retain(|_, entry| {
1433 if let Ok(_) = update
1434 .removed_repositories
1435 .binary_search(&entry.work_directory.to_proto())
1436 {
1437 false
1438 } else {
1439 true
1440 }
1441 });
1442
1443 for repository in update.updated_repositories {
1444 let repository = RepositoryEntry {
1445 work_directory: ProjectEntryId::from_proto(repository.work_directory_id).into(),
1446 branch: repository.branch.map(Into::into),
1447 // TODO: status
1448 statuses: Default::default(),
1449 };
1450 if let Some(entry) = self.entry_for_id(repository.work_directory_id()) {
1451 self.repository_entries
1452 .insert(RepositoryWorkDirectory(entry.path.clone()), repository)
1453 } else {
1454 log::error!("no work directory entry for repository {:?}", repository)
1455 }
1456 }
1457
1458 self.scan_id = update.scan_id as usize;
1459 if update.is_last_update {
1460 self.completed_scan_id = update.scan_id as usize;
1461 }
1462
1463 Ok(())
1464 }
1465
1466 pub fn file_count(&self) -> usize {
1467 self.entries_by_path.summary().file_count
1468 }
1469
1470 pub fn visible_file_count(&self) -> usize {
1471 self.entries_by_path.summary().visible_file_count
1472 }
1473
1474 fn traverse_from_offset(
1475 &self,
1476 include_dirs: bool,
1477 include_ignored: bool,
1478 start_offset: usize,
1479 ) -> Traversal {
1480 let mut cursor = self.entries_by_path.cursor();
1481 cursor.seek(
1482 &TraversalTarget::Count {
1483 count: start_offset,
1484 include_dirs,
1485 include_ignored,
1486 },
1487 Bias::Right,
1488 &(),
1489 );
1490 Traversal {
1491 cursor,
1492 include_dirs,
1493 include_ignored,
1494 }
1495 }
1496
1497 fn traverse_from_path(
1498 &self,
1499 include_dirs: bool,
1500 include_ignored: bool,
1501 path: &Path,
1502 ) -> Traversal {
1503 let mut cursor = self.entries_by_path.cursor();
1504 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1505 Traversal {
1506 cursor,
1507 include_dirs,
1508 include_ignored,
1509 }
1510 }
1511
1512 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1513 self.traverse_from_offset(false, include_ignored, start)
1514 }
1515
1516 pub fn entries(&self, include_ignored: bool) -> Traversal {
1517 self.traverse_from_offset(true, include_ignored, 0)
1518 }
1519
1520 pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1521 self.repository_entries.values()
1522 }
1523
1524 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1525 let empty_path = Path::new("");
1526 self.entries_by_path
1527 .cursor::<()>()
1528 .filter(move |entry| entry.path.as_ref() != empty_path)
1529 .map(|entry| &entry.path)
1530 }
1531
1532 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1533 let mut cursor = self.entries_by_path.cursor();
1534 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1535 let traversal = Traversal {
1536 cursor,
1537 include_dirs: true,
1538 include_ignored: true,
1539 };
1540 ChildEntriesIter {
1541 traversal,
1542 parent_path,
1543 }
1544 }
1545
1546 pub fn root_entry(&self) -> Option<&Entry> {
1547 self.entry_for_path("")
1548 }
1549
1550 pub fn root_name(&self) -> &str {
1551 &self.root_name
1552 }
1553
1554 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1555 self.repository_entries
1556 .get(&RepositoryWorkDirectory(Path::new("").into()))
1557 .map(|entry| entry.to_owned())
1558 }
1559
1560 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1561 self.repository_entries.values()
1562 }
1563
1564 pub fn scan_id(&self) -> usize {
1565 self.scan_id
1566 }
1567
1568 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1569 let path = path.as_ref();
1570 self.traverse_from_path(true, true, path)
1571 .entry()
1572 .and_then(|entry| {
1573 if entry.path.as_ref() == path {
1574 Some(entry)
1575 } else {
1576 None
1577 }
1578 })
1579 }
1580
1581 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1582 let entry = self.entries_by_id.get(&id, &())?;
1583 self.entry_for_path(&entry.path)
1584 }
1585
1586 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1587 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1588 }
1589}
1590
impl LocalSnapshot {
    /// Returns the local (on-disk) repository state for the given repository
    /// entry.
    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }

    /// Finds the repository whose `.git` directory contains `path`, returning
    /// the work directory's entry id and a handle to the repository.
    pub(crate) fn repo_for_metadata(
        &self,
        path: &Path,
    ) -> Option<(ProjectEntryId, Arc<Mutex<dyn GitRepository>>)> {
        let (entry_id, local_repo) = self
            .git_repositories
            .iter()
            .find(|(_, repo)| repo.in_dot_git(path))?;
        Some((*entry_id, local_repo.repo_ptr.to_owned()))
    }

    /// Builds an update message containing this snapshot's full contents, as
    /// if sent to a client that has not seen any snapshot yet.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
            removed_repositories: Default::default(),
        }
    }

    /// Computes the delta between this snapshot and an older snapshot
    /// (`other`) as an update message.
    ///
    /// Both id-ordered entry streams are merged in a single pass: entries
    /// present here but not in `other` (or rescanned since) are reported as
    /// updated, entries only present in `other` as removed. Repository maps
    /// are diffed the same way, keyed by work directory.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present here but not in `other`: newly added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: report only if rescanned since.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in `other`: removed.
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        // Diff the repository maps with the same merge strategy.
        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
        let mut removed_repositories = Vec::new();
        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
        loop {
            match (self_repos.peek(), other_repos.peek()) {
                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
                    match Ord::cmp(self_work_dir, other_work_dir) {
                        Ordering::Less => {
                            updated_repositories.push((*self_repo).into());
                            self_repos.next();
                        }
                        Ordering::Equal => {
                            if self_repo != other_repo {
                                updated_repositories.push((*self_repo).into());
                            }

                            self_repos.next();
                            other_repos.next();
                        }
                        Ordering::Greater => {
                            removed_repositories.push(other_repo.work_directory.to_proto());
                            other_repos.next();
                        }
                    }
                }
                (Some((_, self_repo)), None) => {
                    updated_repositories.push((*self_repo).into());
                    self_repos.next();
                }
                (None, Some((_, other_repo))) => {
                    removed_repositories.push(other_repo.work_directory.to_proto());
                    other_repos.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }

    /// Inserts a single entry discovered by the background scanner, reusing
    /// ids where possible and reloading any `.gitignore` contents it carries.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't regress a fully-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            // If the path was previously occupied by a different entry,
            // remove that entry's id-keyed record.
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the children of a directory that the background scanner has
    /// just finished scanning, marking the parent as a real directory and
    /// registering any `.gitignore` or `.git` directory it contains.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // Anything else (e.g. a file now at this path) cannot be populated.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            self.build_repo(parent_path, fs);
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Opens the repository whose `.git` directory is at `parent_path` and
    /// records it in both the shared repository map and the local repository
    /// state, unless it is already known.
    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
        let abs_path = self.abs_path.join(&parent_path);
        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();

        // Guard against repositories inside the repository metadata
        if work_dir
            .components()
            .find(|component| component.as_os_str() == *DOT_GIT)
            .is_some()
        {
            return None;
        };

        let work_dir_id = self
            .entry_for_path(work_dir.clone())
            .map(|entry| entry.id)?;

        if self.git_repositories.get(&work_dir_id).is_none() {
            let repo = fs.open_repo(abs_path.as_path())?;
            let work_directory = RepositoryWorkDirectory(work_dir.clone());
            let scan_id = self.scan_id;

            let repo_lock = repo.lock();

            self.repository_entries.insert(
                work_directory,
                RepositoryEntry {
                    work_directory: work_dir_id.into(),
                    branch: repo_lock.branch_name().map(Into::into),
                    statuses: repo_lock.statuses().unwrap_or_default(),
                },
            );
            // Release the repository lock before storing the handle.
            drop(repo_lock);

            self.git_repositories.insert(
                work_dir_id,
                LocalRepositoryEntry {
                    scan_id,
                    full_scan_id: scan_id,
                    repo_ptr: repo,
                    git_dir_path: parent_path.clone(),
                },
            )
        }

        Some(())
    }
    /// Makes `entry` keep a previously-assigned id: either the id of a
    /// just-removed entry with the same inode, or the id of an existing entry
    /// at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` and all of its descendants, remembering
    /// removed ids by inode so they can be reused if the files reappear.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into [before `path`], [`path` and descendants],
            // and [after], then stitch the first and last back together.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // A .gitignore was deleted; mark its parent's ignore data as
            // touched in this scan so it gets re-evaluated.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        }
    }

    /// Collects the inodes of all ancestors of `path` (excluding `path`
    /// itself) that exist in this snapshot.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of `.gitignore` files that apply to `abs_path`,
    /// walking from the outermost ancestor down to its parent directory.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            // Once a directory is ignored, everything beneath it is too.
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1963
1964async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1965 let contents = fs.load(abs_path).await?;
1966 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1967 let mut builder = GitignoreBuilder::new(parent);
1968 for line in contents.lines() {
1969 builder.add_line(Some(abs_path.into()), line)?;
1970 }
1971 Ok(builder.build()?)
1972}
1973
impl WorktreeId {
    /// Creates a worktree id from a model handle's id.
    pub fn from_usize(handle_id: usize) -> Self {
        Self(handle_id)
    }

    /// Creates a worktree id from its wire representation.
    pub(crate) fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    /// The wire representation of this id.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// The raw value of this id.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
1991
impl fmt::Display for WorktreeId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the inner usize so formatter flags (width, fill, …)
        // are honored.
        self.0.fmt(f)
    }
}
1997
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        // Both variants expose their underlying snapshot.
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
2008
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    // A local worktree transparently exposes its snapshot's methods.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2016
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    // A remote worktree transparently exposes its snapshot's methods.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2024
impl fmt::Debug for LocalWorktree {
    // Format the worktree by delegating to its snapshot's Debug output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
2030
2031impl fmt::Debug for Snapshot {
2032 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2033 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2034 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2035
2036 impl<'a> fmt::Debug for EntriesByPath<'a> {
2037 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2038 f.debug_map()
2039 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2040 .finish()
2041 }
2042 }
2043
2044 impl<'a> fmt::Debug for EntriesById<'a> {
2045 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2046 f.debug_list().entries(self.0.iter()).finish()
2047 }
2048 }
2049
2050 f.debug_struct("Snapshot")
2051 .field("id", &self.id)
2052 .field("root_name", &self.root_name)
2053 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2054 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2055 .finish()
2056 }
2057}
2058
/// A handle to a file within a worktree, as exposed to the language layer.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree that contains this file.
    pub worktree: ModelHandle<Worktree>,
    /// The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time of the file on disk.
    pub mtime: SystemTime,
    /// Id of the project entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether this file belongs to a local (rather than remote) worktree.
    pub(crate) is_local: bool,
    /// Whether the underlying file has been deleted.
    pub(crate) is_deleted: bool,
}
2068
impl language::File for File {
    /// Returns `self` as a `LocalFile` when it belongs to a local worktree.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    /// The file's last-known modification time.
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path, relative to the worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// The path to display to the user: the worktree's root name (or, for
    /// invisible worktrees, its absolute path) joined with the file's
    /// relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            // Abbreviate paths under the home directory with a leading "~".
            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means this IS the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    /// Whether the underlying file has been deleted.
    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    /// Allows downcasting back to the concrete `File` type.
    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission over RPC.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
2136
impl language::LocalFile for File {
    /// The file's absolute path on the local filesystem.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that this file's buffer was reloaded from disk,
    /// if the worktree is currently shared. Send failures are only logged.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2180
2181impl File {
2182 pub fn from_proto(
2183 proto: rpc::proto::File,
2184 worktree: ModelHandle<Worktree>,
2185 cx: &AppContext,
2186 ) -> Result<Self> {
2187 let worktree_id = worktree
2188 .read(cx)
2189 .as_remote()
2190 .ok_or_else(|| anyhow!("not remote"))?
2191 .id();
2192
2193 if worktree_id.to_proto() != proto.worktree_id {
2194 return Err(anyhow!("worktree id does not match file"));
2195 }
2196
2197 Ok(Self {
2198 worktree,
2199 path: Path::new(&proto.path).into(),
2200 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2201 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2202 is_local: false,
2203 is_deleted: proto.is_deleted,
2204 })
2205 }
2206
2207 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2208 file.and_then(|f| f.as_any().downcast_ref())
2209 }
2210
2211 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2212 self.worktree.read(cx).id()
2213 }
2214
2215 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2216 if self.is_deleted {
2217 None
2218 } else {
2219 Some(self.entry_id)
2220 }
2221 }
2222}
2223
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable project-wide id of this entry.
    pub id: ProjectEntryId,
    /// Whether this is a file, a directory, or a not-yet-scanned directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode; used to re-associate ids when paths reappear.
    pub inode: u64,
    /// Modification time reported by the filesystem.
    pub mtime: SystemTime,
    /// Whether the filesystem object is a symbolic link.
    pub is_symlink: bool,
    /// Whether the entry is excluded by a `.gitignore`.
    pub is_ignored: bool,
}
2234
/// The kind of filesystem object an `Entry` represents.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose children have not been scanned yet.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, carrying the character bag of its path for fuzzy matching.
    File(CharBag),
}
2241
/// Describes how a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// The entry was newly created.
    Added,
    /// The entry was removed.
    Removed,
    /// An existing entry was modified.
    Updated,
    /// The entry was either created or modified.
    AddedOrUpdated,
}
2249
2250impl Entry {
2251 fn new(
2252 path: Arc<Path>,
2253 metadata: &fs::Metadata,
2254 next_entry_id: &AtomicUsize,
2255 root_char_bag: CharBag,
2256 ) -> Self {
2257 Self {
2258 id: ProjectEntryId::new(next_entry_id),
2259 kind: if metadata.is_dir {
2260 EntryKind::PendingDir
2261 } else {
2262 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2263 },
2264 path,
2265 inode: metadata.inode,
2266 mtime: metadata.mtime,
2267 is_symlink: metadata.is_symlink,
2268 is_ignored: false,
2269 }
2270 }
2271
2272 pub fn is_dir(&self) -> bool {
2273 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2274 }
2275
2276 pub fn is_file(&self) -> bool {
2277 matches!(self.kind, EntryKind::File(_))
2278 }
2279}
2280
2281impl sum_tree::Item for Entry {
2282 type Summary = EntrySummary;
2283
2284 fn summary(&self) -> Self::Summary {
2285 let visible_count = if self.is_ignored { 0 } else { 1 };
2286 let file_count;
2287 let visible_file_count;
2288 if self.is_file() {
2289 file_count = 1;
2290 visible_file_count = visible_count;
2291 } else {
2292 file_count = 0;
2293 visible_file_count = 0;
2294 }
2295
2296 EntrySummary {
2297 max_path: self.path.clone(),
2298 count: 1,
2299 visible_count,
2300 file_count,
2301 visible_file_count,
2302 }
2303 }
2304}
2305
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed — and therefore ordered in the tree — by their
    /// worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2313
/// Aggregate statistics over a range of [`Entry`] items in the entries
/// sum-tree, used to seek by path or by visible/file counts.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest path in the summarized range.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Number of entries that are not gitignored.
    visible_count: usize,
    // Number of file entries.
    file_count: usize,
    // Number of file entries that are not gitignored.
    visible_file_count: usize,
}
2322
impl Default for EntrySummary {
    /// The identity summary: an empty path and all counts at zero.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2334
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines two adjacent summaries: the counts add, and `max_path` comes
    /// from the right-hand summary, which covers the greater paths.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2346
/// An item in the id-keyed sum-tree, mapping a [`ProjectEntryId`] back to the
/// entry's path and ignore status.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    // Worktree-relative path of the entry.
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this entry was last updated.
    scan_id: usize,
}
2354
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// A single entry's summary is just its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2362
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Path entries are keyed — and therefore ordered — by their entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2370
/// Summary for the id-keyed entries tree: the greatest id in a range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2375
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The right-hand summary covers the greater ids, so its `max_id` wins.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2383
/// Lets a [`ProjectEntryId`] be used directly as a seek dimension over the
/// id-keyed entries tree.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2389
/// Key used to seek by path in the path-ordered entries tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2392
impl Default for PathKey {
    /// The empty path, which sorts before every other path.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2398
/// Lets a [`PathKey`] be used as a seek dimension over the path-ordered
/// entries tree.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2404
/// Scans a worktree's directory in the background, keeping `snapshot` up to
/// date as file-system events and refresh requests arrive.
struct BackgroundScanner {
    // The snapshot this scanner mutates.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    // Channel for reporting scan progress and updated snapshots.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    // Requests to refresh specific paths; acknowledged via the barrier sender.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    // The snapshot as of the last status update, plus the paths that have
    // changed since then. Used to compute change sets.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    // Whether the initial recursive scan has completed.
    finished_initial_scan: bool,
}
2414
2415impl BackgroundScanner {
    /// Creates a scanner for the given snapshot. The scanner does no work
    /// until `run` is called.
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            // Baseline for change detection: the initial snapshot, with no
            // changed paths yet. Cloned before `snapshot` is moved below.
            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }
2433
    /// The scanner's main loop: performs the initial recursive scan of the
    /// worktree, then keeps processing file-system events and foreground
    /// refresh requests until the event stream or request channel closes.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore ignores the root itself, mark the root
            // entry as ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    // Batch up any additional events that are already pending.
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2525
    /// Reloads the requested paths and sends a status update that carries the
    /// caller's barrier, acknowledging the request. Returns false if the
    /// status-update channel has been dropped.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2530
    /// Handles a batch of file-system event paths: reloads the affected
    /// entries, recursively rescans any affected directories, refreshes
    /// ignore statuses, prunes git repository state for repositories whose
    /// `.git` entry is gone, and finally reports the updated snapshot.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Record which paths changed so the next status update can build
            // a precise change set.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Temporarily take the map out of the snapshot so the retain closure
        // can read `snapshot` while the map is being filtered. Drop git
        // repositories whose `.git` entry no longer exists.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|work_directory_id, _| {
            snapshot
                .entry_for_id(*work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                })
        });
        snapshot.git_repositories = git_repositories;

        // Likewise, drop repository entries that no longer have a backing
        // git repository.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            snapshot
                .git_repositories
                .get(&entry.work_directory.0)
                .is_some()
        });
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }
2573
    /// Drains `scan_jobs_rx` using one worker per CPU, scanning directories
    /// recursively. While scanning, optionally sends periodic progress
    /// updates, and keeps servicing foreground refresh requests so user
    /// operations are not blocked behind the scan.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the receiving side is gone, there is no one to scan for.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2646
    /// Diffs the current snapshot against the last-reported one, then sends a
    /// `ScanState::Updated` message containing the snapshot, the change set,
    /// and the caller's optional barrier. Returns false if the receiving side
    /// of the status channel has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        // Swap the new snapshot into `prev_state`, leaving `old_snapshot`
        // holding the previously-reported state; also take the accumulated
        // changed paths, resetting them for the next update.
        let mut old_snapshot = snapshot.snapshot.clone();
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2663
    /// Reads the directory described by `job`, building entries for its
    /// children, tracking any `.gitignore` found there, and enqueueing a new
    /// scan job for each non-cyclic subdirectory. The collected entries are
    /// written into the snapshot before the child jobs are queued.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory entry; `None` marks a directory we refuse to
        // recurse into (symlink cycle).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and stat-ing; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // it is usually encountered early, so there should rarely be many already-processed
                // entries. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // Directories and pending jobs were pushed in lockstep,
                        // so each directory entry has a matching job slot.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2789
    /// Re-stats the given absolute paths, updating, inserting, or removing
    /// the corresponding snapshot entries. When `scan_queue_tx` is provided,
    /// changed directories are enqueued for recursive rescanning. Returns the
    /// worktree-relative paths of the processed events, or `None` if the
    /// worktree root can't be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sorting puts parents before their descendants; drop any path whose
        // ancestor is also in the batch, since processing the ancestor covers it.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                // The path exists: insert or refresh its entry.
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    self.reload_repo_for_path(&path, &mut snapshot);

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Skip directories that would form a symlink cycle.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                // The path is gone: its entry was already removed above, but
                // its git status may still need cleaning up.
                Ok(None) => {
                    self.remove_repo_path(&path, &mut snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2881
    /// Removes the git status recorded for a deleted `path` from its
    /// containing repository's entry. Paths inside a `.git` directory are
    /// skipped here; those are handled by `reload_repo_for_path`. Returns
    /// `None` (early) when no containing repository is found.
    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        if !path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let scan_id = snapshot.scan_id;
            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            // Record that the repository was touched in this scan.
            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            snapshot
                .repository_entries
                .update(&work_dir, |entry| entry.statuses.remove(&repo_path));
        }

        Some(())
    }
2906
    /// Refreshes git state for a changed `path`. If the path lies inside a
    /// `.git` directory, the whole repository is reloaded (index, branch, and
    /// all statuses); otherwise only the status of that single file is
    /// refreshed. Returns `None` when there is nothing to update.
    fn reload_repo_for_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let (entry_id, repo) = snapshot.repo_for_metadata(&path)?;

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            let repo = repo.lock();
            repo.reload_index();
            let branch = repo.branch_name();
            let statuses = repo.statuses().unwrap_or_default();

            // A full reload satisfies both the per-scan and full-scan markers.
            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.scan_id = scan_id;
                entry.full_scan_id = scan_id;
            });

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.branch = branch.map(Into::into);
                entry.statuses = statuses;
            });
        } else {
            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let status = {
                let local_repo = snapshot.get_local_repo(&repo)?;

                // Short circuit if we've already scanned everything
                if local_repo.full_scan_id == scan_id {
                    return None;
                }

                let git_ptr = local_repo.repo_ptr.lock();
                git_ptr.file_status(&repo_path)?
            };

            if status != GitStatus::Untracked {
                let work_dir = repo.work_directory(snapshot)?;
                let work_dir_id = repo.work_directory;

                snapshot
                    .git_repositories
                    .update(&work_dir_id, |entry| entry.scan_id = scan_id);

                snapshot
                    .repository_entries
                    .update(&work_dir, |entry| entry.statuses.insert(repo_path, status));
            }
        }

        Some(())
    }
2967
    /// Recomputes `is_ignored` for entries affected by changed `.gitignore`
    /// files: forgets ignore files that no longer exist, then walks the
    /// subtree below each changed ignore file in parallel, servicing
    /// foreground refresh requests along the way.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // An ignore file touched after the last completed scan needs
                // its subtree re-evaluated.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        // Remove deleted ignore files from both the local copy and the shared
        // snapshot.
        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip ignore files nested below one we're already updating; the
            // ancestor's recursive walk will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
3045
    /// Recomputes ignore status for the direct children of `job.abs_path`,
    /// enqueueing a follow-up job for each child directory, and writes any
    /// entries whose status changed back into the live snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own .gitignore, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose ignore status actually changed,
            // keeping both trees (path-keyed and id-keyed) in sync.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
3088
    /// Computes per-path changes between two snapshots, restricted to the
    /// given event paths and their descendants. If the initial scan had not
    /// finished when the events were captured, entries present in both
    /// snapshots are reported as `AddedOrUpdated`, since additions can't be
    /// distinguished from updates in that case.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            // Walk both cursors in lockstep over the event path's subtree,
            // classifying entries by how the two snapshots differ.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path and out of its subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
3155
    /// Waits one progress-update interval. When progress updates are disabled
    /// (`running == false`), pends forever so the timer branch of the
    /// `select_biased!` in `scan_dirs` never fires. Under a fake fs in tests,
    /// a random delay is used instead of wall-clock time.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
3168}
3169
3170fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3171 let mut result = root_char_bag;
3172 result.extend(
3173 path.to_string_lossy()
3174 .chars()
3175 .map(|c| c.to_ascii_lowercase()),
3176 );
3177 result
3178}
3179
/// A unit of work for the recursive directory scan: one directory to read.
struct ScanJob {
    abs_path: Arc<Path>,
    // Worktree-relative path of the directory.
    path: Arc<Path>,
    // Gitignore rules in effect for this directory's contents.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
3187
/// A unit of work for recomputing ignore statuses: one directory whose
/// children need their `is_ignored` flags re-evaluated.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    // Gitignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3193
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all file-system events that predate this call have been
    /// processed by the worktree, so tests aren't confused by stale events.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3201
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // Delete the sentinel and wait for that to be observed as well.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3242
/// Seek dimension accumulating entry counts during a [`Traversal`], mirroring
/// the fields of [`EntrySummary`].
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // The greatest path seen so far.
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
3251
3252impl<'a> TraversalProgress<'a> {
3253 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3254 match (include_ignored, include_dirs) {
3255 (true, true) => self.count,
3256 (true, false) => self.file_count,
3257 (false, true) => self.visible_count,
3258 (false, false) => self.visible_file_count,
3259 }
3260 }
3261}
3262
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Accumulates an entry summary: counts add, and `max_path` tracks the
    /// greatest path seen.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3272
impl<'a> Default for TraversalProgress<'a> {
    /// The starting point of a traversal: the empty path, all counts zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
3284
/// An ordered traversal over a snapshot's entries, optionally filtering out
/// directories and/or gitignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether gitignored entries are included.
    include_ignored: bool,
    // Whether directory entries are included.
    include_dirs: bool,
}
3290
impl<'a> Traversal<'a> {
    /// Advances to the next entry included by this traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward so that `offset()` reaches `offset`, where offsets count
    /// only entries matching the include filters.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips the current entry's descendants, stopping at the next entry that
    /// passes the filters. Returns false when the traversal is exhausted.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            // Keep seeking past filtered-out entries.
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of filtered entries before the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3336
3337impl<'a> Iterator for Traversal<'a> {
3338 type Item = &'a Entry;
3339
3340 fn next(&mut self) -> Option<Self::Item> {
3341 if let Some(item) = self.entry() {
3342 self.advance();
3343 Some(item)
3344 } else {
3345 None
3346 }
3347 }
3348}
3349
/// A seek target within the entries tree, used to drive a [`Traversal`].
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek to the first entry that is not `path` itself or a descendant of it.
    PathSuccessor(&'a Path),
    /// Seek to the entry at the given filtered offset.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3360
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    /// Orders this target relative to the cursor's accumulated progress,
    /// letting the sum-tree binary-search toward it.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // The successor is reached once the cursor's greatest path is
                // no longer `path` itself or a descendant of it.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3383
/// Iterator over the entries directly beneath `parent_path`, skipping each
/// yielded entry's descendants via `Traversal::advance_to_sibling`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3388
3389impl<'a> Iterator for ChildEntriesIter<'a> {
3390 type Item = &'a Entry;
3391
3392 fn next(&mut self) -> Option<Self::Item> {
3393 if let Some(item) = self.traversal.entry() {
3394 if item.path.starts_with(&self.parent_path) {
3395 self.traversal.advance_to_sibling();
3396 return Some(item);
3397 }
3398 }
3399 None
3400 }
3401}
3402
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a local worktree entry into its protobuf representation for
    /// transmission to remote collaborators.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths are sent as (possibly lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3416
3417impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3418 type Error = anyhow::Error;
3419
3420 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3421 if let Some(mtime) = entry.mtime {
3422 let kind = if entry.is_dir {
3423 EntryKind::Dir
3424 } else {
3425 let mut char_bag = *root_char_bag;
3426 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3427 EntryKind::File(char_bag)
3428 };
3429 let path: Arc<Path> = PathBuf::from(entry.path).into();
3430 Ok(Entry {
3431 id: ProjectEntryId::from_proto(entry.id),
3432 kind,
3433 path,
3434 inode: entry.inode,
3435 mtime: mtime.into(),
3436 is_symlink: entry.is_symlink,
3437 is_ignored: entry.is_ignored,
3438 })
3439 } else {
3440 Err(anyhow!(
3441 "missing mtime in remote worktree entry {:?}",
3442 entry.path
3443 ))
3444 }
3445 }
3446}
3447
3448#[cfg(test)]
3449mod tests {
3450 use super::*;
3451 use fs::{FakeFs, RealFs};
3452 use gpui::{executor::Deterministic, TestAppContext};
3453 use pretty_assertions::assert_eq;
3454 use rand::prelude::*;
3455 use serde_json::json;
3456 use std::{env, fmt::Write};
3457 use util::{http::FakeHttpClient, test::temp_tree};
3458
    // Verifies that snapshot traversal lists entries in depth-first order and
    // that gitignored entries ("a/b") only appear when requested.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // With `include_ignored == false`, "a/b" is omitted.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // With `include_ignored == true`, "a/b" is included.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3516
    // Verifies that scanning terminates in the presence of symlink cycles:
    // the symlink entries themselves are listed, but (per the expected entry
    // lists below) their targets are not traversed into, and renaming a
    // symlink is picked up by the rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back at its parent, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks should be reflected after rescan.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3596
    // Verifies that `is_ignored` is computed correctly on the initial scan
    // and on rescans, including rules inherited from a .gitignore in an
    // ancestor directory outside the worktree root.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        // The worktree root is "tree", so the outer .gitignore is an
        // ancestor ignore file.
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Initial scan: both local and ancestor ignore rules apply.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Newly created files must get the same ignore classification on
        // rescan.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3675
    // Verifies mapping of worktree paths to their containing git repository
    // (including nested repositories), that repository-internal changes emit
    // `UpdatedGitRepositories` events, and that removing `.git` unregisters
    // the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // "c.txt" is outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1").to_owned())
            );

            // Paths inside a nested repository resolve to the inner repo.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1/deps/dep1").to_owned())
            );
        });

        // Record `UpdatedGitRepositories` events for the assertions below.
        let repo_update_events = Arc::new(Mutex::new(vec![]));
        tree.update(cx, |_, cx| {
            let repo_update_events = repo_update_events.clone();
            cx.subscribe(&tree, move |_, _, event, _| {
                if let Event::UpdatedGitRepositories(update) = event {
                    repo_update_events.lock().push(update.clone());
                }
            })
            .detach();
        });

        // A change inside `.git` should produce an update for that repo.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        assert_eq!(
            repo_update_events.lock()[0]
                .keys()
                .cloned()
                .collect::<Vec<Arc<Path>>>(),
            vec![Path::new("dir1").into()]
        );

        // Deleting `.git` removes the repository association.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3766
3767 #[gpui::test]
3768 async fn test_git_status(cx: &mut TestAppContext) {
3769 #[track_caller]
3770 fn git_init(path: &Path) -> git2::Repository {
3771 git2::Repository::init(path).expect("Failed to initialize git repository")
3772 }
3773
3774 #[track_caller]
3775 fn git_add(path: &Path, repo: &git2::Repository) {
3776 let mut index = repo.index().expect("Failed to get index");
3777 index.add_path(path).expect("Failed to add a.txt");
3778 index.write().expect("Failed to write index");
3779 }
3780
3781 #[track_caller]
3782 fn git_remove_index(path: &Path, repo: &git2::Repository) {
3783 let mut index = repo.index().expect("Failed to get index");
3784 index.remove_path(path).expect("Failed to add a.txt");
3785 index.write().expect("Failed to write index");
3786 }
3787
3788 #[track_caller]
3789 fn git_commit(msg: &'static str, repo: &git2::Repository) {
3790 let signature = repo.signature().unwrap();
3791 let oid = repo.index().unwrap().write_tree().unwrap();
3792 let tree = repo.find_tree(oid).unwrap();
3793 if let Some(head) = repo.head().ok() {
3794 let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
3795
3796 let parent_commit = parent_obj.as_commit().unwrap();
3797
3798 repo.commit(
3799 Some("HEAD"),
3800 &signature,
3801 &signature,
3802 msg,
3803 &tree,
3804 &[parent_commit],
3805 )
3806 .expect("Failed to commit with parent");
3807 } else {
3808 repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
3809 .expect("Failed to commit");
3810 }
3811 }
3812
3813 #[track_caller]
3814 fn git_stash(repo: &mut git2::Repository) {
3815 let signature = repo.signature().unwrap();
3816 repo.stash_save(&signature, "N/A", None)
3817 .expect("Failed to stash");
3818 }
3819
3820 #[track_caller]
3821 fn git_reset(offset: usize, repo: &git2::Repository) {
3822 let head = repo.head().expect("Couldn't get repo head");
3823 let object = head.peel(git2::ObjectType::Commit).unwrap();
3824 let commit = object.as_commit().unwrap();
3825 let new_head = commit
3826 .parents()
3827 .inspect(|parnet| {
3828 parnet.message();
3829 })
3830 .skip(offset)
3831 .next()
3832 .expect("Not enough history");
3833 repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
3834 .expect("Could not reset");
3835 }
3836
3837 #[allow(dead_code)]
3838 #[track_caller]
3839 fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
3840 repo.statuses(None)
3841 .unwrap()
3842 .iter()
3843 .map(|status| (status.path().unwrap().to_string(), status.status()))
3844 .collect()
3845 }
3846
3847 let root = temp_tree(json!({
3848 "project": {
3849 "a.txt": "a",
3850 "b.txt": "bb",
3851 },
3852
3853 }));
3854
3855 let http_client = FakeHttpClient::with_404_response();
3856 let client = cx.read(|cx| Client::new(http_client, cx));
3857 let tree = Worktree::local(
3858 client,
3859 root.path(),
3860 true,
3861 Arc::new(RealFs),
3862 Default::default(),
3863 &mut cx.to_async(),
3864 )
3865 .await
3866 .unwrap();
3867
3868 const A_TXT: &'static str = "a.txt";
3869 const B_TXT: &'static str = "b.txt";
3870 let work_dir = root.path().join("project");
3871
3872 let mut repo = git_init(work_dir.as_path());
3873 git_add(Path::new(A_TXT), &repo);
3874 git_commit("Initial commit", &repo);
3875
3876 std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
3877
3878 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3879 .await;
3880 tree.flush_fs_events(cx).await;
3881
3882 // Check that the right git state is observed on startup
3883 tree.read_with(cx, |tree, _cx| {
3884 let snapshot = tree.snapshot();
3885 assert_eq!(snapshot.repository_entries.iter().count(), 1);
3886 let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
3887 assert_eq!(dir.0.as_ref(), Path::new("project"));
3888
3889 assert_eq!(repo.statuses.iter().count(), 2);
3890 assert_eq!(
3891 repo.statuses.get(&Path::new(A_TXT).into()),
3892 Some(&GitStatus::Modified)
3893 );
3894 assert_eq!(
3895 repo.statuses.get(&Path::new(B_TXT).into()),
3896 Some(&GitStatus::Added)
3897 );
3898 });
3899
3900 git_add(Path::new(A_TXT), &repo);
3901 git_add(Path::new(B_TXT), &repo);
3902 git_commit("Committing modified and added", &repo);
3903 tree.flush_fs_events(cx).await;
3904
3905 // Check that repo only changes are tracked
3906 tree.read_with(cx, |tree, _cx| {
3907 let snapshot = tree.snapshot();
3908 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
3909
3910 assert_eq!(repo.statuses.iter().count(), 0);
3911 assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
3912 assert_eq!(repo.statuses.get(&Path::new(B_TXT).into()), None);
3913 });
3914
3915 git_reset(0, &repo);
3916 git_remove_index(Path::new(B_TXT), &repo);
3917 git_stash(&mut repo);
3918 tree.flush_fs_events(cx).await;
3919
3920 // Check that more complex repo changes are tracked
3921 tree.read_with(cx, |tree, _cx| {
3922 let snapshot = tree.snapshot();
3923 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
3924
3925 dbg!(&repo.statuses);
3926
3927 assert_eq!(repo.statuses.iter().count(), 1);
3928 assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
3929 assert_eq!(
3930 repo.statuses.get(&Path::new(B_TXT).into()),
3931 Some(&GitStatus::Added)
3932 );
3933 });
3934
3935 std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
3936 tree.flush_fs_events(cx).await;
3937
3938 // Check that non-repo behavior is tracked
3939 tree.read_with(cx, |tree, _cx| {
3940 let snapshot = tree.snapshot();
3941 let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
3942
3943 assert_eq!(repo.statuses.iter().count(), 0);
3944 assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
3945 assert_eq!(repo.statuses.get(&Path::new(B_TXT).into()), None);
3946 });
3947 }
3948
    // Verifies that writing a file through the worktree creates the entry and
    // assigns the correct `is_ignored` flag, even for files inside ignored
    // directories.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        // Write one file in a tracked directory and one in an ignored one.
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4002
    // Verifies that creating an entry while the initial scan is still running
    // works, and that the resulting snapshot can be reproduced on a remote
    // replica via `build_update`/`apply_remote_update`.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Snapshot taken before the scan has completed; used as the remote
        // replica's starting point below.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Create "a/e" (including the intermediate "a") mid-scan.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Applying the delta to the early snapshot must reproduce the final
        // snapshot exactly.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
4051
    // Randomized test: performs worktree mutations while the initial scan is
    // still in progress, repeatedly checking snapshot invariants and that a
    // remote replica fed with `build_update` deltas stays in sync.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Both knobs are overridable from the environment for stress runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Base snapshot for the simulated remote replica.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally sync the replica mid-scan and verify it matches.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        // Final sync after the scan has completed.
        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
4128
4129 #[gpui::test(iterations = 100)]
4130 async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
4131 let operations = env::var("OPERATIONS")
4132 .map(|o| o.parse().unwrap())
4133 .unwrap_or(40);
4134 let initial_entries = env::var("INITIAL_ENTRIES")
4135 .map(|o| o.parse().unwrap())
4136 .unwrap_or(20);
4137
4138 let root_dir = Path::new("/test");
4139 let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4140 fs.as_fake().insert_tree(root_dir, json!({})).await;
4141 for _ in 0..initial_entries {
4142 randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4143 }
4144 log::info!("generated initial tree");
4145
4146 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4147 let worktree = Worktree::local(
4148 client.clone(),
4149 root_dir,
4150 true,
4151 fs.clone(),
4152 Default::default(),
4153 &mut cx.to_async(),
4154 )
4155 .await
4156 .unwrap();
4157
4158 worktree
4159 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4160 .await;
4161
4162 // After the initial scan is complete, the `UpdatedEntries` event can
4163 // be used to follow along with all changes to the worktree's snapshot.
4164 worktree.update(cx, |tree, cx| {
4165 let mut paths = tree
4166 .as_local()
4167 .unwrap()
4168 .paths()
4169 .cloned()
4170 .collect::<Vec<_>>();
4171
4172 cx.subscribe(&worktree, move |tree, _, event, _| {
4173 if let Event::UpdatedEntries(changes) = event {
4174 for (path, change_type) in changes.iter() {
4175 let path = path.clone();
4176 let ix = match paths.binary_search(&path) {
4177 Ok(ix) | Err(ix) => ix,
4178 };
4179 match change_type {
4180 PathChange::Added => {
4181 assert_ne!(paths.get(ix), Some(&path));
4182 paths.insert(ix, path);
4183 }
4184 PathChange::Removed => {
4185 assert_eq!(paths.get(ix), Some(&path));
4186 paths.remove(ix);
4187 }
4188 PathChange::Updated => {
4189 assert_eq!(paths.get(ix), Some(&path));
4190 }
4191 PathChange::AddedOrUpdated => {
4192 if paths[ix] != path {
4193 paths.insert(ix, path);
4194 }
4195 }
4196 }
4197 }
4198 let new_paths = tree.paths().cloned().collect::<Vec<_>>();
4199 assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
4200 }
4201 })
4202 .detach();
4203 });
4204
4205 let mut snapshots = Vec::new();
4206 let mut mutations_len = operations;
4207 while mutations_len > 1 {
4208 randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4209 let buffered_event_count = fs.as_fake().buffered_event_count().await;
4210 if buffered_event_count > 0 && rng.gen_bool(0.3) {
4211 let len = rng.gen_range(0..=buffered_event_count);
4212 log::info!("flushing {} events", len);
4213 fs.as_fake().flush_events(len).await;
4214 } else {
4215 randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
4216 mutations_len -= 1;
4217 }
4218
4219 cx.foreground().run_until_parked();
4220 if rng.gen_bool(0.2) {
4221 log::info!("storing snapshot {}", snapshots.len());
4222 let snapshot =
4223 worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4224 snapshots.push(snapshot);
4225 }
4226 }
4227
4228 log::info!("quiescing");
4229 fs.as_fake().flush_events(usize::MAX).await;
4230 cx.foreground().run_until_parked();
4231 let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4232 snapshot.check_invariants();
4233
4234 {
4235 let new_worktree = Worktree::local(
4236 client.clone(),
4237 root_dir,
4238 true,
4239 fs.clone(),
4240 Default::default(),
4241 &mut cx.to_async(),
4242 )
4243 .await
4244 .unwrap();
4245 new_worktree
4246 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4247 .await;
4248 let new_snapshot =
4249 new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4250 assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
4251 }
4252
4253 for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
4254 let include_ignored = rng.gen::<bool>();
4255 if !include_ignored {
4256 let mut entries_by_path_edits = Vec::new();
4257 let mut entries_by_id_edits = Vec::new();
4258 for entry in prev_snapshot
4259 .entries_by_id
4260 .cursor::<()>()
4261 .filter(|e| e.is_ignored)
4262 {
4263 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4264 entries_by_id_edits.push(Edit::Remove(entry.id));
4265 }
4266
4267 prev_snapshot
4268 .entries_by_path
4269 .edit(entries_by_path_edits, &());
4270 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4271 }
4272
4273 let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
4274 prev_snapshot.apply_remote_update(update.clone()).unwrap();
4275 assert_eq!(
4276 prev_snapshot.to_vec(include_ignored),
4277 snapshot.to_vec(include_ignored),
4278 "wrong update for snapshot {i}. update: {:?}",
4279 update
4280 );
4281 }
4282 }
4283
    // Applies one random mutation through the worktree's own API (as opposed
    // to `randomly_mutate_fs`, which mutates the filesystem directly):
    // ~1/3 delete, ~1/3 rename, ~1/3 create or overwrite.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // Delete the entry (never the worktree root).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // Rename the entry to a random location (never the root).
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into its own subtree.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Create a child (for directories) or overwrite (for files).
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4343
    // Applies one random mutation directly to the fake filesystem under
    // `root_path`: with probability `insertion_probability` creates a file or
    // directory, with probability 0.05 writes a random `.gitignore`, and
    // otherwise renames or deletes an existing path.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition existing paths into files and directories.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Always insert when the tree is (nearly) empty so deletion/rename
        // below has something to operate on.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Write a .gitignore in a random directory, ignoring a random
            // subset of the files and subdirectories beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Ignore patterns are written relative to the .gitignore's dir.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Pick any file, or any directory except the root (dirs[0]).
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Rename into a directory that is not beneath the source.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory outright, to
                // exercise overwriting renames.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4502
4503 fn gen_name(rng: &mut impl Rng) -> String {
4504 (0..6)
4505 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4506 .map(char::from)
4507 .collect()
4508 }
4509
    impl LocalSnapshot {
        /// Test-only consistency checks: the two entry indexes must agree,
        /// the file iterators must match the raw entry list, and traversal
        /// order must equal the sorted path order.
        fn check_invariants(&self) {
            // `entries_by_path` (sorted by path) and `entries_by_id` must
            // contain exactly the same (path, id) pairs.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // `files(..)` must yield exactly the file entries, and
            // `files(false, ..)` only the non-ignored ones.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walking the tree via `child_entries` must produce the same
            // order as iterating `entries_by_path` directly.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked .gitignore must correspond to existing entries
            // for both its parent directory and the ignore file itself.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Test-only helper: flattens the snapshot into a sorted list of
        /// (path, inode, is_ignored) tuples for equality comparisons.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4584}