1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use lsp::LanguageServerId;
30use parking_lot::Mutex;
31use postage::{
32 barrier,
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 pin::Pin,
48 sync::{
49 atomic::{AtomicUsize, Ordering::SeqCst},
50 Arc,
51 },
52 time::{Duration, SystemTime},
53};
54use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
55use util::{paths::HOME, ResultExt, TryFutureExt};
56
/// Unique identifier for a [`Worktree`] within a project. For local worktrees
/// it is derived from the worktree model's id (see `Worktree::local`); for
/// remote worktrees it mirrors the id sent by the host (see `Worktree::remote`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
59
/// A tracked directory tree: either one backed by this machine's filesystem
/// (`Local`) or a replica of a collaborator's worktree (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// A worktree backed by the local filesystem, kept current by a background
/// scanner task.
pub struct LocalWorktree {
    // Latest snapshot of the tree, replaced as the scanner reports updates.
    snapshot: LocalSnapshot,
    // Requests that the scanner rescan specific absolute paths; the barrier
    // sender is dropped once the rescan has been processed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch channel holding `true` while the background scanner is working.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    // Keeps the background scanner alive for the lifetime of this worktree.
    _background_scanner_task: Task<()>,
    // Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    // Full diagnostics per file path, as a sorted list per language server.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    // Error/warning counts per file path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    // Visibility flag supplied at creation; exposed via `Worktree::is_visible`.
    visible: bool,
}
83
/// A replica of a worktree living on a remote host, kept in sync via
/// `proto::UpdateWorktree` messages.
pub struct RemoteWorktree {
    // Foreground copy of the snapshot, refreshed after each applied update.
    snapshot: Snapshot,
    // Snapshot mutated on a background task as updates arrive from the host.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // Feeds remote updates to the background task; `None` once disconnected.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters (kept sorted by scan id) resolved once that scan is observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    // Error/warning counts per file path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    // Visibility flag from the host's worktree metadata.
    visible: bool,
    // Set once the connection to the host is lost.
    disconnected: bool,
}
96
/// An immutable view of a worktree's contents at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root on the host filesystem.
    abs_path: Arc<Path>,
    // File name of the root; empty when the root path has no file name.
    root_name: String,
    // Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    // All entries, keyed/ordered by path.
    entries_by_path: SumTree<Entry>,
    // Index of the same entries keyed by entry id.
    entries_by_id: SumTree<PathEntry>,
    // Git repositories found in the tree, keyed by their work directory.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
119
/// A git repository discovered within the worktree.
#[derive(Clone, Debug)]
pub struct RepositoryEntry {
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
    // Worktree entry id of the .git directory itself.
    pub(crate) git_dir_entry_id: ProjectEntryId,
    // Scan id at which this entry was last updated; compared in
    // `LocalWorktree::changed_repos` to detect modified repositories.
    pub(crate) scan_id: usize,
    // TODO: pub(crate) head_ref: Arc<str>,
}
129
impl RepositoryEntry {
    // Note that this path should be relative to the worktree root.
    /// Returns whether `path` lies inside this repository's `.git` directory.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}
136
/// This path corresponds to the 'content path' (the folder that contains the .git)
/// Stored relative to the worktree root (see the notes in the impl below).
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
140
141impl RepositoryWorkDirectory {
142 // Note that these paths should be relative to the worktree root.
143 pub(crate) fn contains(&self, path: &Path) -> bool {
144 path.starts_with(self.0.as_ref())
145 }
146
147 pub(crate) fn relativize(&self, path: &Path) -> Option<&Path> {
148 path.strip_prefix(self.0.as_ref()).ok()
149 }
150}
151
152impl Default for RepositoryWorkDirectory {
153 fn default() -> Self {
154 RepositoryWorkDirectory(Arc::from(Path::new("")))
155 }
156}
157
/// Snapshot state that only exists for local worktrees: gitignore caches,
/// discovered git repositories, and background-scanner bookkeeping.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    // Parsed gitignore per containing directory (absolute path). The usize
    // is presumably the scan id at which the ignore was loaded — TODO
    // confirm against the background scanner.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    git_repositories: TreeMap<ProjectEntryId, Arc<Mutex<dyn GitRepository>>>,
    // NOTE(review): keys look like filesystem identifiers (e.g. inodes) —
    // confirm where entries are inserted.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Shared counter used to mint ids for newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
167
// Lets a `LocalSnapshot` be used anywhere a `&Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
175
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
181
/// Messages sent from the background scanner to the `LocalWorktree`.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        // What changed relative to the previously reported snapshot.
        changes: HashMap<Arc<Path>, PathChange>,
        // Dropped to unblock a caller waiting on this batch of paths.
        barrier: Option<barrier::Sender>,
        // Whether the scanner is still working after this update.
        scanning: bool,
    },
}
191
/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot to the task that diffs and uploads it.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signaled to make the upload task retry after a failed request.
    resume_updates: watch::Sender<()>,
    // Keeps the snapshot-upload task alive while shared.
    _maintain_remote_snapshot: Task<Option<()>>,
}
198
/// Events emitted by a `Worktree` model.
pub enum Event {
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    UpdatedGitRepositories(Vec<RepositoryEntry>),
}
203
// Registers `Worktree` as a gpui model whose events are `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
207
208impl Worktree {
    /// Creates a local worktree rooted at `path`, stats the root, and starts
    /// a background scanner that keeps the snapshot up to date.
    ///
    /// Returns an error if the root path cannot be statted.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            // Start at scan_id 1 with completed_scan_id 0, so waiters can
            // tell "never scanned" apart from "first scan done".
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry, if the path existed.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scanner state changes into this model: maintain the
            // `is_scanning` flag, adopt new snapshots, and emit events.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier unblocks whoever
                                // requested this rescan.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Run the scanner on the background executor, driven by
            // filesystem events (debounced to 100ms) and explicit rescan
            // requests arriving on `path_changes_rx`.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
321
    /// Creates a remote worktree replica from the host's metadata.
    ///
    /// Updates received via `update_from_remote` are applied to a background
    /// snapshot, which is then mirrored into the foreground snapshot, waking
    /// any subscribers waiting on a particular scan id.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming updates off the main thread, then signal that
            // a newer snapshot is available.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Mirror the background snapshot into the model and resolve any
            // snapshot subscriptions whose scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
404
405 pub fn as_local(&self) -> Option<&LocalWorktree> {
406 if let Worktree::Local(worktree) = self {
407 Some(worktree)
408 } else {
409 None
410 }
411 }
412
413 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
414 if let Worktree::Remote(worktree) = self {
415 Some(worktree)
416 } else {
417 None
418 }
419 }
420
421 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
422 if let Worktree::Local(worktree) = self {
423 Some(worktree)
424 } else {
425 None
426 }
427 }
428
429 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
430 if let Worktree::Remote(worktree) = self {
431 Some(worktree)
432 } else {
433 None
434 }
435 }
436
437 pub fn is_local(&self) -> bool {
438 matches!(self, Worktree::Local(_))
439 }
440
441 pub fn is_remote(&self) -> bool {
442 !self.is_local()
443 }
444
    /// Returns a plain `Snapshot` of this worktree, regardless of variant.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
451
    /// The id of the most recently started filesystem scan.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
458
    /// The latest scan id whose scan (and all preceding scans) completed.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
465
    /// Visibility flag set when the worktree was created.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
472
    /// The collaboration replica id. Local worktrees are always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
479
480 pub fn diagnostic_summaries(
481 &self,
482 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
483 match self {
484 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
485 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
486 }
487 .iter()
488 .flat_map(|(path, summaries)| {
489 summaries
490 .iter()
491 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
492 })
493 }
494
    /// Absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
501}
502
503impl LocalWorktree {
    /// Whether the absolute path `path` lies within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
507
508 fn absolutize(&self, path: &Path) -> PathBuf {
509 if path.file_name().is_some() {
510 self.abs_path.join(path)
511 } else {
512 self.abs_path.to_path_buf()
513 }
514 }
515
    /// Loads the file at `path` into a new `Buffer` model with the given
    /// remote `id`, computing its git diff against the index once loaded.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Construct the text buffer off the main thread.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
538
539 pub fn diagnostics_for_path(
540 &self,
541 path: &Path,
542 ) -> Vec<(
543 LanguageServerId,
544 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
545 )> {
546 self.diagnostics.get(path).cloned().unwrap_or_default()
547 }
548
    /// Replaces the diagnostics produced by `server_id` for `worktree_path`.
    ///
    /// Keeps `diagnostics` and `diagnostic_summaries` consistent, forwards
    /// the new summary to collaborators when the worktree is shared, and
    /// returns whether anything changed (either the old or new summary was
    /// non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // Nothing remains for this server: drop its entry, and the whole
            // path's entry if no other server has diagnostics there.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            // Insert or replace this server's diagnostics, keeping the list
            // sorted by server id.
            summaries_by_server_id.insert(server_id, new_summary);
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            // Broadcast the updated summary to collaborators when shared.
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
608
    /// Adopts a new snapshot from the background scanner, republishing it to
    /// collaborators (when shared) and emitting an event if any git
    /// repositories changed between the old and new snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos = Self::changed_repos(
            &self.snapshot.repository_entries,
            &new_snapshot.repository_entries,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
624
625 fn changed_repos(
626 old_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
627 new_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
628 ) -> Vec<RepositoryEntry> {
629 fn diff<'a>(
630 a: impl Iterator<Item = &'a RepositoryEntry>,
631 b: impl Iterator<Item = &'a RepositoryEntry>,
632 updated: &mut HashMap<&'a Path, RepositoryEntry>,
633 ) {
634 for a_repo in a {
635 let matched = b.find(|b_repo| {
636 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
637 });
638
639 if matched.is_none() {
640 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
641 }
642 }
643 }
644
645 let mut updated = HashMap::<&Path, RepositoryEntry>::default();
646
647 diff(old_repos.values(), new_repos.values(), &mut updated);
648 diff(new_repos.values(), old_repos.values(), &mut updated);
649
650 updated.into_values().collect()
651 }
652
653 pub fn scan_complete(&self) -> impl Future<Output = ()> {
654 let mut is_scanning_rx = self.is_scanning.1.clone();
655 async move {
656 let mut is_scanning = is_scanning_rx.borrow().clone();
657 while is_scanning {
658 if let Some(value) = is_scanning_rx.recv().await {
659 is_scanning = value;
660 } else {
661 break;
662 }
663 }
664 }
665 }
666
    /// Clones the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
670
    /// Builds the protobuf metadata describing this worktree to peers.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
679
    /// Reads the file at `path`, returning its `File` metadata, its text,
    /// and (when the file is inside a git repository) the corresponding
    /// index text to diff against.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            // NOTE(review): `repo_for`'s return type (with `content_path`
            // and `repo` fields) is declared outside this excerpt — it is
            // not the `RepositoryEntry` defined above.
            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
728
729 pub fn save_buffer(
730 &self,
731 buffer_handle: ModelHandle<Buffer>,
732 path: Arc<Path>,
733 has_changed_file: bool,
734 cx: &mut ModelContext<Worktree>,
735 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
736 let handle = cx.handle();
737 let buffer = buffer_handle.read(cx);
738
739 let rpc = self.client.clone();
740 let buffer_id = buffer.remote_id();
741 let project_id = self.share.as_ref().map(|share| share.project_id);
742
743 let text = buffer.as_rope().clone();
744 let fingerprint = text.fingerprint();
745 let version = buffer.version();
746 let save = self.write_file(path, text, buffer.line_ending(), cx);
747
748 cx.as_mut().spawn(|mut cx| async move {
749 let entry = save.await?;
750
751 if has_changed_file {
752 let new_file = Arc::new(File {
753 entry_id: entry.id,
754 worktree: handle,
755 path: entry.path,
756 mtime: entry.mtime,
757 is_local: true,
758 is_deleted: false,
759 });
760
761 if let Some(project_id) = project_id {
762 rpc.send(proto::UpdateBufferFile {
763 project_id,
764 buffer_id,
765 file: Some(new_file.to_proto()),
766 })
767 .log_err();
768 }
769
770 buffer_handle.update(&mut cx, |buffer, cx| {
771 if has_changed_file {
772 buffer.file_updated(new_file, cx).detach();
773 }
774 });
775 }
776
777 if let Some(project_id) = project_id {
778 rpc.send(proto::BufferSaved {
779 project_id,
780 buffer_id,
781 version: serialize_version(&version),
782 mtime: Some(entry.mtime.into()),
783 fingerprint: serialize_fingerprint(fingerprint),
784 })?;
785 }
786
787 buffer_handle.update(&mut cx, |buffer, cx| {
788 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
789 });
790
791 Ok((version, fingerprint, entry.mtime))
792 })
793 }
794
    /// Creates a new (empty) file or directory at `path`, then rescans it
    /// and returns the resulting worktree entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // An empty save creates the file.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
821
    /// Saves `text` to `path` with the given line ending, then rescans the
    /// path and returns its refreshed worktree entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
844
    /// Deletes the file or directory for `entry_id` from disk, then waits
    /// for the scanner to process the removal. Returns `None` when no entry
    /// with that id exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root first so the scanner sees the same path
            // form it watches.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path, and wait until it
            // has done so before resolving.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
887
    /// Renames the entry for `entry_id` to `new_path` on disk, then rescans
    /// both paths and returns the entry at its new location. Returns `None`
    /// when no entry with that id exists.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
914
    /// Recursively copies the entry for `entry_id` to `new_path` on disk,
    /// then rescans the destination and returns its entry. Returns `None`
    /// when no entry with that id exists.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
946
    /// Asks the background scanner to rescan `path` (and `old_path`, when a
    /// rename occurred), waits for the scan, and returns the refreshed entry
    /// for `path` from the updated snapshot.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path without a file name refers to the worktree root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // Wait for the scanner to process these paths before reading
            // the entry back out of the snapshot.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
984
    /// Starts (or resumes) sharing this worktree with collaborators on
    /// `project_id`.
    ///
    /// Sends the current diagnostic summaries up front, then spawns a task
    /// that uploads an incremental worktree update for every new snapshot.
    /// The returned task resolves once the first full update has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already shared: just nudge the upload task to resume.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first diff sends
                    // the entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Retry each chunk until it succeeds, pausing
                            // until `resume_updates` is signaled.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // Resolve the caller's future after the first full
                        // snapshot has been uploaded.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1071
1072 pub fn unshare(&mut self) {
1073 self.share.take();
1074 }
1075
    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1079}
1080
1081impl RemoteWorktree {
    /// Clones the foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1085
    /// Marks the worktree as disconnected: stops accepting remote updates
    /// and drops all pending snapshot subscriptions (their receivers will
    /// observe a dropped sender).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
1091
    /// Asks the host to save the buffer, then applies the returned version,
    /// fingerprint, and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1124
    /// Queues an update from the host for application on the background
    /// task. Silently ignored after `disconnected_from_host` has run.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }
1132
1133 fn observed_snapshot(&self, scan_id: usize) -> bool {
1134 self.completed_scan_id >= scan_id
1135 }
1136
    /// Returns a future that resolves once a snapshot with at least
    /// `scan_id` has been observed. The future fails (via the dropped
    /// sender) if the worktree is already disconnected from the host.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            // Insert the waiter, keeping the queue sorted by scan id so the
            // update loop can resolve waiters front-to-back.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1157
1158 pub fn update_diagnostic_summary(
1159 &mut self,
1160 path: Arc<Path>,
1161 summary: &proto::DiagnosticSummary,
1162 ) {
1163 let server_id = LanguageServerId(summary.language_server_id as usize);
1164 let summary = DiagnosticSummary {
1165 error_count: summary.error_count as usize,
1166 warning_count: summary.warning_count as usize,
1167 };
1168
1169 if summary.is_empty() {
1170 if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1171 summaries.remove(&server_id);
1172 if summaries.is_empty() {
1173 self.diagnostic_summaries.remove(&path);
1174 }
1175 }
1176 } else {
1177 self.diagnostic_summaries
1178 .entry(path)
1179 .or_default()
1180 .insert(server_id, summary);
1181 }
1182 }
1183
1184 pub fn insert_entry(
1185 &mut self,
1186 entry: proto::Entry,
1187 scan_id: usize,
1188 cx: &mut ModelContext<Worktree>,
1189 ) -> Task<Result<Entry>> {
1190 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1191 cx.spawn(|this, mut cx| async move {
1192 wait_for_snapshot.await?;
1193 this.update(&mut cx, |worktree, _| {
1194 let worktree = worktree.as_remote_mut().unwrap();
1195 let mut snapshot = worktree.background_snapshot.lock();
1196 let entry = snapshot.insert_entry(entry);
1197 worktree.snapshot = snapshot.clone();
1198 entry
1199 })
1200 })
1201 }
1202
1203 pub(crate) fn delete_entry(
1204 &mut self,
1205 id: ProjectEntryId,
1206 scan_id: usize,
1207 cx: &mut ModelContext<Worktree>,
1208 ) -> Task<Result<()>> {
1209 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1210 cx.spawn(|this, mut cx| async move {
1211 wait_for_snapshot.await?;
1212 this.update(&mut cx, |worktree, _| {
1213 let worktree = worktree.as_remote_mut().unwrap();
1214 let mut snapshot = worktree.background_snapshot.lock();
1215 snapshot.delete_entry(id);
1216 worktree.snapshot = snapshot.clone();
1217 });
1218 Ok(())
1219 })
1220 }
1221}
1222
impl Snapshot {
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree's root on the host's filesystem.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts (or replaces) a single entry received over the wire, keeping
    /// the path-keyed and id-keyed trees consistent with each other.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id previously mapped to a different path, remove the stale
        // path-keyed record.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes an entry and every entry beneath its path, returning the
    /// removed entry's path if it existed.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // Skip the removed entry and all of its descendants, deleting
            // their id-keyed records as we go.
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

    /// Applies a batch of entry removals and insertions received from the
    /// host, advancing this snapshot's scan ids.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // An updated entry may have moved: drop its old path-keyed record.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored files in the worktree.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Begins a traversal at the entry at `start_offset` among the entries
    /// matching the `include_dirs` / `include_ignored` filters.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Begins a traversal positioned at the given path.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Traverses file entries only, starting at the given file index.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Traverses all entries, directories included.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, excluding the root (whose
    /// relative path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the immediate children of the given directory.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Looks up the entry at exactly the given path, if any.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1419
impl LocalSnapshot {
    /// Gives the most specific git repository for a given path.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
        self.repository_entries
            .closest(&RepositoryWorkDirectory(path.into()))
            .map(|(_, entry)| entry.to_owned())
    }

    pub(crate) fn repo_with_dot_git_containing(
        &mut self,
        path: &Path,
    ) -> Option<&mut RepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories_old
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Builds an update message containing every entry in this snapshot.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Builds an update describing the difference between this snapshot and
    /// `other`, an earlier snapshot of the same worktree.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        // Merge-join the two id-ordered entry sequences to detect added,
        // changed, and removed entries.
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Only in `self`: newly added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // In both: send only if it changed since `other`.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Only in `other`: removed.
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Inserts an entry discovered by the filesystem scanner, reusing an
    /// existing entry id where possible.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        // When a .gitignore file appears, (re)parse it for its directory.
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote an already-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If a different entry previously occupied this path, drop its
        // id-keyed record so the two trees stay consistent.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the children of a just-scanned directory, promoting the
    /// parent from `PendingDir` to `Dir`.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        // Populating a `.git` directory means we've found a repository whose
        // working directory is its parent.
        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();

            let key = RepositoryWorkDirectory(content_path.clone());
            if self.repository_entries.get(&key).is_none() {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.repository_entries.insert(
                        key,
                        RepositoryEntry {
                            git_dir_path: parent_path.clone(),
                            git_dir_entry_id: parent_entry.id,
                            scan_id: 0,
                        },
                    );

                    self.git_repositories.insert(parent_entry.id, repo)
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: prefer the id of a recently
    /// removed entry with the same inode, then the id of an existing entry
    /// at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes a path and all entries beneath it from the snapshot.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into before / removed / after.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember removed ids by inode so `reuse_entry_id` can restore
            // them if the same file reappears.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the directory's gitignore as changed in this scan.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            // Mark the containing repository as changed in this scan.
            let repo_entry_key = RepositoryWorkDirectory(path.parent().unwrap().into());
            self.snapshot
                .repository_entries
                .update(&repo_entry_key, |repo| repo.scan_id = self.snapshot.scan_id);
        }
    }

    /// Collects the inodes of all ancestor entries of `path` that exist in
    /// this snapshot.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignores applying to the given absolute path.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        // Apply ignores from the outermost ancestor inward; once any
        // ancestor directory is ignored, everything beneath it is too.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1727
1728async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1729 let contents = fs.load(abs_path).await?;
1730 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1731 let mut builder = GitignoreBuilder::new(parent);
1732 for line in contents.lines() {
1733 builder.add_line(Some(abs_path.into()), line)?;
1734 }
1735 Ok(builder.build()?)
1736}
1737
1738impl WorktreeId {
1739 pub fn from_usize(handle_id: usize) -> Self {
1740 Self(handle_id)
1741 }
1742
1743 pub(crate) fn from_proto(id: u64) -> Self {
1744 Self(id as usize)
1745 }
1746
1747 pub fn to_proto(&self) -> u64 {
1748 self.0 as u64
1749 }
1750
1751 pub fn to_usize(&self) -> usize {
1752 self.0
1753 }
1754}
1755
1756impl fmt::Display for WorktreeId {
1757 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1758 self.0.fmt(f)
1759 }
1760}
1761
1762impl Deref for Worktree {
1763 type Target = Snapshot;
1764
1765 fn deref(&self) -> &Self::Target {
1766 match self {
1767 Worktree::Local(worktree) => &worktree.snapshot,
1768 Worktree::Remote(worktree) => &worktree.snapshot,
1769 }
1770 }
1771}
1772
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    // Expose the local snapshot's methods directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1780
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    // Expose the snapshot's methods directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1788
impl fmt::Debug for LocalWorktree {
    // Debug output is delegated to the underlying snapshot.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1794
1795impl fmt::Debug for Snapshot {
1796 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1797 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1798 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1799
1800 impl<'a> fmt::Debug for EntriesByPath<'a> {
1801 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1802 f.debug_map()
1803 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1804 .finish()
1805 }
1806 }
1807
1808 impl<'a> fmt::Debug for EntriesById<'a> {
1809 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1810 f.debug_list().entries(self.0.iter()).finish()
1811 }
1812 }
1813
1814 f.debug_struct("Snapshot")
1815 .field("id", &self.id)
1816 .field("root_name", &self.root_name)
1817 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1818 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1819 .finish()
1820 }
1821}
1822
/// A handle to a file within a worktree, as seen by buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    /// The file's path relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    /// Whether the owning worktree is local to this machine.
    pub(crate) is_local: bool,
    pub(crate) is_deleted: bool,
}
1832
impl language::File for File {
    // Only files in local worktrees expose the `LocalFile` interface.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path relative to the worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Returns a path for display: the worktree's root name for visible
    /// worktrees, otherwise its absolute path (with `~` substituted for the
    /// home directory on local worktrees), followed by the file's
    /// worktree-relative path when it is non-empty.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means the file is the worktree root itself.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Converts this file handle to its wire representation.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1900
impl language::LocalFile for File {
    /// The file's absolute path on the local filesystem.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that this file's buffer was reloaded from
    /// disk. Only sends a message when the worktree is currently shared.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
1944
1945impl File {
1946 pub fn from_proto(
1947 proto: rpc::proto::File,
1948 worktree: ModelHandle<Worktree>,
1949 cx: &AppContext,
1950 ) -> Result<Self> {
1951 let worktree_id = worktree
1952 .read(cx)
1953 .as_remote()
1954 .ok_or_else(|| anyhow!("not remote"))?
1955 .id();
1956
1957 if worktree_id.to_proto() != proto.worktree_id {
1958 return Err(anyhow!("worktree id does not match file"));
1959 }
1960
1961 Ok(Self {
1962 worktree,
1963 path: Path::new(&proto.path).into(),
1964 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1965 entry_id: ProjectEntryId::from_proto(proto.entry_id),
1966 is_local: false,
1967 is_deleted: proto.is_deleted,
1968 })
1969 }
1970
1971 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
1972 file.and_then(|f| f.as_any().downcast_ref())
1973 }
1974
1975 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1976 self.worktree.read(cx).id()
1977 }
1978
1979 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
1980 if self.is_deleted {
1981 None
1982 } else {
1983 Some(self.entry_id)
1984 }
1985 }
1986}
1987
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry is excluded by a gitignore rule.
    pub is_ignored: bool,
}
1998
/// The kind of filesystem object an `Entry` represents.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose contents have not
    /// been scanned yet.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, carrying the character bag of its path for fuzzy matching.
    File(CharBag),
}
2005
/// Describes how a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    AddedOrUpdated,
}
2013
2014impl Entry {
2015 fn new(
2016 path: Arc<Path>,
2017 metadata: &fs::Metadata,
2018 next_entry_id: &AtomicUsize,
2019 root_char_bag: CharBag,
2020 ) -> Self {
2021 Self {
2022 id: ProjectEntryId::new(next_entry_id),
2023 kind: if metadata.is_dir {
2024 EntryKind::PendingDir
2025 } else {
2026 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2027 },
2028 path,
2029 inode: metadata.inode,
2030 mtime: metadata.mtime,
2031 is_symlink: metadata.is_symlink,
2032 is_ignored: false,
2033 }
2034 }
2035
2036 pub fn is_dir(&self) -> bool {
2037 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2038 }
2039
2040 pub fn is_file(&self) -> bool {
2041 matches!(self.kind, EntryKind::File(_))
2042 }
2043}
2044
2045impl sum_tree::Item for Entry {
2046 type Summary = EntrySummary;
2047
2048 fn summary(&self) -> Self::Summary {
2049 let visible_count = if self.is_ignored { 0 } else { 1 };
2050 let file_count;
2051 let visible_file_count;
2052 if self.is_file() {
2053 file_count = 1;
2054 visible_file_count = visible_count;
2055 } else {
2056 file_count = 0;
2057 visible_file_count = 0;
2058 }
2059
2060 EntrySummary {
2061 max_path: self.path.clone(),
2062 count: 1,
2063 visible_count,
2064 file_count,
2065 visible_file_count,
2066 }
2067 }
2068}
2069
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are keyed (and therefore ordered) by worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2077
/// Sum-tree summary for `Entry`: the rightmost path in a subtree plus
/// several entry counts used for offset-based traversal.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (rightmost) path in the summarized subtree.
    max_path: Arc<Path>,
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
}
2086
2087impl Default for EntrySummary {
2088 fn default() -> Self {
2089 Self {
2090 max_path: Arc::from(Path::new("")),
2091 count: 0,
2092 visible_count: 0,
2093 file_count: 0,
2094 visible_file_count: 0,
2095 }
2096 }
2097}
2098
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // `rhs` summarizes entries to the right, so its max path wins.
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2110
/// An item in the id-keyed tree, mapping a project entry id to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last updated.
    scan_id: usize,
}
2118
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // Summaries track the maximum entry id within a subtree.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2126
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    // Path entries are keyed (and therefore ordered) by project entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2134
/// Sum-tree summary for `PathEntry`: the greatest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2139
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // `summary` covers items to the right, so its max id supersedes ours.
        self.max_id = summary.max_id;
    }
}
2147
// Allows seeking through the id-keyed tree by project entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2153
/// Sum-tree key for `Entry`: the entry's worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2156
2157impl Default for PathKey {
2158 fn default() -> Self {
2159 Self(Path::new("").into())
2160 }
2161}
2162
// Allows seeking through the path-keyed tree by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2168
/// Scans a local worktree's directory in the background, keeping the shared
/// snapshot up to date as filesystem events arrive.
struct BackgroundScanner {
    /// The snapshot being built and maintained; mutex-protected.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel used to report scan progress.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests to rescan specific paths, each carrying a completion barrier.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The previously-published snapshot, plus the paths changed since it
    /// was captured.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2178
2179impl BackgroundScanner {
    /// Creates a scanner that will maintain the given snapshot, reporting
    /// progress on `status_updates_tx` and servicing refresh requests from
    /// `refresh_requests_rx`.
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            // Updates are diffed against this copy of the initial snapshot.
            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }
2197
    /// Drives the scanner: performs the initial scan of the worktree, then
    /// processes filesystem events and path refresh requests until the
    /// worktree is dropped.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore ignores everything, the whole root is
            // marked ignored.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Drain any additional event batches that are already ready.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Batch together any further events that are already ready.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2289
    /// Reloads the entries for the given paths, then sends a status update
    /// carrying `barrier` so the requester can await completion, returning
    /// the result of the status-update send.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2294
    /// Handles a batch of changed paths: reloads their entries, rescans any
    /// directories that require it, refreshes ignore statuses, prunes stale
    /// git repositories, and publishes a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            paths.sort_unstable();
            // Accumulate changed paths for the next diff against the
            // previously-published snapshot.
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop git repositories whose entries no longer exist.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|project_entry_id, _| snapshot.contains_entry(*project_entry_id));
        snapshot.git_repositories = git_repositories;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;
        drop(snapshot);

        self.send_status_update(false, None);
    }
2321
    /// Drains `scan_jobs_rx`, scanning directories concurrently on all
    /// available CPUs until the queue is closed and empty.
    ///
    /// While scanning, each worker also:
    /// - services path refresh requests first, so user-driven operations are
    ///   not starved by a long scan;
    /// - emits periodic progress updates (at most one per interval across all
    ///   workers) when `enable_progress_updates` is true.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If nobody is listening for scan states anymore, skip scanning entirely.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        // This worker won the race for the current interval, so
                                        // it sends the update.
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        // Another worker already sent an update for this
                                        // interval; just catch up with the shared counter.
                                        Err(count) => {
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2394
    /// Publishes the current snapshot on the status channel, along with the
    /// set of changes since the previously published snapshot.
    ///
    /// `scanning` indicates whether a scan is still in progress. `barrier`,
    /// if present, is forwarded so a refresh requester can observe when the
    /// update has been received.
    ///
    /// Returns `false` if the receiving end of the status channel is gone.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        // Swap the previously-published snapshot for the current one and take
        // the accumulated changed paths, resetting both for the next update.
        let mut old_snapshot = snapshot.snapshot.clone();
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2411
    /// Scans a single directory: lists its children, builds entries for them,
    /// applies `.gitignore` rules, commits the results to the snapshot, and
    /// enqueues scan jobs for subdirectories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory child (in order); `None` marks directories
        // that will not be scanned (e.g. recursive symlinks).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and stat'ing; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // the already-processed entries should rarely be numerous. Update the ignore
                // stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    // Each directory entry has a corresponding slot in `new_jobs`.
                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            // Everything inside an ignored directory is ignored.
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        // Commit the scanned entries (and the new `.gitignore`, if any) to the
        // snapshot before enqueueing jobs for the subdirectories.
        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2537
    /// Reloads the worktree entries corresponding to the given absolute
    /// paths, removing entries for paths that no longer exist on disk.
    ///
    /// If `scan_queue_tx` is provided, the affected subtrees are removed and
    /// directories are re-enqueued for a full recursive rescan.
    ///
    /// Returns the affected paths, relative to the worktree root, or `None`
    /// if the root path could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sort so ancestors precede their descendants, then drop any path
        // whose ancestor is also in the list — reloading the ancestor covers it.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Fetch metadata for all paths concurrently.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A non-recursive reload finishes within this call, so the new scan id
        // can immediately be marked complete.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    let scan_id = snapshot.scan_id;

                    // A change inside a repository's `.git` directory may have
                    // altered its index; reload it and record the scan id.
                    if let Some(repo) = snapshot.repo_with_dot_git_containing(&path) {
                        repo.repo.lock().reload_index();
                        repo.scan_id = scan_id;
                    }

                    let repo_with_path_in_dotgit = snapshot
                        .repository_entries
                        .iter()
                        .find_map(|(key, repo)| repo.in_dot_git(&path).then(|| key.clone()));
                    if let Some(key) = repo_with_path_in_dotgit {
                        snapshot
                            .repository_entries
                            .update(&key, |entry| entry.scan_id = scan_id);
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Recurse into directories, guarding against symlink
                        // cycles via the ancestor inode set.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2642
    /// Recomputes ignore status for all entries affected by `.gitignore`
    /// files that changed since the last completed scan, and prunes records
    /// of `.gitignore` files that were deleted.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        // Work against a clone of the snapshot; edits produced by the workers
        // below are applied to the shared snapshot separately.
        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A `.gitignore` touched after the last completed scan means
                // its directory's descendants need their status recomputed.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        // Forget `.gitignore` files that no longer exist, in both the local
        // clone and the shared snapshot.
        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of this path; processing the ancestor already
            // recurses into them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        // Close the queue so the workers below terminate once it drains.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2720
    /// Recomputes the ignore status of the immediate children of
    /// `job.abs_path`, writing any changes back to the shared snapshot and
    /// enqueueing child directories for the same treatment.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own `.gitignore`, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                // Everything inside an ignored directory is ignored.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Stage edits only for entries whose ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Apply the staged edits to the shared snapshot in one batch per tree.
        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2763
    /// Computes the per-path changes between two snapshots, restricted to the
    /// given `event_paths` and their descendants.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            // Walk both cursors in lockstep, diffing entries at or below `path`.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past `path` and
                        // its descendants.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2830
2831 async fn progress_timer(&self, running: bool) {
2832 if !running {
2833 return futures::future::pending().await;
2834 }
2835
2836 #[cfg(any(test, feature = "test-support"))]
2837 if self.fs.is_fake() {
2838 return self.executor.simulate_random_delay().await;
2839 }
2840
2841 smol::Timer::after(Duration::from_millis(100)).await;
2842 }
2843}
2844
2845fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2846 let mut result = root_char_bag;
2847 result.extend(
2848 path.to_string_lossy()
2849 .chars()
2850 .map(|c| c.to_ascii_lowercase()),
2851 );
2852 result
2853}
2854
/// A unit of work for the background scanner: scan the directory at
/// `abs_path` and enqueue jobs for its subdirectories.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// The same directory, relative to the worktree root.
    path: Arc<Path>,
    /// Ignore rules in effect for this directory's contents.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
2862
/// A unit of work for recomputing ignore statuses: update the children of
/// `abs_path` and enqueue further jobs for its subdirectories.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory whose children should be updated.
    abs_path: Arc<Path>,
    /// Ignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for subdirectories are enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2868
/// Test-support extensions for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all FS events that predate this call have been processed
    /// by the worktree (see the implementation below for how).
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2876
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to notice it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for the deletion to be picked up too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2917
/// Running totals accumulated while traversing the entry tree; used as a
/// `sum_tree` dimension so a `Traversal` can seek by filtered entry counts.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// Greatest entry path accumulated so far.
    max_path: &'a Path,
    /// Total number of entries.
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
}
2926
2927impl<'a> TraversalProgress<'a> {
2928 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2929 match (include_ignored, include_dirs) {
2930 (true, true) => self.count,
2931 (true, false) => self.file_count,
2932 (false, true) => self.visible_count,
2933 (false, false) => self.visible_file_count,
2934 }
2935 }
2936}
2937
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Folds an entry summary into the running totals.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2947
impl<'a> Default for TraversalProgress<'a> {
    /// Zeroed progress, rooted at the empty path.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2959
/// A cursor over worktree entries that can optionally skip directories
/// and/or ignored entries while iterating in path order.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    /// Whether ignored entries are yielded.
    include_ignored: bool,
    /// Whether directory entries are yielded.
    include_dirs: bool,
}
2965
impl<'a> Traversal<'a> {
    /// Advances to the next entry that matches the traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Advances the cursor forward until the filtered entry count reaches
    /// `offset`.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past all descendants of the current entry, stopping at the next
    /// filter-matching entry that is not contained within it.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            // Keep seeking if we landed on an entry the filters exclude.
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned at, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The filtered index of the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3011
3012impl<'a> Iterator for Traversal<'a> {
3013 type Item = &'a Entry;
3014
3015 fn next(&mut self) -> Option<Self::Item> {
3016 if let Some(item) = self.entry() {
3017 self.advance();
3018 Some(item)
3019 } else {
3020 None
3021 }
3022 }
3023}
3024
/// Targets a `Traversal` cursor can seek to.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to this exact path.
    Path(&'a Path),
    /// Seek to the first entry after this path that is not one of its
    /// descendants.
    PathSuccessor(&'a Path),
    /// Seek to the entry at the given filtered index.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3035
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    /// Orders the target relative to the cursor's accumulated progress,
    /// driving seeks over the entry tree.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // Any position no longer inside `path`'s subtree is acceptable;
                // positions still inside compare greater so the cursor keeps moving.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3058
/// Iterator over the direct children of a directory entry, implemented by
/// skipping each child's descendants via `advance_to_sibling`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3063
3064impl<'a> Iterator for ChildEntriesIter<'a> {
3065 type Item = &'a Entry;
3066
3067 fn next(&mut self) -> Option<Self::Item> {
3068 if let Some(item) = self.traversal.entry() {
3069 if item.path.starts_with(&self.parent_path) {
3070 self.traversal.advance_to_sibling();
3071 return Some(item);
3072 }
3073 }
3074 None
3075 }
3076}
3077
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a local entry into its protobuf representation.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths are transmitted as (possibly lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3091
3092impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3093 type Error = anyhow::Error;
3094
3095 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3096 if let Some(mtime) = entry.mtime {
3097 let kind = if entry.is_dir {
3098 EntryKind::Dir
3099 } else {
3100 let mut char_bag = *root_char_bag;
3101 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3102 EntryKind::File(char_bag)
3103 };
3104 let path: Arc<Path> = PathBuf::from(entry.path).into();
3105 Ok(Entry {
3106 id: ProjectEntryId::from_proto(entry.id),
3107 kind,
3108 path,
3109 inode: entry.inode,
3110 mtime: mtime.into(),
3111 is_symlink: entry.is_symlink,
3112 is_ignored: entry.is_ignored,
3113 })
3114 } else {
3115 Err(anyhow!(
3116 "missing mtime in remote worktree entry {:?}",
3117 entry.path
3118 ))
3119 }
3120 }
3121}
3122
3123#[cfg(test)]
3124mod tests {
3125 use super::*;
3126 use fs::repository::FakeGitRepository;
3127 use fs::{FakeFs, RealFs};
3128 use gpui::{executor::Deterministic, TestAppContext};
3129 use pretty_assertions::assert_eq;
3130 use rand::prelude::*;
3131 use serde_json::json;
3132 use std::{env, fmt::Write};
3133 use util::{http::FakeHttpClient, test::temp_tree};
3134
    /// Verifies that traversal excludes git-ignored entries (here `a/b`)
    /// unless `include_ignored` is requested.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan to finish before asserting on entries.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3192
    /// Verifies that symlink cycles (`lib/a/lib` -> `..`, etc.) do not cause
    /// infinite recursion: the symlink entries appear but are not descended
    /// into, and renames of such symlinks are picked up.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Rename one of the cyclic symlinks and confirm the rescan reflects it.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3272
    /// Verifies (on the real filesystem) that ignore statuses from both an
    /// ancestor `.gitignore` and the worktree's own `.gitignore` are applied
    /// to existing entries and to files created after the initial scan.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create new files after the initial scan; their ignore status should
        // be computed when the resulting FS events are processed.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3351
    /// Verifies repository discovery: paths resolve to the nearest enclosing
    /// `.git` (including nested repositories), changes inside `.git` bump the
    /// repository's scan id, and deleting `.git` removes the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // A file outside any repository has no associated repo.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // Nested repositories take precedence over their ancestors.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        // A change inside `.git` should bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting `.git` should remove the repository association.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3429
3430 #[test]
3431 fn test_changed_repos() {
3432 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> RepositoryEntry {
3433 RepositoryEntry {
3434 scan_id,
3435 git_dir_path: git_dir_path.as_ref().into(),
3436 git_dir_entry_id: ProjectEntryId(0),
3437 }
3438 }
3439
3440 let mut prev_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3441 prev_repos.insert(
3442 RepositoryWorkDirectory(Path::new("don't-care-1").into()),
3443 fake_entry("/.git", 0),
3444 );
3445 prev_repos.insert(
3446 RepositoryWorkDirectory(Path::new("don't-care-2").into()),
3447 fake_entry("/a/.git", 0),
3448 );
3449 prev_repos.insert(
3450 RepositoryWorkDirectory(Path::new("don't-care-3").into()),
3451 fake_entry("/a/b/.git", 0),
3452 );
3453
3454 let mut new_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3455 new_repos.insert(
3456 RepositoryWorkDirectory(Path::new("don't-care-4").into()),
3457 fake_entry("/a/.git", 1),
3458 );
3459 new_repos.insert(
3460 RepositoryWorkDirectory(Path::new("don't-care-5").into()),
3461 fake_entry("/a/b/.git", 0),
3462 );
3463 new_repos.insert(
3464 RepositoryWorkDirectory(Path::new("don't-care-6").into()),
3465 fake_entry("/a/c/.git", 0),
3466 );
3467
3468 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3469
3470 // Deletion retained
3471 assert!(res
3472 .iter()
3473 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3474 .is_some());
3475
3476 // Update retained
3477 assert!(res
3478 .iter()
3479 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3480 .is_some());
3481
3482 // Addition retained
3483 assert!(res
3484 .iter()
3485 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3486 .is_some());
3487
3488 // Nochange, not retained
3489 assert!(res
3490 .iter()
3491 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3492 .is_none());
3493 }
3494
3495 #[gpui::test]
3496 async fn test_write_file(cx: &mut TestAppContext) {
3497 let dir = temp_tree(json!({
3498 ".git": {},
3499 ".gitignore": "ignored-dir\n",
3500 "tracked-dir": {},
3501 "ignored-dir": {}
3502 }));
3503
3504 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3505
3506 let tree = Worktree::local(
3507 client,
3508 dir.path(),
3509 true,
3510 Arc::new(RealFs),
3511 Default::default(),
3512 &mut cx.to_async(),
3513 )
3514 .await
3515 .unwrap();
3516 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3517 .await;
3518 tree.flush_fs_events(cx).await;
3519
3520 tree.update(cx, |tree, cx| {
3521 tree.as_local().unwrap().write_file(
3522 Path::new("tracked-dir/file.txt"),
3523 "hello".into(),
3524 Default::default(),
3525 cx,
3526 )
3527 })
3528 .await
3529 .unwrap();
3530 tree.update(cx, |tree, cx| {
3531 tree.as_local().unwrap().write_file(
3532 Path::new("ignored-dir/file.txt"),
3533 "world".into(),
3534 Default::default(),
3535 cx,
3536 )
3537 })
3538 .await
3539 .unwrap();
3540
3541 tree.read_with(cx, |tree, _| {
3542 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3543 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3544 assert!(!tracked.is_ignored);
3545 assert!(ignored.is_ignored);
3546 });
3547 }
3548
3549 #[gpui::test(iterations = 30)]
3550 async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
3551 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3552
3553 let fs = FakeFs::new(cx.background());
3554 fs.insert_tree(
3555 "/root",
3556 json!({
3557 "b": {},
3558 "c": {},
3559 "d": {},
3560 }),
3561 )
3562 .await;
3563
3564 let tree = Worktree::local(
3565 client,
3566 "/root".as_ref(),
3567 true,
3568 fs,
3569 Default::default(),
3570 &mut cx.to_async(),
3571 )
3572 .await
3573 .unwrap();
3574
3575 let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3576
3577 let entry = tree
3578 .update(cx, |tree, cx| {
3579 tree.as_local_mut()
3580 .unwrap()
3581 .create_entry("a/e".as_ref(), true, cx)
3582 })
3583 .await
3584 .unwrap();
3585 assert!(entry.is_dir());
3586
3587 cx.foreground().run_until_parked();
3588 tree.read_with(cx, |tree, _| {
3589 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3590 });
3591
3592 let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3593 let update = snapshot2.build_update(&snapshot1, 0, 0, true);
3594 snapshot1.apply_remote_update(update).unwrap();
3595 assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
3596 }
3597
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Operation counts are overridable via env vars for longer soak runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Build a random initial tree on the fake filesystem.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot, taken while the initial scan may still be running.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Mutate the worktree while the initial scan is in progress, checking
        // snapshot invariants after every mutation.
        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally verify that an incremental update brings the
            // baseline snapshot in sync with the current one.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Let the initial scan finish and re-check the invariants.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        // One final update must reconcile the baseline with the fully
        // scanned worktree.
        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3674
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Operation counts are overridable via env vars for longer soak runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Build a random initial tree on the fake filesystem.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            // Mirror every change event into the sorted `paths` vector and
            // verify that it always matches the worktree's actual path list.
            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Index of `path` in the sorted mirror, whether or
                        // not it is currently present there.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                // An added path must not already be present.
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                // A removed path must have been present.
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                // An updated path must already be present.
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                // Insert only if not already present.
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Interleave filesystem mutations with partial flushes of the fake
        // fs's buffered events, storing intermediate snapshots along the way.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                // Deliver only a random prefix of the buffered fs events, so
                // the worktree observes changes out of lockstep with the fs.
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Deliver all remaining events and let the worktree settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A freshly-scanned worktree over the same fs must agree with the
        // incrementally-maintained snapshot.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored intermediate snapshot must be updatable to the final
        // state, both with and without ignored entries included.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the stored snapshot, since the
                // update being applied will not mention them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
3829
    /// Applies one random mutation (delete, rename, or create/overwrite) to a
    /// randomly chosen entry of the worktree, returning the resulting task.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3 of the time: delete the entry (never the worktree root).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3 of the time: rename the entry (never the root) into the
            // directory of some other randomly chosen entry.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                if new_path.starts_with(&entry.path) {
                    // Moving an entry into its own subtree is invalid; fall
                    // back to a fresh name at the worktree root.
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remaining cases: create a child under a directory entry, or
            // overwrite a file entry's contents.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
3889
    /// Applies one random mutation directly to the fake filesystem: an
    /// insertion (with probability `insertion_probability`), occasionally a
    /// new `.gitignore`, otherwise a rename or deletion of an existing path.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition all current paths under the root into files and dirs.
        // Note: `dirs` always contains at least the root itself.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Insert a new file or directory — always, when nothing but the root
        // exists, since the other branches require existing entries.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        // Occasionally write a .gitignore listing a random subset of the
        // files and dirs below a randomly chosen directory.
        } else if rng.gen_bool(0.05) {
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // `starts_with` matches the dir itself too, so `subdirs` always
            // includes `ignore_dir_path` and is never empty.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // Exclusive upper bound: never ignore every dir in the list.
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // One ignore pattern per line, relative to the ignore dir.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        // Otherwise: rename or delete an existing path (any file, or any
        // dir except the root — hence `dirs[1..]`).
        } else {
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Pick a destination dir that is not inside the moved path.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory wholesale, by
                // removing it first and renaming onto its path.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4048
4049 fn gen_name(rng: &mut impl Rng) -> String {
4050 (0..6)
4051 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4052 .map(char::from)
4053 .collect()
4054 }
4055
4056 impl LocalSnapshot {
4057 fn check_invariants(&self) {
4058 assert_eq!(
4059 self.entries_by_path
4060 .cursor::<()>()
4061 .map(|e| (&e.path, e.id))
4062 .collect::<Vec<_>>(),
4063 self.entries_by_id
4064 .cursor::<()>()
4065 .map(|e| (&e.path, e.id))
4066 .collect::<collections::BTreeSet<_>>()
4067 .into_iter()
4068 .collect::<Vec<_>>(),
4069 "entries_by_path and entries_by_id are inconsistent"
4070 );
4071
4072 let mut files = self.files(true, 0);
4073 let mut visible_files = self.files(false, 0);
4074 for entry in self.entries_by_path.cursor::<()>() {
4075 if entry.is_file() {
4076 assert_eq!(files.next().unwrap().inode, entry.inode);
4077 if !entry.is_ignored {
4078 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4079 }
4080 }
4081 }
4082
4083 assert!(files.next().is_none());
4084 assert!(visible_files.next().is_none());
4085
4086 let mut bfs_paths = Vec::new();
4087 let mut stack = vec![Path::new("")];
4088 while let Some(path) = stack.pop() {
4089 bfs_paths.push(path);
4090 let ix = stack.len();
4091 for child_entry in self.child_entries(path) {
4092 stack.insert(ix, &child_entry.path);
4093 }
4094 }
4095
4096 let dfs_paths_via_iter = self
4097 .entries_by_path
4098 .cursor::<()>()
4099 .map(|e| e.path.as_ref())
4100 .collect::<Vec<_>>();
4101 assert_eq!(bfs_paths, dfs_paths_via_iter);
4102
4103 let dfs_paths_via_traversal = self
4104 .entries(true)
4105 .map(|e| e.path.as_ref())
4106 .collect::<Vec<_>>();
4107 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4108
4109 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4110 let ignore_parent_path =
4111 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4112 assert!(self.entry_for_path(&ignore_parent_path).is_some());
4113 assert!(self
4114 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4115 .is_some());
4116 }
4117 }
4118
4119 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4120 let mut paths = Vec::new();
4121 for entry in self.entries_by_path.cursor::<()>() {
4122 if include_ignored || !entry.is_ignored {
4123 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4124 }
4125 }
4126 paths.sort_by(|a, b| a.0.cmp(b.0));
4127 paths
4128 }
4129 }
4130}