1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use lsp::LanguageServerId;
30use parking_lot::Mutex;
31use postage::{
32 barrier,
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 pin::Pin,
48 sync::{
49 atomic::{AtomicUsize, Ordering::SeqCst},
50 Arc,
51 },
52 time::{Duration, SystemTime},
53};
54use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeSet};
55use util::{paths::HOME, ResultExt, TryFutureExt};
56
/// Identifier for a worktree. Constructed from the worktree model's id
/// (see `WorktreeId::from_usize` usage in `Worktree::local`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
59
/// A tree of files being tracked by a project: either a directory on the
/// local filesystem, or a read-mostly mirror of a collaborator's worktree.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// A worktree backed by the local filesystem. Owns the current snapshot,
/// a channel to the background scanner that keeps the snapshot current,
/// and diagnostics reported by language servers for files in the tree.
pub struct LocalWorktree {
    // Latest snapshot adopted from the background scanner.
    snapshot: LocalSnapshot,
    // Sends batches of absolute paths to the background scanner for
    // rescanning; the paired `barrier::Sender` lets callers await completion.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Broadcasts whether a scan is in progress (consumed by `scan_complete`).
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present only while this worktree is shared with collaborators.
    share: Option<ShareState>,
    // Raw diagnostics per path; each path's vec is kept sorted by server id
    // (see `update_diagnostics`).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    // Cached error/warning counts per path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
83
/// A worktree mirrored from a remote collaborator. Updates arrive over RPC
/// and are applied to `background_snapshot`, then copied into `snapshot`.
pub struct RemoteWorktree {
    // Foreground copy of the snapshot, refreshed after each applied update.
    snapshot: Snapshot,
    // Snapshot that the background update-applying task mutates.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once the host disconnects; incoming updates are then ignored.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters keyed by scan id, resolved when that scan has been observed
    // (kept sorted; see `wait_for_snapshot`).
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
96
/// An immutable-in-spirit view of a worktree's contents at a point in time.
/// Entries are indexed both by path and by id for efficient lookups.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    // Name of the root entry, used for fuzzy matching (see `Worktree::local`).
    root_name: String,
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
118
/// A git repository discovered inside the worktree.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    // Scan id at which this repository entry was last updated.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
130
131impl std::fmt::Debug for GitRepositoryEntry {
132 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
133 f.debug_struct("GitRepositoryEntry")
134 .field("content_path", &self.content_path)
135 .field("git_dir_path", &self.git_dir_path)
136 .finish()
137 }
138}
139
/// A `Snapshot` plus the local-only bookkeeping the background scanner needs:
/// parsed gitignores, discovered git repositories, and entry-id recycling.
#[derive(Debug)]
pub struct LocalSnapshot {
    // Parsed `.gitignore` per parent directory, with the scan id at which it
    // was last seen.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    git_repositories: Vec<GitRepositoryEntry>,
    // Maps filesystem inode-ish keys to the ids of removed entries so that a
    // re-created path can reuse its previous `ProjectEntryId`.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
148
149impl Clone for LocalSnapshot {
150 fn clone(&self) -> Self {
151 Self {
152 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
153 git_repositories: self.git_repositories.iter().cloned().collect(),
154 removed_entry_ids: self.removed_entry_ids.clone(),
155 next_entry_id: self.next_entry_id.clone(),
156 snapshot: self.snapshot.clone(),
157 }
158 }
159}
160
// Expose the shared `Snapshot` API directly on `LocalSnapshot`.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
168
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
174
/// Messages sent from the background scanner to the `LocalWorktree` model
/// (consumed in `Worktree::local`).
enum ScanState {
    /// A scan has started.
    Started,
    /// The scanner produced an updated snapshot. `scanning` is true while
    /// more updates for the same scan are still expected; `barrier`, when
    /// present, notifies a caller awaiting this batch of path changes.
    Updated {
        snapshot: LocalSnapshot,
        changes: HashMap<Arc<Path>, PathChange>,
        barrier: Option<barrier::Sender>,
        scanning: bool,
    },
}
184
/// State kept while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot to the task that diffs and sends updates.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signaled to let the update task retry after a failed send (see `share`).
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
191
/// Events emitted by a `Worktree` model to its observers.
pub enum Event {
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
196
// Make `Worktree` usable as a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
200
201impl Worktree {
    /// Creates a local worktree rooted at `path`, spawning a background
    /// scanner that populates and maintains its snapshot from the filesystem.
    ///
    /// Returns an error if the root path cannot be stat'ed.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            // Start with an empty snapshot. `scan_id` is 1 while
            // `completed_scan_id` is 0, so the initial scan reads as
            // "in progress" until the scanner reports completion.
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scanner state changes into this model: maintain the
            // `is_scanning` flag, adopt new snapshots, and emit events. The
            // weak handle lets the loop end once the worktree is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier wakes any caller
                                // awaiting this batch of path changes.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Run the scanner on the background executor, driven by
            // filesystem events debounced to 100ms.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
313
    /// Creates a worktree that mirrors a collaborator's worktree described by
    /// `worktree`. Spawns one background task that applies incoming protocol
    /// updates to a shared snapshot, and one foreground task that copies the
    /// updated snapshot into the model and resolves scan-id subscriptions.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            // Empty snapshot; entries arrive via `update_from_remote`.
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply protocol updates off the main thread, signaling
            // `snapshot_updated_rx` after each one.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // After each applied update: refresh the foreground snapshot,
            // notify observers, and resolve any `wait_for_snapshot` waiters
            // whose scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Subscriptions are kept sorted by scan id, so we
                            // can pop satisfied ones from the front.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        // The worktree was dropped; stop the loop.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
395
396 pub fn as_local(&self) -> Option<&LocalWorktree> {
397 if let Worktree::Local(worktree) = self {
398 Some(worktree)
399 } else {
400 None
401 }
402 }
403
404 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
405 if let Worktree::Remote(worktree) = self {
406 Some(worktree)
407 } else {
408 None
409 }
410 }
411
412 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
413 if let Worktree::Local(worktree) = self {
414 Some(worktree)
415 } else {
416 None
417 }
418 }
419
420 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
421 if let Worktree::Remote(worktree) = self {
422 Some(worktree)
423 } else {
424 None
425 }
426 }
427
428 pub fn is_local(&self) -> bool {
429 matches!(self, Worktree::Local(_))
430 }
431
432 pub fn is_remote(&self) -> bool {
433 !self.is_local()
434 }
435
436 pub fn snapshot(&self) -> Snapshot {
437 match self {
438 Worktree::Local(worktree) => worktree.snapshot().snapshot,
439 Worktree::Remote(worktree) => worktree.snapshot(),
440 }
441 }
442
443 pub fn scan_id(&self) -> usize {
444 match self {
445 Worktree::Local(worktree) => worktree.snapshot.scan_id,
446 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
447 }
448 }
449
450 pub fn completed_scan_id(&self) -> usize {
451 match self {
452 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
453 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
454 }
455 }
456
457 pub fn is_visible(&self) -> bool {
458 match self {
459 Worktree::Local(worktree) => worktree.visible,
460 Worktree::Remote(worktree) => worktree.visible,
461 }
462 }
463
464 pub fn replica_id(&self) -> ReplicaId {
465 match self {
466 Worktree::Local(_) => 0,
467 Worktree::Remote(worktree) => worktree.replica_id,
468 }
469 }
470
471 pub fn diagnostic_summaries(
472 &self,
473 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
474 match self {
475 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
476 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
477 }
478 .iter()
479 .flat_map(|(path, summaries)| {
480 summaries
481 .iter()
482 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
483 })
484 }
485
486 pub fn abs_path(&self) -> Arc<Path> {
487 match self {
488 Worktree::Local(worktree) => worktree.abs_path.clone(),
489 Worktree::Remote(worktree) => worktree.abs_path.clone(),
490 }
491 }
492}
493
494impl LocalWorktree {
495 pub fn contains_abs_path(&self, path: &Path) -> bool {
496 path.starts_with(&self.abs_path)
497 }
498
499 fn absolutize(&self, path: &Path) -> PathBuf {
500 if path.file_name().is_some() {
501 self.abs_path.join(path)
502 } else {
503 self.abs_path.to_path_buf()
504 }
505 }
506
    /// Loads the file at `path` into a new `Buffer` model, including its git
    /// index text (`diff_base`) when the file is inside a repository.
    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                // Kick off an initial git diff against the loaded diff base.
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
524
525 pub fn diagnostics_for_path(
526 &self,
527 path: &Path,
528 ) -> Vec<(
529 LanguageServerId,
530 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
531 )> {
532 self.diagnostics.get(path).cloned().unwrap_or_default()
533 }
534
    /// Replaces the diagnostics produced by `server_id` for `worktree_path`,
    /// updating the cached per-path summaries and notifying collaborators if
    /// the worktree is shared.
    ///
    /// Returns `Ok(true)` when either the old or the new summary is
    /// non-empty, i.e. when observers can see a difference.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics left from this server: drop its slot in the
            // per-path vector, and drop the vector once no server remains.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            // Record the new summary and insert or replace this server's
            // diagnostics; the vector is kept sorted by server id.
            summaries_by_server_id.insert(server_id, new_summary);
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            // Best-effort broadcast of the summary change to collaborators.
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
594
595 fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
596 let updated_repos = Self::changed_repos(
597 &self.snapshot.git_repositories,
598 &new_snapshot.git_repositories,
599 );
600 self.snapshot = new_snapshot;
601
602 if let Some(share) = self.share.as_mut() {
603 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
604 }
605
606 if !updated_repos.is_empty() {
607 cx.emit(Event::UpdatedGitRepositories(updated_repos));
608 }
609 }
610
611 fn changed_repos(
612 old_repos: &[GitRepositoryEntry],
613 new_repos: &[GitRepositoryEntry],
614 ) -> Vec<GitRepositoryEntry> {
615 fn diff<'a>(
616 a: &'a [GitRepositoryEntry],
617 b: &'a [GitRepositoryEntry],
618 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
619 ) {
620 for a_repo in a {
621 let matched = b.iter().find(|b_repo| {
622 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
623 });
624
625 if matched.is_none() {
626 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
627 }
628 }
629 }
630
631 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
632
633 diff(old_repos, new_repos, &mut updated);
634 diff(new_repos, old_repos, &mut updated);
635
636 updated.into_values().collect()
637 }
638
639 pub fn scan_complete(&self) -> impl Future<Output = ()> {
640 let mut is_scanning_rx = self.is_scanning.1.clone();
641 async move {
642 let mut is_scanning = is_scanning_rx.borrow().clone();
643 while is_scanning {
644 if let Some(value) = is_scanning_rx.recv().await {
645 is_scanning = value;
646 } else {
647 break;
648 }
649 }
650 }
651 }
652
    /// Returns a copy of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
656
657 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
658 proto::WorktreeMetadata {
659 id: self.id().to_proto(),
660 root_name: self.root_name().to_string(),
661 visible: self.visible,
662 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
663 }
664 }
665
    /// Reads the file at worktree-relative `path`, returning its `File`
    /// metadata, contents, and — when the file belongs to a git repository —
    /// the corresponding index text to diff against.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            // Load the git index text for the file, if it is inside a
            // repository tracked by the snapshot.
            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
714
715 pub fn save_buffer(
716 &self,
717 buffer_handle: ModelHandle<Buffer>,
718 path: Arc<Path>,
719 has_changed_file: bool,
720 cx: &mut ModelContext<Worktree>,
721 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
722 let handle = cx.handle();
723 let buffer = buffer_handle.read(cx);
724
725 let rpc = self.client.clone();
726 let buffer_id = buffer.remote_id();
727 let project_id = self.share.as_ref().map(|share| share.project_id);
728
729 let text = buffer.as_rope().clone();
730 let fingerprint = text.fingerprint();
731 let version = buffer.version();
732 let save = self.write_file(path, text, buffer.line_ending(), cx);
733
734 cx.as_mut().spawn(|mut cx| async move {
735 let entry = save.await?;
736
737 if has_changed_file {
738 let new_file = Arc::new(File {
739 entry_id: entry.id,
740 worktree: handle,
741 path: entry.path,
742 mtime: entry.mtime,
743 is_local: true,
744 is_deleted: false,
745 });
746
747 if let Some(project_id) = project_id {
748 rpc.send(proto::UpdateBufferFile {
749 project_id,
750 buffer_id,
751 file: Some(new_file.to_proto()),
752 })
753 .log_err();
754 }
755
756 buffer_handle.update(&mut cx, |buffer, cx| {
757 if has_changed_file {
758 buffer.file_updated(new_file, cx).detach();
759 }
760 });
761 }
762
763 if let Some(project_id) = project_id {
764 rpc.send(proto::BufferSaved {
765 project_id,
766 buffer_id,
767 version: serialize_version(&version),
768 mtime: Some(entry.mtime.into()),
769 fingerprint: serialize_fingerprint(fingerprint),
770 })?;
771 }
772
773 buffer_handle.update(&mut cx, |buffer, cx| {
774 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
775 });
776
777 Ok((version, fingerprint, entry.mtime))
778 })
779 }
780
781 pub fn create_entry(
782 &self,
783 path: impl Into<Arc<Path>>,
784 is_dir: bool,
785 cx: &mut ModelContext<Worktree>,
786 ) -> Task<Result<Entry>> {
787 let path = path.into();
788 let abs_path = self.absolutize(&path);
789 let fs = self.fs.clone();
790 let write = cx.background().spawn(async move {
791 if is_dir {
792 fs.create_dir(&abs_path).await
793 } else {
794 fs.save(&abs_path, &Default::default(), Default::default())
795 .await
796 }
797 });
798
799 cx.spawn(|this, mut cx| async move {
800 write.await?;
801 this.update(&mut cx, |this, cx| {
802 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
803 })
804 .await
805 })
806 }
807
808 pub fn write_file(
809 &self,
810 path: impl Into<Arc<Path>>,
811 text: Rope,
812 line_ending: LineEnding,
813 cx: &mut ModelContext<Worktree>,
814 ) -> Task<Result<Entry>> {
815 let path = path.into();
816 let abs_path = self.absolutize(&path);
817 let fs = self.fs.clone();
818 let write = cx
819 .background()
820 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
821
822 cx.spawn(|this, mut cx| async move {
823 write.await?;
824 this.update(&mut cx, |this, cx| {
825 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
826 })
827 .await
828 })
829 }
830
831 pub fn delete_entry(
832 &self,
833 entry_id: ProjectEntryId,
834 cx: &mut ModelContext<Worktree>,
835 ) -> Option<Task<Result<()>>> {
836 let entry = self.entry_for_id(entry_id)?.clone();
837 let abs_path = self.abs_path.clone();
838 let fs = self.fs.clone();
839
840 let delete = cx.background().spawn(async move {
841 let mut abs_path = fs.canonicalize(&abs_path).await?;
842 if entry.path.file_name().is_some() {
843 abs_path = abs_path.join(&entry.path);
844 }
845 if entry.is_file() {
846 fs.remove_file(&abs_path, Default::default()).await?;
847 } else {
848 fs.remove_dir(
849 &abs_path,
850 RemoveOptions {
851 recursive: true,
852 ignore_if_not_exists: false,
853 },
854 )
855 .await?;
856 }
857 anyhow::Ok(abs_path)
858 });
859
860 Some(cx.spawn(|this, mut cx| async move {
861 let abs_path = delete.await?;
862 let (tx, mut rx) = barrier::channel();
863 this.update(&mut cx, |this, _| {
864 this.as_local_mut()
865 .unwrap()
866 .path_changes_tx
867 .try_send((vec![abs_path], tx))
868 })?;
869 rx.recv().await;
870 Ok(())
871 }))
872 }
873
874 pub fn rename_entry(
875 &self,
876 entry_id: ProjectEntryId,
877 new_path: impl Into<Arc<Path>>,
878 cx: &mut ModelContext<Worktree>,
879 ) -> Option<Task<Result<Entry>>> {
880 let old_path = self.entry_for_id(entry_id)?.path.clone();
881 let new_path = new_path.into();
882 let abs_old_path = self.absolutize(&old_path);
883 let abs_new_path = self.absolutize(&new_path);
884 let fs = self.fs.clone();
885 let rename = cx.background().spawn(async move {
886 fs.rename(&abs_old_path, &abs_new_path, Default::default())
887 .await
888 });
889
890 Some(cx.spawn(|this, mut cx| async move {
891 rename.await?;
892 this.update(&mut cx, |this, cx| {
893 this.as_local_mut()
894 .unwrap()
895 .refresh_entry(new_path.clone(), Some(old_path), cx)
896 })
897 .await
898 }))
899 }
900
901 pub fn copy_entry(
902 &self,
903 entry_id: ProjectEntryId,
904 new_path: impl Into<Arc<Path>>,
905 cx: &mut ModelContext<Worktree>,
906 ) -> Option<Task<Result<Entry>>> {
907 let old_path = self.entry_for_id(entry_id)?.path.clone();
908 let new_path = new_path.into();
909 let abs_old_path = self.absolutize(&old_path);
910 let abs_new_path = self.absolutize(&new_path);
911 let fs = self.fs.clone();
912 let copy = cx.background().spawn(async move {
913 copy_recursive(
914 fs.as_ref(),
915 &abs_old_path,
916 &abs_new_path,
917 Default::default(),
918 )
919 .await
920 });
921
922 Some(cx.spawn(|this, mut cx| async move {
923 copy.await?;
924 this.update(&mut cx, |this, cx| {
925 this.as_local_mut()
926 .unwrap()
927 .refresh_entry(new_path.clone(), None, cx)
928 })
929 .await
930 }))
931 }
932
    /// Asks the background scanner to rescan `path` (and `old_path`, after a
    /// rename), waits for the rescan to complete, and returns the refreshed
    /// entry for `path`.
    ///
    /// Fails if the worktree is dropped while waiting, or if no entry exists
    /// at `path` after the rescan.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path with no file name refers to the worktree root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed the batch.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
970
    /// Starts (or resumes) sharing this worktree with collaborators in
    /// `project_id`.
    ///
    /// On first share this sends all current diagnostic summaries, then
    /// spawns a background task that diffs each new snapshot against the
    /// previous one and streams chunked updates to the server, retrying after
    /// failures when `resume_updates` is signaled. The returned task resolves
    /// once the first full snapshot update has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already shared: resolve immediately and nudge the existing
            // update task to resume sending.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send the current diagnostic summaries up front.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first update sent is
                    // a complete description of the worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Tiny chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal, then retry each
                            // failed send until resumed or dropped.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // First complete snapshot sent: resolve the share task.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1056
1057 pub fn unshare(&mut self) {
1058 self.share.take();
1059 }
1060
1061 pub fn is_shared(&self) -> bool {
1062 self.share.is_some()
1063 }
1064}
1065
1066impl RemoteWorktree {
    /// Returns a copy of the foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1070
1071 pub fn disconnected_from_host(&mut self) {
1072 self.updates_tx.take();
1073 self.snapshot_subscriptions.clear();
1074 self.disconnected = true;
1075 }
1076
    /// Requests that the host save `buffer_handle`, then applies the host's
    /// reported version, fingerprint, and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            // Use the host's reported save state, not our request's.
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1109
1110 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1111 if let Some(updates_tx) = &self.updates_tx {
1112 updates_tx
1113 .unbounded_send(update)
1114 .expect("consumer runs to completion");
1115 }
1116 }
1117
1118 fn observed_snapshot(&self, scan_id: usize) -> bool {
1119 self.completed_scan_id >= scan_id
1120 }
1121
    /// Returns a future that resolves once a snapshot with `scan_id` has been
    /// observed. Resolves immediately if it already has; errors if the
    /// worktree has disconnected (the sender is dropped, so `rx` fails).
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            // Insert the subscription keeping the queue sorted by scan id;
            // `Ok` and `Err` both give a valid insertion point.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1142
1143 pub fn update_diagnostic_summary(
1144 &mut self,
1145 path: Arc<Path>,
1146 summary: &proto::DiagnosticSummary,
1147 ) {
1148 let server_id = LanguageServerId(summary.language_server_id as usize);
1149 let summary = DiagnosticSummary {
1150 error_count: summary.error_count as usize,
1151 warning_count: summary.warning_count as usize,
1152 };
1153
1154 if summary.is_empty() {
1155 if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1156 summaries.remove(&server_id);
1157 if summaries.is_empty() {
1158 self.diagnostic_summaries.remove(&path);
1159 }
1160 }
1161 } else {
1162 self.diagnostic_summaries
1163 .entry(path)
1164 .or_default()
1165 .insert(server_id, summary);
1166 }
1167 }
1168
    /// Inserts an entry received from the remote host into this worktree's
    /// snapshot, first waiting until the local state has caught up to
    /// `scan_id` so the insertion applies against a consistent snapshot.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        // Register the wait synchronously, before spawning, so an update
        // arriving in between cannot be missed.
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                // Mutate the shared background snapshot, then publish a copy
                // as the worktree's foreground snapshot.
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }
1187
    /// Deletes the entry with the given id from this worktree's snapshot,
    /// first waiting until the local state has caught up to `scan_id`.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        // Register the wait synchronously, before spawning, so an update
        // arriving in between cannot be missed.
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                // Mutate the shared background snapshot, then publish a copy
                // as the worktree's foreground snapshot.
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1206}
1207
impl Snapshot {
    /// The id of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts an entry received over the wire, replacing any existing entry
    /// with the same id (and removing that entry's stale path mapping).
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes the entry with the given id along with all of its descendants,
    /// returning the removed entry's path.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // ...skip (and unregister) the removed entry and every entry
            // underneath it...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the rest.
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

    /// Applies a batch of entry insertions and removals received from the
    /// remote host, then advances the scan ids.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If an entry with this id already exists at a different path,
            // drop the stale path mapping first.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    /// Total number of files in this snapshot.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored files in this snapshot.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Creates a traversal positioned at the `start_offset`-th entry, where
    /// the offset counts only entries matching the include flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Creates a traversal positioned at the given worktree-relative path.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over the files in this snapshot, starting at `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries (files and directories) in this snapshot.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, excluding the root entry
    /// (whose relative path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of the given directory path.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root, if it has been scanned.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The file name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Looks up the entry at the given worktree-relative path.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        // Seek to the path, then verify we landed on an exact match.
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by its project-wide id.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at the given path, if any.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1404
impl LocalSnapshot {
    /// Gives the most specific git repository for a given path, i.e. the
    /// deepest repository whose working directory contains it.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() //git_repository is ordered lexicographically
            .find(|repo| repo.manages(path))
            .cloned()
    }

    /// Finds the repository whose `.git` directory contains the given
    /// worktree-relative path, if any.
    pub(crate) fn repo_with_dot_git_containing(
        &mut self,
        path: &Path,
    ) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Builds the initial `UpdateWorktree` message, describing every entry in
    /// this snapshot.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Builds an `UpdateWorktree` message describing how this snapshot
    /// differs from `other`, by merge-joining the two id-ordered entry sets.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        // Both cursors yield entries in ascending id order, enabling a
        // single-pass merge join below.
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        // Present only in `self`: a new entry.
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        // Present in both: changed iff the scan ids differ.
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        // Present only in `other`: a removed entry.
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Inserts (or replaces) a single entry scanned from disk, reusing a
    /// previously assigned id when possible and refreshing gitignore state
    /// when the entry is itself a `.gitignore` file.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            // Reload and cache the ignore rules for the containing directory.
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote an already-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            // A different entry previously occupied this path; drop its id
            // mapping.
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the contents of a newly scanned directory, marking the parent
    /// as fully scanned, registering its gitignore (if any), and detecting a
    /// git repository when the directory is a `.git` folder.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // The parent is no longer a directory; nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            // Only register the repository if we don't already track one for
            // this working directory (Err means "not found" here).
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: prefer the id of an entry that
    /// was removed with the same inode, otherwise the id of any existing
    /// entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` and all of its descendants, remembering
    /// removed ids by inode (for reuse on re-creation) and invalidating any
    /// gitignore or git-repository state rooted at that path.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            // Split the tree into [before path], [path and descendants],
            // [after], keeping the first and last pieces.
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the highest removed id per inode so a re-created file
            // gets the same id back.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the cached ignore as stale by bumping its scan id.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            // Mark the repository entry as touched by this scan.
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    /// Collects the inodes of all ancestor directories of `path` that exist
    /// in this snapshot (used to detect filesystem cycles via symlinks).
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignore rules that applies to `abs_path` by
    /// walking its ancestors from the outermost in, short-circuiting to
    /// "ignore everything" once any ancestor is itself ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        // Apply from the root inward (hence the reverse), since outer rules
        // determine whether inner ones even matter.
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    /// All git repositories found within this worktree.
    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}
1721
1722impl GitRepositoryEntry {
1723 // Note that these paths should be relative to the worktree root.
1724 pub(crate) fn manages(&self, path: &Path) -> bool {
1725 path.starts_with(self.content_path.as_ref())
1726 }
1727
1728 // Note that this path should be relative to the worktree root.
1729 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1730 path.starts_with(self.git_dir_path.as_ref())
1731 }
1732}
1733
1734async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1735 let contents = fs.load(abs_path).await?;
1736 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1737 let mut builder = GitignoreBuilder::new(parent);
1738 for line in contents.lines() {
1739 builder.add_line(Some(abs_path.into()), line)?;
1740 }
1741 Ok(builder.build()?)
1742}
1743
1744impl WorktreeId {
1745 pub fn from_usize(handle_id: usize) -> Self {
1746 Self(handle_id)
1747 }
1748
1749 pub(crate) fn from_proto(id: u64) -> Self {
1750 Self(id as usize)
1751 }
1752
1753 pub fn to_proto(&self) -> u64 {
1754 self.0 as u64
1755 }
1756
1757 pub fn to_usize(&self) -> usize {
1758 self.0
1759 }
1760}
1761
1762impl fmt::Display for WorktreeId {
1763 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1764 self.0.fmt(f)
1765 }
1766}
1767
1768impl Deref for Worktree {
1769 type Target = Snapshot;
1770
1771 fn deref(&self) -> &Self::Target {
1772 match self {
1773 Worktree::Local(worktree) => &worktree.snapshot,
1774 Worktree::Remote(worktree) => &worktree.snapshot,
1775 }
1776 }
1777}
1778
/// A local worktree exposes its local snapshot's methods directly.
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1786
/// A remote worktree exposes its snapshot's methods directly.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1794
/// Debug output for a local worktree just shows its snapshot.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1800
1801impl fmt::Debug for Snapshot {
1802 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1803 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1804 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1805
1806 impl<'a> fmt::Debug for EntriesByPath<'a> {
1807 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1808 f.debug_map()
1809 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1810 .finish()
1811 }
1812 }
1813
1814 impl<'a> fmt::Debug for EntriesById<'a> {
1815 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1816 f.debug_list().entries(self.0.iter()).finish()
1817 }
1818 }
1819
1820 f.debug_struct("Snapshot")
1821 .field("id", &self.id)
1822 .field("root_name", &self.root_name)
1823 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1824 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1825 .finish()
1826 }
1827}
1828
/// A handle to a file within a worktree, as seen by open buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree this file belongs to.
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time of the file.
    pub mtime: SystemTime,
    /// Id of the corresponding worktree entry.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether the owning worktree is local (as opposed to remote).
    pub(crate) is_local: bool,
    /// Whether the file has been deleted since this handle was created.
    pub(crate) is_deleted: bool,
}
1838
impl language::File for File {
    /// Downcasts to the local-file interface when the worktree is local.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    /// Last-known modification time of the file.
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// Worktree-relative path of the file.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// A display path for the file: for visible worktrees, the worktree name
    /// followed by the relative path; for invisible (single-file) worktrees,
    /// the worktree's absolute path with the home directory shown as `~`.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means this IS the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    /// Whether the file has been deleted.
    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for the wire.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            // NOTE(review): uses the model handle's id as the worktree id —
            // presumably these coincide (see `WorktreeId::from_usize`); verify.
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1906
impl language::LocalFile for File {
    /// The absolute path of the file on disk.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that a buffer was reloaded from disk, if the
    /// worktree is currently shared; otherwise does nothing.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
1950
1951impl File {
1952 pub fn from_proto(
1953 proto: rpc::proto::File,
1954 worktree: ModelHandle<Worktree>,
1955 cx: &AppContext,
1956 ) -> Result<Self> {
1957 let worktree_id = worktree
1958 .read(cx)
1959 .as_remote()
1960 .ok_or_else(|| anyhow!("not remote"))?
1961 .id();
1962
1963 if worktree_id.to_proto() != proto.worktree_id {
1964 return Err(anyhow!("worktree id does not match file"));
1965 }
1966
1967 Ok(Self {
1968 worktree,
1969 path: Path::new(&proto.path).into(),
1970 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1971 entry_id: ProjectEntryId::from_proto(proto.entry_id),
1972 is_local: false,
1973 is_deleted: proto.is_deleted,
1974 })
1975 }
1976
1977 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
1978 file.and_then(|f| f.as_any().downcast_ref())
1979 }
1980
1981 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1982 self.worktree.read(cx).id()
1983 }
1984
1985 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
1986 if self.is_deleted {
1987 None
1988 } else {
1989 Some(self.entry_id)
1990 }
1991 }
1992}
1993
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Project-wide identifier of this entry.
    pub id: ProjectEntryId,
    /// Whether this is a file or a (possibly not-yet-scanned) directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode number.
    pub inode: u64,
    /// Modification time reported by the filesystem.
    pub mtime: SystemTime,
    /// Whether the entry is a symbolic link.
    pub is_symlink: bool,
    /// Whether the entry is matched by a gitignore rule.
    pub is_ignored: bool,
}
2004
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that was discovered but whose contents have not yet been
    /// scanned.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A regular file, carrying a character bag used for fuzzy matching.
    File(CharBag),
}
2011
/// Describes how a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// The path was created.
    Added,
    /// The path was removed.
    Removed,
    /// The path was modified.
    Updated,
    /// The path was either added or modified; we can't tell which.
    AddedOrUpdated,
}
2019
2020impl Entry {
2021 fn new(
2022 path: Arc<Path>,
2023 metadata: &fs::Metadata,
2024 next_entry_id: &AtomicUsize,
2025 root_char_bag: CharBag,
2026 ) -> Self {
2027 Self {
2028 id: ProjectEntryId::new(next_entry_id),
2029 kind: if metadata.is_dir {
2030 EntryKind::PendingDir
2031 } else {
2032 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2033 },
2034 path,
2035 inode: metadata.inode,
2036 mtime: metadata.mtime,
2037 is_symlink: metadata.is_symlink,
2038 is_ignored: false,
2039 }
2040 }
2041
2042 pub fn is_dir(&self) -> bool {
2043 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2044 }
2045
2046 pub fn is_file(&self) -> bool {
2047 matches!(self.kind, EntryKind::File(_))
2048 }
2049}
2050
2051impl sum_tree::Item for Entry {
2052 type Summary = EntrySummary;
2053
2054 fn summary(&self) -> Self::Summary {
2055 let visible_count = if self.is_ignored { 0 } else { 1 };
2056 let file_count;
2057 let visible_file_count;
2058 if self.is_file() {
2059 file_count = 1;
2060 visible_file_count = visible_count;
2061 } else {
2062 file_count = 0;
2063 visible_file_count = 0;
2064 }
2065
2066 EntrySummary {
2067 max_path: self.path.clone(),
2068 count: 1,
2069 visible_count,
2070 file_count,
2071 visible_file_count,
2072 }
2073 }
2074}
2075
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and thus ordered) by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2083
/// Aggregated statistics over a subtree of `Entry` items in a sum-tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// Greatest path in the subtree (paths are the tree's sort key).
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
}
2092
2093impl Default for EntrySummary {
2094 fn default() -> Self {
2095 Self {
2096 max_path: Arc::from(Path::new("")),
2097 count: 0,
2098 visible_count: 0,
2099 file_count: 0,
2100 visible_file_count: 0,
2101 }
2102 }
2103}
2104
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines two adjacent summaries: the counts add up, and `max_path`
    /// comes from the right-hand (greater) side.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2116
/// An id-keyed index record mapping a project entry id back to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    /// Project-wide id of the entry.
    id: ProjectEntryId,
    /// Worktree-relative path of the entry.
    path: Arc<Path>,
    /// Whether the entry is gitignored.
    is_ignored: bool,
    /// Id of the scan that last touched this entry.
    scan_id: usize,
}
2124
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// The summary of a single record is just its id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2132
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Records are keyed (and thus ordered) by their entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2140
/// Summary for `PathEntry` subtrees: the greatest id contained.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2145
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The right-hand summary always holds the greater id.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2153
/// Allows seeking through a `PathEntry` tree by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2159
/// Sort key for `Entry` items in the path-ordered sum-tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2162
impl Default for PathKey {
    /// The empty path, which sorts before every other path.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2168
/// Allows seeking through an `Entry` tree by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2174
/// Scans a worktree's directory on background threads, watching for
/// filesystem events and keeping the shared snapshot up to date.
struct BackgroundScanner {
    /// The snapshot being built and maintained by the scanner.
    snapshot: Mutex<LocalSnapshot>,
    /// Filesystem abstraction used for all disk access.
    fs: Arc<dyn Fs>,
    /// Channel for reporting scan progress back to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the worktree to re-scan specific paths.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// Snapshot as of the last reported update, plus the sorted set of paths
    /// changed since then — presumably used to compute change notifications;
    /// confirm against the consumer of `prev_state`.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    /// Set once the initial full scan has completed.
    finished_initial_scan: bool,
}
2184
2185impl BackgroundScanner {
    /// Creates a scanner for the given initial snapshot.
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            // Seed the previous state with a copy of the initial snapshot so
            // the first update has a baseline to diff against.
            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }
2203
    /// Main loop of the scanner: performs the initial scan of the root
    /// directory, then keeps processing filesystem events and path-refresh
    /// requests until the worktree is dropped.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If the root itself is ignored by an ancestor gitignore, mark
            // its entry accordingly before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Workers hold clones of the sender; dropping ours lets the channel
        // close once all pending jobs have been processed.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    // Drain any further events already queued so they're
                    // handled in a single batch.
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2295
    /// Reloads the given paths and reports a status update, releasing
    /// `barrier` so the requester can observe the refreshed snapshot. Returns
    /// false if the status update could not be delivered.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2300
    /// Handles a batch of changed paths reported by the filesystem: reloads
    /// the affected entries, scans any newly discovered directories, refreshes
    /// ignore statuses, and prunes stale state before reporting an update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Accumulate the changed paths, keeping the list sorted.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();
        // Drop repositories whose `.git` directory no longer exists.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
        snapshot.git_repositories = git_repositories;
        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;
        drop(snapshot);

        self.send_status_update(false, None);
    }
2325
    /// Drains `scan_jobs_rx` with one worker task per CPU, scanning
    /// directories in parallel. While scanning, also services path refresh
    /// requests (prioritized) and, when `enable_progress_updates` is set,
    /// sends periodic progress updates to the worktree.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the receiving side is gone, the worktree has been dropped and
        // there is nothing left to do.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        // Shared counter used below to ensure only one worker sends each
        // periodic progress update.
        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race: report progress.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already reported for this
                                            // interval; resynchronize our local count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    // Re-arm the timer for the next interval.
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // The empty path is the worktree root; don't log
                                        // scan errors for it here.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2398
    /// Diffs the current snapshot against the previously reported one and
    /// sends a `ScanState::Updated` message carrying the new snapshot, the
    /// change set, and the optional completion barrier. Returns `false` if
    /// the receiving end has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        // Note the lock order here: `prev_state` is locked before `snapshot`.
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        // Swap the stored "previous" snapshot with the current one, keeping
        // the old copy to diff against, and take the accumulated changed paths.
        let mut old_snapshot = snapshot.snapshot.clone();
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2415
    /// Scans a single directory: reads its children, builds entries for them,
    /// applies `.gitignore` rules, populates the snapshot, and enqueues scan
    /// jobs for child directories (skipping recursive symlink cycles).
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One element per directory entry, in the same order as `new_entries`'
        // directories; `None` marks directories we won't descend into.
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // Entry vanished between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already processed
                // to reflect the ignore file in the current directory. Because
                // `.gitignore` starts with a `.`, it sorts near the start of the
                // listing, so there should rarely be many such entries. Update the
                // ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    // Directories and job slots were pushed in lockstep, so each
                    // directory entry pairs with the next element of `new_jobs`.
                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    // Keep `new_jobs` aligned with directory entries even when
                    // we refuse to descend.
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue the child-directory jobs only after the snapshot has been
        // populated with this directory's entries.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2541
    /// Refreshes snapshot entries for the given absolute paths: removes
    /// entries for vanished paths, inserts or updates entries for existing
    /// ones, and — when `scan_queue_tx` is provided — enqueues recursive
    /// scans for any directories among them. Returns the worktree-relative
    /// paths that were processed, or `None` if the worktree root can no
    /// longer be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Drop paths contained within other paths in the batch; the ancestor's
        // (possibly recursive) reload covers them.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A recursive update finishes asynchronously (via the scan queue), so
        // only mark the scan complete immediately for non-recursive reloads.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    // A change under a `.git` directory means the repository's
                    // index may have changed; reload it and bump its scan id.
                    let scan_id = snapshot.scan_id;
                    if let Some(repo) = snapshot.repo_with_dot_git_containing(&path) {
                        repo.repo.lock().reload_index();
                        repo.scan_id = scan_id;
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Only recurse into directories, and never into a
                        // symlink cycle (an inode already seen as an ancestor).
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2635
    /// Recomputes ignore statuses after `.gitignore` files have changed:
    /// forgets bookkeeping for deleted ignore files, then walks the affected
    /// directory subtrees in parallel to refresh each entry's ignored flag.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // This ignore file changed since the last completed scan and
                // its directory still exists, so its subtree needs re-checking.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                // The `.gitignore` file itself is gone; drop its record.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both our local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip paths nested inside `parent_abs_path`; the recursive job
            // for the outer directory will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        // Dropping our sender lets the workers below terminate once the queue
        // (including jobs they enqueue themselves) has drained.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2713
    /// Refreshes the ignored flag for the direct children of `job.abs_path`,
    /// enqueues follow-up jobs for child directories, and writes any changed
    /// entries back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own `.gitignore`, if it has one on record.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                // Everything below an ignored directory is ignored regardless
                // of any deeper ignore files.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose ignored status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2756
    /// Computes the per-path change set between two snapshots, restricted to
    /// the given event paths and everything beneath them. Walks the two
    /// entry trees with parallel cursors, classifying each path as Added,
    /// Removed, Updated, or (before the initial scan completes)
    /// AddedOrUpdated.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            // Position both cursors at (or just before) the event path.
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path's subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        // Merge-style walk: whichever side is behind holds a
                        // path that exists only in that snapshot.
                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2823
2824 async fn progress_timer(&self, running: bool) {
2825 if !running {
2826 return futures::future::pending().await;
2827 }
2828
2829 #[cfg(any(test, feature = "test-support"))]
2830 if self.fs.is_fake() {
2831 return self.executor.simulate_random_delay().await;
2832 }
2833
2834 smol::Timer::after(Duration::from_millis(100)).await;
2835 }
2836}
2837
2838fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2839 let mut result = root_char_bag;
2840 result.extend(
2841 path.to_string_lossy()
2842 .chars()
2843 .map(|c| c.to_ascii_lowercase()),
2844 );
2845 result
2846}
2847
/// A unit of work for the directory scanner: one directory to read.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Ignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue follow-up jobs for child directories.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
2855
/// A unit of work for refreshing ignore statuses: one directory whose
/// children need their ignored flags recomputed.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory to process.
    abs_path: Arc<Path>,
    // Ignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue follow-up jobs for child directories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2861
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all pending FS events for the worktree have been
    /// processed (see the implementation for how this is achieved).
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2869
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then remove it and wait for that to be observed as well.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2910
/// Running totals accumulated while seeking through the entries tree; used
/// as the traversal cursor's dimension.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest path seen so far.
    max_path: &'a Path,
    // Total entries seen (files and directories, including ignored).
    count: usize,
    // Entries seen that are not ignored.
    visible_count: usize,
    // Files seen (including ignored).
    file_count: usize,
    // Files seen that are not ignored.
    visible_file_count: usize,
}
2919
2920impl<'a> TraversalProgress<'a> {
2921 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2922 match (include_ignored, include_dirs) {
2923 (true, true) => self.count,
2924 (true, false) => self.file_count,
2925 (false, true) => self.visible_count,
2926 (false, false) => self.visible_file_count,
2927 }
2928 }
2929}
2930
2931impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2932 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2933 self.max_path = summary.max_path.as_ref();
2934 self.count += summary.count;
2935 self.visible_count += summary.visible_count;
2936 self.file_count += summary.file_count;
2937 self.visible_file_count += summary.visible_file_count;
2938 }
2939}
2940
2941impl<'a> Default for TraversalProgress<'a> {
2942 fn default() -> Self {
2943 Self {
2944 max_path: Path::new(""),
2945 count: 0,
2946 visible_count: 0,
2947 file_count: 0,
2948 visible_file_count: 0,
2949 }
2950 }
2951}
2952
/// An iterator-like cursor over worktree entries, optionally filtering out
/// directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether to yield ignored entries.
    include_ignored: bool,
    // Whether to yield directory entries.
    include_dirs: bool,
}
2958
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until the filtered entry count reaches `offset`.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips all descendants of the current entry, stopping at the next
    /// entry (matching the filters) that is not contained within it.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep seeking if the landed-on entry is filtered out.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The current position, expressed as a count of entries matching the
    /// traversal's filters.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3004
3005impl<'a> Iterator for Traversal<'a> {
3006 type Item = &'a Entry;
3007
3008 fn next(&mut self) -> Option<Self::Item> {
3009 if let Some(item) = self.entry() {
3010 self.advance();
3011 Some(item)
3012 } else {
3013 None
3014 }
3015 }
3016}
3017
/// Seek targets for positioning a `Traversal`'s cursor within the entry tree.
#[derive(Debug)]
enum TraversalTarget {
    // Seek to the entry with this exact path.
    Path(&'a Path),
    // Seek to the first entry that does not descend from this path.
    PathSuccessor(&'a Path),
    // Seek by the count of entries matching the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3028
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    /// Compares this seek target against the cursor's current position.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Order by path directly.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            // Treat any position no longer inside `path`'s subtree as "found".
            TraversalTarget::PathSuccessor(path) => {
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            // Order by the filtered entry count.
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3051
/// Iterates over the direct children of `parent_path`, skipping each child's
/// own descendants.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3056
3057impl<'a> Iterator for ChildEntriesIter<'a> {
3058 type Item = &'a Entry;
3059
3060 fn next(&mut self) -> Option<Self::Item> {
3061 if let Some(item) = self.traversal.entry() {
3062 if item.path.starts_with(&self.parent_path) {
3063 self.traversal.advance_to_sibling();
3064 return Some(item);
3065 }
3066 }
3067 None
3068 }
3069}
3070
3071impl<'a> From<&'a Entry> for proto::Entry {
3072 fn from(entry: &'a Entry) -> Self {
3073 Self {
3074 id: entry.id.to_proto(),
3075 is_dir: entry.is_dir(),
3076 path: entry.path.to_string_lossy().into(),
3077 inode: entry.inode,
3078 mtime: Some(entry.mtime.into()),
3079 is_symlink: entry.is_symlink,
3080 is_ignored: entry.is_ignored,
3081 }
3082 }
3083}
3084
3085impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3086 type Error = anyhow::Error;
3087
3088 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3089 if let Some(mtime) = entry.mtime {
3090 let kind = if entry.is_dir {
3091 EntryKind::Dir
3092 } else {
3093 let mut char_bag = *root_char_bag;
3094 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3095 EntryKind::File(char_bag)
3096 };
3097 let path: Arc<Path> = PathBuf::from(entry.path).into();
3098 Ok(Entry {
3099 id: ProjectEntryId::from_proto(entry.id),
3100 kind,
3101 path,
3102 inode: entry.inode,
3103 mtime: mtime.into(),
3104 is_symlink: entry.is_symlink,
3105 is_ignored: entry.is_ignored,
3106 })
3107 } else {
3108 Err(anyhow!(
3109 "missing mtime in remote worktree entry {:?}",
3110 entry.path
3111 ))
3112 }
3113 }
3114}
3115
3116#[cfg(test)]
3117mod tests {
3118 use super::*;
3119 use fs::repository::FakeGitRepository;
3120 use fs::{FakeFs, RealFs};
3121 use gpui::{executor::Deterministic, TestAppContext};
3122 use pretty_assertions::assert_eq;
3123 use rand::prelude::*;
3124 use serde_json::json;
3125 use std::{env, fmt::Write};
3126 use util::{http::FakeHttpClient, test::temp_tree};
3127
    // Verifies that traversal respects `.gitignore`: `a/b` is hidden from the
    // non-ignored traversal but present when ignored entries are included.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Without ignored entries, `a/b` is excluded.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // With ignored entries, `a/b` appears.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3185
    // Verifies that the scanner records circular symlinks as entries without
    // recursing into them, and that renaming such a link is picked up.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each of these links points back at its parent, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlinks are listed, but nothing beneath them.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the circular links should be reflected after the
        // event is processed.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3265
    // Verifies ignore statuses against the real filesystem, including rules
    // inherited from a `.gitignore` in an ancestor directory, both for the
    // initial scan and for files created afterwards.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Statuses after the initial scan.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // New files created after the scan should get the same treatment.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3344
    // Verifies git repository discovery: the innermost `.git` wins, changes
    // inside `.git` bump the repository's scan id, and deleting `.git`
    // removes the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // A file outside any repository has none.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // Files in a nested repository resolve to the inner `.git`.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        // Touching something inside `.git` should bump the repo's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Removing `.git` removes the repository.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3422
3423 #[test]
3424 fn test_changed_repos() {
3425 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3426 GitRepositoryEntry {
3427 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3428 scan_id,
3429 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3430 git_dir_path: git_dir_path.as_ref().into(),
3431 }
3432 }
3433
3434 let prev_repos: Vec<GitRepositoryEntry> = vec![
3435 fake_entry("/.git", 0),
3436 fake_entry("/a/.git", 0),
3437 fake_entry("/a/b/.git", 0),
3438 ];
3439
3440 let new_repos: Vec<GitRepositoryEntry> = vec![
3441 fake_entry("/a/.git", 1),
3442 fake_entry("/a/b/.git", 0),
3443 fake_entry("/a/c/.git", 0),
3444 ];
3445
3446 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3447
3448 // Deletion retained
3449 assert!(res
3450 .iter()
3451 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3452 .is_some());
3453
3454 // Update retained
3455 assert!(res
3456 .iter()
3457 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3458 .is_some());
3459
3460 // Addition retained
3461 assert!(res
3462 .iter()
3463 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3464 .is_some());
3465
3466 // Nochange, not retained
3467 assert!(res
3468 .iter()
3469 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3470 .is_none());
3471 }
3472
3473 #[gpui::test]
3474 async fn test_write_file(cx: &mut TestAppContext) {
3475 let dir = temp_tree(json!({
3476 ".git": {},
3477 ".gitignore": "ignored-dir\n",
3478 "tracked-dir": {},
3479 "ignored-dir": {}
3480 }));
3481
3482 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3483
3484 let tree = Worktree::local(
3485 client,
3486 dir.path(),
3487 true,
3488 Arc::new(RealFs),
3489 Default::default(),
3490 &mut cx.to_async(),
3491 )
3492 .await
3493 .unwrap();
3494 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3495 .await;
3496 tree.flush_fs_events(cx).await;
3497
3498 tree.update(cx, |tree, cx| {
3499 tree.as_local().unwrap().write_file(
3500 Path::new("tracked-dir/file.txt"),
3501 "hello".into(),
3502 Default::default(),
3503 cx,
3504 )
3505 })
3506 .await
3507 .unwrap();
3508 tree.update(cx, |tree, cx| {
3509 tree.as_local().unwrap().write_file(
3510 Path::new("ignored-dir/file.txt"),
3511 "world".into(),
3512 Default::default(),
3513 cx,
3514 )
3515 })
3516 .await
3517 .unwrap();
3518
3519 tree.read_with(cx, |tree, _| {
3520 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3521 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3522 assert!(!tracked.is_ignored);
3523 assert!(ignored.is_ignored);
3524 });
3525 }
3526
3527 #[gpui::test(iterations = 30)]
3528 async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
3529 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3530
3531 let fs = FakeFs::new(cx.background());
3532 fs.insert_tree(
3533 "/root",
3534 json!({
3535 "b": {},
3536 "c": {},
3537 "d": {},
3538 }),
3539 )
3540 .await;
3541
3542 let tree = Worktree::local(
3543 client,
3544 "/root".as_ref(),
3545 true,
3546 fs,
3547 Default::default(),
3548 &mut cx.to_async(),
3549 )
3550 .await
3551 .unwrap();
3552
3553 let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3554
3555 let entry = tree
3556 .update(cx, |tree, cx| {
3557 tree.as_local_mut()
3558 .unwrap()
3559 .create_entry("a/e".as_ref(), true, cx)
3560 })
3561 .await
3562 .unwrap();
3563 assert!(entry.is_dir());
3564
3565 cx.foreground().run_until_parked();
3566 tree.read_with(cx, |tree, _| {
3567 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3568 });
3569
3570 let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3571 let update = snapshot2.build_update(&snapshot1, 0, 0, true);
3572 snapshot1.apply_remote_update(update).unwrap();
3573 assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
3574 }
3575
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Fuzz-test knobs: number of random worktree mutations, and number of
        // random FS entries to seed the tree with. Overridable via env vars
        // for longer runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot, taken while the initial scan may still be running.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            // Mutations may legitimately fail mid-scan (e.g. the chosen entry
            // no longer exists), so errors are logged rather than unwrapped.
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally verify that a remote-style update brings the
            // baseline snapshot in sync with the current state.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // After scanning completes, invariants must still hold and one final
        // update must reconcile the baseline snapshot exactly.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3652
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Fuzz-test knobs, overridable via env vars for longer runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            // Mirror every change event into `paths` and assert the mirror
            // always matches the worktree's actual path list afterwards.
            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // `paths` is kept sorted, so binary search yields the
                        // position to insert at, remove from, or inspect —
                        // whether or not the path is currently present.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                // An Added path must not already be present.
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                // A Removed path must currently be present.
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                // Insert only if not already present.
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Interleave FS mutations with partial flushes of the fake FS's
        // buffered events, occasionally saving snapshots for later
        // incremental-update validation.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Let all remaining events settle, then check internal consistency.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A fresh scan of the same FS must produce an identical snapshot.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored snapshot must be able to catch up to the final state
        // via a single remote-style update, with or without ignored entries.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the stored snapshot first, since
                // the update being tested will not include them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
3807
    // Apply one random mutation to the worktree through its public API —
    // delete an entry, rename an entry, or create/overwrite an entry — and
    // return the task that performs it.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        // Pick a random entry to operate on. NOTE(review): the `false`
        // argument presumably excludes ignored entries (cf. `files(false, _)`
        // in `check_invariants`) — confirm.
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3 of the time: delete, unless we picked the worktree root
            // (whose path is the empty path).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3 of the time: rename into a random location.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into its own subtree.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Otherwise: create a child under a directory, or overwrite an
            // existing file with empty contents.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
3867
    // Apply one random mutation directly to the fake filesystem: create a new
    // file or dir, write a `.gitignore`, rename a path, or delete a path.
    // `insertion_probability` biases the mutation toward creation.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition all existing paths under `root_path` into files and dirs.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Creation is forced while only the root dir exists; otherwise it is
        // chosen with `insertion_probability`.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Rarely: write a `.gitignore` into a random dir, listing a
            // random subset of the files and dirs beneath it (with paths
            // relative to that dir).
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // NOTE(review): exclusive range here (vs. the inclusive range
                // for files above). `subdirs` always contains
                // `ignore_dir_path` itself, so this never ignores every
                // subdir — confirm the asymmetry is intentional.
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Otherwise: rename or delete an existing path. `dirs[1..]`
            // keeps the root dir itself from being chosen.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Move to a random dir that is not inside the moved path.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally replace an existing dir wholesale instead of
                // creating a fresh name under the chosen parent.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4026
4027 fn gen_name(rng: &mut impl Rng) -> String {
4028 (0..6)
4029 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4030 .map(char::from)
4031 .collect()
4032 }
4033
    impl LocalSnapshot {
        // Test-only sanity checks: verify that the snapshot's redundant
        // indices and its various traversal orders all agree.
        fn check_invariants(&self) {
            // The path-keyed and id-keyed trees must contain exactly the same
            // (path, id) pairs.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // The `files` iterators must visit exactly the file entries, in
            // cursor order; `files(false, _)` must additionally skip ignored
            // files.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            // Both iterators must be fully consumed — no extra files.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree from the root using `child_entries` (children are
            // spliced into the stack at a fixed index) and check that the
            // visit order matches the path-sorted cursor order.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The public `entries` traversal must agree with the raw cursor.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked gitignore must still have a corresponding parent
            // dir entry and a `.gitignore` file entry in the snapshot.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        // Flatten the snapshot into sorted (path, inode, is_ignored) tuples,
        // optionally filtering out ignored entries, for equality assertions.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4108}