1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use parking_lot::Mutex;
30use postage::{
31 barrier,
32 prelude::{Sink as _, Stream as _},
33 watch,
34};
35use smol::channel::{self, Sender};
36use std::{
37 any::Any,
38 cmp::{self, Ordering},
39 convert::TryFrom,
40 ffi::OsStr,
41 fmt,
42 future::Future,
43 mem,
44 ops::{Deref, DerefMut},
45 path::{Path, PathBuf},
46 pin::Pin,
47 sync::{
48 atomic::{AtomicUsize, Ordering::SeqCst},
49 Arc,
50 },
51 time::{Duration, SystemTime},
52};
53use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
54use util::{paths::HOME, ResultExt, TryFutureExt};
55
/// Identifies a worktree within a project. Derived from the worktree model's id.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
58
/// A directory tree of files, either residing on the local filesystem or
/// mirrored from a remote collaborator's machine.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
63
/// A worktree backed by the local filesystem, kept up to date by a background
/// scanner task that watches for filesystem events.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Sends batches of paths that need rescanning; the paired barrier sender is
    // dropped by the scanner once the batch has been processed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch channel reporting whether a scan is currently in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present while this worktree is shared with remote collaborators.
    share: Option<ShareState>,
    // Diagnostics per path, grouped by language server id.
    diagnostics: HashMap<Arc<Path>, Vec<(usize, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>)>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
76
/// A worktree mirroring a remote collaborator's tree. Updates are applied to
/// `background_snapshot` off the main thread and then copied into `snapshot`.
pub struct RemoteWorktree {
    snapshot: Snapshot,
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once the host has disconnected; incoming updates are then dropped.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters keyed by scan id, resolved once that scan has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    visible: bool,
    disconnected: bool,
}
89
/// An immutable view of a worktree's entries at a point in time. Entries are
/// indexed both by path and by id for efficient lookup in either direction.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    root_name: String,
    // Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
111
/// A git repository discovered within the worktree, along with the scan in
/// which it was last observed.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    // Scan id at which this repository entry was last updated.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
123
impl std::fmt::Debug for GitRepositoryEntry {
    // Manual impl: `repo` is a trait object and cannot be derived, so only the
    // path fields are included in the debug output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GitRepositoryEntry")
            .field("content_path", &self.content_path)
            .field("git_dir_path", &self.git_dir_path)
            .finish()
    }
}
132
/// A `Snapshot` augmented with local-only state: gitignore files, discovered
/// git repositories, and bookkeeping used while scanning the filesystem.
#[derive(Debug)]
pub struct LocalSnapshot {
    // Parsed gitignore per parent directory, with the scan id it was read at.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    git_repositories: Vec<GitRepositoryEntry>,
    // Maps filesystem inode numbers to the entry ids they previously had, so
    // that moved entries keep stable ids across scans.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
141
142impl Clone for LocalSnapshot {
143 fn clone(&self) -> Self {
144 Self {
145 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
146 git_repositories: self.git_repositories.iter().cloned().collect(),
147 removed_entry_ids: self.removed_entry_ids.clone(),
148 next_entry_id: self.next_entry_id.clone(),
149 snapshot: self.snapshot.clone(),
150 }
151 }
152}
153
// Expose the inner `Snapshot`'s read API directly on `LocalSnapshot`.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
161
// Expose the inner `Snapshot`'s mutation API directly on `LocalSnapshot`.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
167
/// Messages sent from the background scanner to the worktree model.
enum ScanState {
    Started,
    Updated {
        snapshot: LocalSnapshot,
        changes: HashMap<Arc<Path>, PathChange>,
        // When present, dropping it unblocks whoever requested this rescan.
        barrier: Option<barrier::Sender>,
        // Whether the scanner still has more work to do after this update.
        scanning: bool,
    },
}
177
/// State held while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    // Publishing a new snapshot here triggers an update broadcast to guests.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signalled to resume sending updates after a failed request.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
184
/// Events emitted by a `Worktree` model.
pub enum Event {
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
189
impl Entity for Worktree {
    type Event = Event;
}
193
194impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem, spawning a
    /// background task that scans the tree and watches it for changes.
    ///
    /// Returns an error if the root path cannot be stat'ed.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    // Scan id 1 with completed id 0 marks the initial scan as
                    // started but not yet finished.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry, if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner into this model,
            // updating the snapshot and notifying observers.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier unblocks any caller
                                // waiting for this batch of path changes.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Run the filesystem scanner on the background executor.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
306
    /// Creates a worktree mirroring a remote collaborator's tree. Incoming
    /// protocol updates (see `update_from_remote`) are applied to a background
    /// snapshot and then copied into the foreground model.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply protocol updates to the background snapshot off the main
            // thread, signalling `snapshot_updated_tx` after each one.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Whenever the background snapshot changes, copy it into the model,
            // notify observers, and resolve any snapshot subscriptions whose
            // scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Subscriptions are kept sorted by scan id, so we
                            // can stop at the first unsatisfied one.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
388
389 pub fn as_local(&self) -> Option<&LocalWorktree> {
390 if let Worktree::Local(worktree) = self {
391 Some(worktree)
392 } else {
393 None
394 }
395 }
396
397 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
398 if let Worktree::Remote(worktree) = self {
399 Some(worktree)
400 } else {
401 None
402 }
403 }
404
405 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
406 if let Worktree::Local(worktree) = self {
407 Some(worktree)
408 } else {
409 None
410 }
411 }
412
413 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
414 if let Worktree::Remote(worktree) = self {
415 Some(worktree)
416 } else {
417 None
418 }
419 }
420
    /// Returns `true` if this worktree resides on the local filesystem.
    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }
424
425 pub fn is_remote(&self) -> bool {
426 !self.is_local()
427 }
428
    /// Returns a clone of the worktree's current `Snapshot`, discarding the
    /// local-only state for local worktrees.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
435
    /// Returns the id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
442
    /// Returns the latest scan id whose scan (and all preceding scans) completed.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
449
    /// Returns whether this worktree is shown in the project panel.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
456
    /// Returns this collaborator's replica id; the host is always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
463
    /// Iterates over the diagnostic summaries for every path in the worktree
    /// that currently has diagnostics.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.0.clone(), *summary))
    }
474
    /// Returns the absolute path of the worktree's root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
481}
482
483impl LocalWorktree {
    /// Returns whether `path` lies within this worktree's root directory.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
487
488 fn absolutize(&self, path: &Path) -> PathBuf {
489 if path.file_name().is_some() {
490 self.abs_path.join(path)
491 } else {
492 self.abs_path.to_path_buf()
493 }
494 }
495
    /// Loads the file at `path` into a new `Buffer` model, computing its git
    /// diff against the index if the file lives in a repository.
    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                // Replica id 0: buffers are created by the host.
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
513
514 pub fn diagnostics_for_path(
515 &self,
516 path: &Path,
517 ) -> Vec<(usize, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>)> {
518 self.diagnostics.get(path).cloned().unwrap_or_default()
519 }
520
    /// Replaces the diagnostics reported by `server_id` for `worktree_path`,
    /// updating the per-path summary and broadcasting it to collaborators when
    /// the worktree is shared.
    ///
    /// Returns `true` if the summary changed (was non-empty before or after).
    pub fn update_diagnostics(
        &mut self,
        server_id: usize,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        self.diagnostics.remove(&worktree_path);
        let old_summary = self
            .diagnostic_summaries
            .remove(&PathKey(worktree_path.clone()))
            .unwrap_or_default();
        let new_summary = DiagnosticSummary::new(server_id, &diagnostics);
        if !new_summary.is_empty() {
            self.diagnostic_summaries
                .insert(PathKey(worktree_path.clone()), new_summary);
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            // Keep the per-server diagnostics sorted by server id.
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }

                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        let updated = !old_summary.is_empty() || !new_summary.is_empty();
        if updated {
            // Best-effort broadcast to guests; failures are logged, not returned.
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(updated)
    }
570
    /// Installs a new snapshot produced by the background scanner, republishes
    /// it to collaborators if shared, and emits an event for any git
    /// repositories that changed between the old and new snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos = Self::changed_repos(
            &self.snapshot.git_repositories,
            &new_snapshot.git_repositories,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
586
    /// Computes which git repositories differ between two snapshots, keyed by
    /// `.git` directory path. A repository is reported if it appears in only
    /// one of the slices, or if its `scan_id` differs (added, removed, or
    /// rescanned).
    fn changed_repos(
        old_repos: &[GitRepositoryEntry],
        new_repos: &[GitRepositoryEntry],
    ) -> Vec<GitRepositoryEntry> {
        // Inserts into `updated` every entry of `a` with no counterpart in `b`
        // that has the same git dir path and scan id.
        fn diff<'a>(
            a: &'a [GitRepositoryEntry],
            b: &'a [GitRepositoryEntry],
            updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
        ) {
            for a_repo in a {
                let matched = b.iter().find(|b_repo| {
                    a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
                }
            }
        }

        let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();

        // Diffing in both directions catches additions, removals, and scan-id
        // changes; keying by path deduplicates entries flagged by both passes.
        diff(old_repos, new_repos, &mut updated);
        diff(new_repos, old_repos, &mut updated);

        updated.into_values().collect()
    }
614
615 pub fn scan_complete(&self) -> impl Future<Output = ()> {
616 let mut is_scanning_rx = self.is_scanning.1.clone();
617 async move {
618 let mut is_scanning = is_scanning_rx.borrow().clone();
619 while is_scanning {
620 if let Some(value) = is_scanning_rx.recv().await {
621 is_scanning = value;
622 } else {
623 break;
624 }
625 }
626 }
627 }
628
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
632
    /// Builds the protocol metadata describing this worktree for collaborators.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
641
    /// Loads the file at `path` from disk, returning its `File` metadata, text
    /// contents, and git index text (diff base) when the file belongs to a
    /// repository in this worktree.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            // Read the index text on the background executor, since it may
            // block on git's own locking.
            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
690
691 pub fn save_buffer(
692 &self,
693 buffer_handle: ModelHandle<Buffer>,
694 path: Arc<Path>,
695 has_changed_file: bool,
696 cx: &mut ModelContext<Worktree>,
697 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
698 let handle = cx.handle();
699 let buffer = buffer_handle.read(cx);
700
701 let rpc = self.client.clone();
702 let buffer_id = buffer.remote_id();
703 let project_id = self.share.as_ref().map(|share| share.project_id);
704
705 let text = buffer.as_rope().clone();
706 let fingerprint = text.fingerprint();
707 let version = buffer.version();
708 let save = self.write_file(path, text, buffer.line_ending(), cx);
709
710 cx.as_mut().spawn(|mut cx| async move {
711 let entry = save.await?;
712
713 if has_changed_file {
714 let new_file = Arc::new(File {
715 entry_id: entry.id,
716 worktree: handle,
717 path: entry.path,
718 mtime: entry.mtime,
719 is_local: true,
720 is_deleted: false,
721 });
722
723 if let Some(project_id) = project_id {
724 rpc.send(proto::UpdateBufferFile {
725 project_id,
726 buffer_id,
727 file: Some(new_file.to_proto()),
728 })
729 .log_err();
730 }
731
732 buffer_handle.update(&mut cx, |buffer, cx| {
733 if has_changed_file {
734 buffer.file_updated(new_file, cx).detach();
735 }
736 });
737 }
738
739 if let Some(project_id) = project_id {
740 rpc.send(proto::BufferSaved {
741 project_id,
742 buffer_id,
743 version: serialize_version(&version),
744 mtime: Some(entry.mtime.into()),
745 fingerprint: serialize_fingerprint(fingerprint),
746 })?;
747 }
748
749 buffer_handle.update(&mut cx, |buffer, cx| {
750 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
751 });
752
753 Ok((version, fingerprint, entry.mtime))
754 })
755 }
756
    /// Creates a new file or directory at `path` (relative to the worktree
    /// root), then rescans the path and returns its new entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Create an empty file with default contents and line ending.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
783
    /// Writes `text` to `path` with the given line ending, then rescans the
    /// path and returns its updated entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
806
    /// Deletes the file or directory for `entry_id` from disk, then asks the
    /// scanner to reprocess the path and waits until it has done so.
    ///
    /// Returns `None` if no entry with that id exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root so the scanner sees the same path form the
            // filesystem watcher reports.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // The barrier resolves when the scanner has processed the deletion.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
849
    /// Renames the entry `entry_id` to `new_path` on disk, then rescans both
    /// old and new paths and returns the entry at the new location.
    ///
    /// Returns `None` if no entry with that id exists.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
876
    /// Recursively copies the entry `entry_id` to `new_path` on disk, then
    /// rescans the destination and returns its new entry.
    ///
    /// Returns `None` if no entry with that id exists.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
908
    /// Asks the background scanner to rescan `path` (and `old_path`, for
    /// renames), waits for the scan to complete, and returns the refreshed
    /// entry for `path`.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            // Canonicalize so the paths match what the filesystem watcher reports.
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path with no file name refers to the worktree root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed these paths.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
946
    /// Starts (or resumes) sharing this worktree with collaborators on
    /// `project_id`. Spawns a task that streams snapshot diffs to the server,
    /// retrying after failures once `resume_updates` is signalled.
    ///
    /// The returned task resolves once the first snapshot has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and nudge the update loop
            // in case it is waiting to resume after an error.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send the current diagnostic summaries up front.
            for (path, summary) in self.diagnostic_summaries.iter() {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id,
                    summary: Some(summary.to_proto(&path.0)),
                }) {
                    return Task::ready(Err(e));
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first diff transmits
                    // the entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Small chunks in tests exercise the splitting logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // Resolve the share task after the first full snapshot.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1030
    /// Stops sharing this worktree; dropping the share state cancels the
    /// update-streaming task.
    pub fn unshare(&mut self) {
        self.share.take();
    }
1034
    /// Returns whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1038}
1039
1040impl RemoteWorktree {
    /// Returns a clone of the foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1044
    /// Marks the host as disconnected: stops accepting updates and drops all
    /// pending snapshot subscriptions (their waiters will see an error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
1050
    /// Asks the host to save `buffer_handle`, then applies the returned
    /// version, fingerprint, and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1083
    /// Queues a worktree update from the host for asynchronous application.
    /// Silently dropped once the host has disconnected (`updates_tx` is None).
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }
1091
    /// Returns whether the snapshot for `scan_id` (and all earlier scans) has
    /// already been applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }
1095
    /// Returns a future that resolves once the snapshot for `scan_id` has been
    /// observed. Errors if the host disconnects before that happens (the
    /// sender is dropped, so the oneshot receive fails).
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            // Keep subscriptions sorted by scan id so the update loop can pop
            // satisfied ones from the front.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1116
1117 pub fn update_diagnostic_summary(
1118 &mut self,
1119 path: Arc<Path>,
1120 summary: &proto::DiagnosticSummary,
1121 ) {
1122 let summary = DiagnosticSummary {
1123 language_server_id: summary.language_server_id as usize,
1124 error_count: summary.error_count as usize,
1125 warning_count: summary.warning_count as usize,
1126 };
1127 if summary.is_empty() {
1128 self.diagnostic_summaries.remove(&PathKey(path));
1129 } else {
1130 self.diagnostic_summaries.insert(PathKey(path), summary);
1131 }
1132 }
1133
    /// Inserts an entry received from the host once the snapshot for `scan_id`
    /// has been observed, keeping foreground and background snapshots in sync.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }
1152
    /// Deletes the entry with `id` once the snapshot for `scan_id` has been
    /// observed, keeping foreground and background snapshots in sync.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1171}
1172
impl Snapshot {
    /// The id of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Absolute path of the worktree root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts (or replaces) an entry received over the wire, keeping the
    /// path-keyed and id-keyed trees in sync.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        if let Some(old_entry) = old_entry {
            // The entry may have moved: drop its previous path key.
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes the entry with the given id along with everything beneath its
    /// path, returning the removed entry's path if it existed.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // Skip over (and unregister) every descendant of the removed path.
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

    /// Applies an incremental update received from the host of a remote
    /// worktree, advancing this snapshot's scan id accordingly.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry already exists under another path, remove the stale
            // path key before inserting the new one.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    /// Total number of file entries (including ignored files).
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of file entries that are not gitignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Returns a traversal starting at the `start_offset`-th entry matching
    /// the given inclusion flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Returns a traversal positioned at the given path.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, files and directories alike.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, excluding the root entry
    /// (whose path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the immediate children of the entry at `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root, if one has been scanned.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// File name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Id of the most recently started scan reflected in this snapshot.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Looks up the entry at exactly the given worktree-relative path.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal seeks to the nearest entry; require an exact match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// Returns the inode of the entry at the given path, if any.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1369
impl LocalSnapshot {
    // Gives the most specific git repository for a given path
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() //git_repository is ordered lexicographically
            .find(|repo| repo.manages(path))
            .cloned()
    }

    /// Returns the repository whose `.git` directory contains `path`, if any.
    pub(crate) fn repo_with_dot_git_containing(
        &mut self,
        path: &Path,
    ) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Builds an update message containing this snapshot's entire contents,
    /// as would be sent when a worktree is first shared.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Computes the delta between this snapshot and an earlier snapshot
    /// `other`, producing an update message with changed and removed entries.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        // Merge-walk both id-ordered sequences to diff them in a single pass.
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present here, absent in `other`: newly added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: changed only if rescanned since.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in `other`: removed.
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Inserts a freshly scanned entry, reloading gitignore state when the
    /// entry is a `.gitignore` file and reusing entry ids where possible.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote a directory that was already fully scanned back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the scanned children of `parent_path`, marking the parent as a
    /// fully scanned directory and registering any gitignore or git repository
    /// found inside it.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // The parent is no longer a directory; nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        // A `.git` directory marks the root of a git repository one level up.
        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Preserves entry identity across renames (matched by inode) and rescans
    /// (matched by path), so downstream consumers see stable ids.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and all of its descendants from the snapshot, and bumps
    /// the scan id of any gitignore or git repository affected by the removal.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember removed ids by inode so a re-created file can reuse its id.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    /// Collects the inodes of all ancestors of `path` that exist in the
    /// snapshot (used to detect symlink cycles during scanning).
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignores that applies to `abs_path`, walking from
    /// the filesystem root down and short-circuiting once an ancestor is
    /// itself ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                // An ignored directory ignores everything beneath it.
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    /// All git repositories discovered in this worktree, ordered by path.
    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}
1686
impl GitRepositoryEntry {
    // Note that these paths should be relative to the worktree root.
    /// Whether `path` lies inside this repository's working directory.
    pub(crate) fn manages(&self, path: &Path) -> bool {
        path.starts_with(self.content_path.as_ref())
    }

    // Note that this path should be relative to the worktree root.
    /// Whether `path` lies inside this repository's `.git` directory.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}
1698
1699async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1700 let contents = fs.load(abs_path).await?;
1701 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1702 let mut builder = GitignoreBuilder::new(parent);
1703 for line in contents.lines() {
1704 builder.add_line(Some(abs_path.into()), line)?;
1705 }
1706 Ok(builder.build()?)
1707}
1708
1709impl WorktreeId {
1710 pub fn from_usize(handle_id: usize) -> Self {
1711 Self(handle_id)
1712 }
1713
1714 pub(crate) fn from_proto(id: u64) -> Self {
1715 Self(id as usize)
1716 }
1717
1718 pub fn to_proto(&self) -> u64 {
1719 self.0 as u64
1720 }
1721
1722 pub fn to_usize(&self) -> usize {
1723 self.0
1724 }
1725}
1726
impl fmt::Display for WorktreeId {
    // Delegate to the inner usize so formatting flags are respected.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}
1732
1733impl Deref for Worktree {
1734 type Target = Snapshot;
1735
1736 fn deref(&self) -> &Self::Target {
1737 match self {
1738 Worktree::Local(worktree) => &worktree.snapshot,
1739 Worktree::Remote(worktree) => &worktree.snapshot,
1740 }
1741 }
1742}
1743
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    // Expose the snapshot's API directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1751
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    // Expose the snapshot's API directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1759
impl fmt::Debug for LocalWorktree {
    // Debug output is just the snapshot; other fields are runtime plumbing.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1765
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Wrapper types that render the sum trees in a compact, readable form.
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl<'a> fmt::Debug for EntriesByPath<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // Render as a map from path to entry id.
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl<'a> fmt::Debug for EntriesById<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}
1793
/// A handle to a file within a worktree, implementing `language::File`.
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    pub(crate) is_local: bool,
    pub(crate) is_deleted: bool,
}
1803
impl language::File for File {
    /// Returns `self` as a local file, or `None` for files in remote worktrees.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a path suitable for display: visible worktrees are prefixed with
    /// the worktree's root name, hidden ones with its absolute path (with the
    /// home directory abbreviated to `~` for local worktrees).
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // The root entry itself has an empty relative path; don't append it.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission to collaborators.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1871
impl language::LocalFile for File {
    /// Absolute path of this file on disk.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that a buffer was reloaded from disk, but only
    /// when the worktree is currently being shared.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
1915
impl File {
    /// Reconstructs a file handle from its wire representation, validating
    /// that it refers to the given remote worktree.
    pub fn from_proto(
        proto: rpc::proto::File,
        worktree: ModelHandle<Worktree>,
        cx: &AppContext,
    ) -> Result<Self> {
        let worktree_id = worktree
            .read(cx)
            .as_remote()
            .ok_or_else(|| anyhow!("not remote"))?
            .id();

        if worktree_id.to_proto() != proto.worktree_id {
            return Err(anyhow!("worktree id does not match file"));
        }

        Ok(Self {
            worktree,
            path: Path::new(&proto.path).into(),
            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
            entry_id: ProjectEntryId::from_proto(proto.entry_id),
            is_local: false,
            is_deleted: proto.is_deleted,
        })
    }

    /// Downcasts a generic `language::File` back to this concrete type.
    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
        file.and_then(|f| f.as_any().downcast_ref())
    }

    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
        self.worktree.read(cx).id()
    }

    /// The project entry backing this file, or `None` once the file has been
    /// deleted from disk.
    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
        if self.is_deleted {
            None
        } else {
            Some(self.entry_id)
        }
    }
}
1958
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether this entry is matched by a gitignore rule.
    pub is_ignored: bool,
}
1969
/// The kind of filesystem object an [`Entry`] represents.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose contents have not been scanned yet.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying a bag of its path's characters for fuzzy matching.
    File(CharBag),
}
1976
/// Describes how a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    /// Either added or updated; used when the previous state is unknown.
    AddedOrUpdated,
}
1984
1985impl Entry {
1986 fn new(
1987 path: Arc<Path>,
1988 metadata: &fs::Metadata,
1989 next_entry_id: &AtomicUsize,
1990 root_char_bag: CharBag,
1991 ) -> Self {
1992 Self {
1993 id: ProjectEntryId::new(next_entry_id),
1994 kind: if metadata.is_dir {
1995 EntryKind::PendingDir
1996 } else {
1997 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1998 },
1999 path,
2000 inode: metadata.inode,
2001 mtime: metadata.mtime,
2002 is_symlink: metadata.is_symlink,
2003 is_ignored: false,
2004 }
2005 }
2006
2007 pub fn is_dir(&self) -> bool {
2008 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2009 }
2010
2011 pub fn is_file(&self) -> bool {
2012 matches!(self.kind, EntryKind::File(_))
2013 }
2014}
2015
2016impl sum_tree::Item for Entry {
2017 type Summary = EntrySummary;
2018
2019 fn summary(&self) -> Self::Summary {
2020 let visible_count = if self.is_ignored { 0 } else { 1 };
2021 let file_count;
2022 let visible_file_count;
2023 if self.is_file() {
2024 file_count = 1;
2025 visible_file_count = visible_count;
2026 } else {
2027 file_count = 0;
2028 visible_file_count = 0;
2029 }
2030
2031 EntrySummary {
2032 max_path: self.path.clone(),
2033 count: 1,
2034 visible_count,
2035 file_count,
2036 visible_file_count,
2037 }
2038 }
2039}
2040
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are keyed (and therefore ordered) by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2048
/// Aggregated statistics for a subtree of entries, maintained by the sum tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest path in the subtree (paths are the sort key).
    max_path: Arc<Path>,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
2057
2058impl Default for EntrySummary {
2059 fn default() -> Self {
2060 Self {
2061 max_path: Arc::from(Path::new("")),
2062 count: 0,
2063 visible_count: 0,
2064 file_count: 0,
2065 visible_file_count: 0,
2066 }
2067 }
2068}
2069
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // `rhs` covers entries to the right, so its max path wins.
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2081
/// An id-ordered index record pointing at an [`Entry`], used for lookups by id
/// and for diffing snapshots via per-entry scan ids.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last modified.
    scan_id: usize,
}
2089
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2097
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    // This index tree is keyed (and therefore ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2105
/// Summary for the id-ordered index tree: the greatest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2110
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // `summary` covers items to the right, so its max id wins.
        self.max_id = summary.max_id;
    }
}
2118
// Allows seeking through the id-ordered tree directly by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2124
/// A worktree-relative path used as the sort key for entries.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2127
impl Default for PathKey {
    // The empty path, which sorts before all other paths.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2133
// Allows seeking through the path-ordered tree directly by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2139
/// Scans a local worktree's directory tree on background threads and keeps
/// its snapshot in sync with filesystem events.
struct BackgroundScanner {
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The snapshot as of the last status update, plus paths changed since,
    /// used to compute deltas for subscribers.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2149
2150impl BackgroundScanner {
    /// Creates a scanner over the given initial snapshot. Scanning does not
    /// begin until `run` is called.
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            // The initial snapshot doubles as the baseline for the first delta.
            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }
2168
    /// Drives the scanner: performs the initial recursive scan, then processes
    /// filesystem events and refresh requests until the worktree is dropped.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor's gitignore rules cover the root itself, mark the
            // whole worktree as ignored.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping the sender lets `scan_dirs` terminate once the queue drains.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    // Batch up any further events that are already pending.
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2260
    /// Rescans the given paths and reports the result, releasing `barrier` so
    /// the requester can observe the refreshed snapshot. Returns `false` when
    /// the status channel has closed.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2265
    /// Handles a batch of filesystem events: reloads the affected entries,
    /// rescans any directories that need it, refreshes ignore statuses, prunes
    /// stale git repositories, and publishes a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            paths.sort_unstable();
            // Accumulate changed paths so the next status update can report them.
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();
        // Drop repositories whose `.git` directory no longer exists.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
        snapshot.git_repositories = git_repositories;
        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;
        drop(snapshot);

        self.send_status_update(false, None);
    }
2290
    /// Drains the scan queue using one worker per CPU, interleaving refresh
    /// requests and (optionally) periodic progress updates.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            // All receivers are gone; nobody cares about this scan anymore.
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent this update.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2363
2364 fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2365 let mut prev_state = self.prev_state.lock();
2366 let snapshot = self.snapshot.lock().clone();
2367 let mut old_snapshot = snapshot.snapshot.clone();
2368 mem::swap(&mut old_snapshot, &mut prev_state.0);
2369 let changed_paths = mem::take(&mut prev_state.1);
2370 let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
2371 self.status_updates_tx
2372 .unbounded_send(ScanState::Updated {
2373 snapshot,
2374 changes,
2375 scanning,
2376 barrier,
2377 })
2378 .is_ok()
2379 }
2380
    /// Scans a single directory: builds entries for its children, loads any
    /// `.gitignore` it contains, and enqueues `ScanJob`s for subdirectories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot is pushed per child directory; `None` marks directories we
        // refuse to recurse into (recursive symlinks).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already processed,
                // to reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, it sorts near the beginning of the listing, so there
                // should rarely be many earlier entries to revisit. Update the ignore stack
                // associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // Jobs were pushed in the same order as directory entries,
                        // so each directory entry pairs with the next job slot.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            // Everything beneath an ignored directory is ignored.
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue subdirectory jobs only after this directory's entries are in
        // the snapshot.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2506
    /// Refreshes the snapshot's entries for the given absolute paths, removing
    /// entries that no longer exist and re-inserting those that do. When
    /// `scan_queue_tx` is provided, changed directories are also enqueued for
    /// a recursive rescan.
    ///
    /// Returns the affected worktree-relative paths, or `None` if the worktree
    /// root cannot be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sort so children follow their ancestors, then drop any path whose
        // ancestor is already present — reloading the ancestor covers it.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A non-recursive refresh completes synchronously here; only bump the
        // completed id if a scan wasn't already in flight.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    // A change inside a `.git` directory invalidates that
                    // repository's index; reload it and record the scan.
                    let scan_id = snapshot.scan_id;
                    if let Some(repo) = snapshot.repo_with_dot_git_containing(&path) {
                        repo.repo.lock().reload_index();
                        repo.scan_id = scan_id;
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Enqueue a recursive scan for changed directories,
                        // guarding against recursive symlinks via inodes.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            // The scan queue is unbounded, so this send completes immediately.
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2600
    /// Re-evaluates ignore statuses after `.gitignore` files changed: prunes
    /// records for deleted ignore files, then re-applies updated ones to all
    /// affected subtrees using one worker per CPU.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // An ignore file touched since the last completed scan means
                // its directory's subtree needs re-checking.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                // The `.gitignore` entry itself no longer exists; drop its record.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both our local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of a directory that is already queued; the
            // ancestor's job recurses into them anyway.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            // The queue is unbounded, so this send completes immediately.
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        // Drop the root sender so workers stop once all jobs — including jobs
        // spawned by other jobs — have been processed.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2678
    /// Applies the job's ignore stack to the children of its directory,
    /// writing back entries whose ignored flag flipped and enqueueing jobs for
    /// child directories.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own `.gitignore`, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    // Everything beneath an ignored directory is ignored.
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2721
    /// Diffs `old_snapshot` against `new_snapshot`, restricted to the subtrees
    /// rooted at `event_paths`, and classifies each differing path as added,
    /// removed, updated, or (during the initial scan) added-or-updated.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            // Walk both cursors in lockstep, classifying entries by how the
            // two sorted sequences interleave.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Both cursors have moved past the subtree rooted at
                        // `path`; this event path is fully processed.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2788
2789 async fn progress_timer(&self, running: bool) {
2790 if !running {
2791 return futures::future::pending().await;
2792 }
2793
2794 #[cfg(any(test, feature = "test-support"))]
2795 if self.fs.is_fake() {
2796 return self.executor.simulate_random_delay().await;
2797 }
2798
2799 smol::Timer::after(Duration::from_millis(100)).await;
2800 }
2801}
2802
2803fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2804 let mut result = root_char_bag;
2805 result.extend(
2806 path.to_string_lossy()
2807 .chars()
2808 .map(|c| c.to_ascii_lowercase()),
2809 );
2810 result
2811}
2812
/// A unit of work for the background scanner: traverse one directory and
/// enqueue jobs for its subdirectories.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
2820
/// A unit of work for re-applying ignore rules: update the ignored flags of
/// one directory's children, enqueueing further jobs for subdirectories.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose children should be re-checked.
    abs_path: Arc<Path>,
    // Ignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2826
/// Test-support extensions for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all file-system events generated so far have been processed
    /// by the worktree, so tests can make assertions against a settled state.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2834
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for the deletion to be observed too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2875
/// A `sum_tree` dimension tracking, for a position in the entry tree, the
/// rightmost path seen so far along with running entry counts under each
/// combination of the traversal filters.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest entry path at or before this position.
    max_path: &'a Path,
    // All entries, including ignored ones and directories.
    count: usize,
    // Non-ignored entries (files and directories).
    visible_count: usize,
    // All files, including ignored ones.
    file_count: usize,
    // Non-ignored files.
    visible_file_count: usize,
}
2884
2885impl<'a> TraversalProgress<'a> {
2886 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2887 match (include_ignored, include_dirs) {
2888 (true, true) => self.count,
2889 (true, false) => self.file_count,
2890 (false, true) => self.visible_count,
2891 (false, false) => self.visible_file_count,
2892 }
2893 }
2894}
2895
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // Adopt the summary's rightmost path and accumulate its counts.
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2905
impl<'a> Default for TraversalProgress<'a> {
    // The zero dimension: positioned before any entry, with all counts at zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2917
/// A filtered walk over a snapshot's entries, optionally skipping directories
/// and/or ignored entries.
pub struct Traversal<'a> {
    // Cursor over the snapshot's entries-by-path tree.
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
2923
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    /// Returns whether the cursor moved.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset()` reaches `offset`, counting only entries
    /// that match this traversal's filters.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, stopping at the next
    /// filter-matching entry outside it. Returns whether one was found.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek just past the current entry's descendants.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
            // The landed-on entry didn't match the filters; loop and skip its
            // subtree as well.
        }
        false
    }

    /// The entry at the current cursor position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of filter-matching entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2969
2970impl<'a> Iterator for Traversal<'a> {
2971 type Item = &'a Entry;
2972
2973 fn next(&mut self) -> Option<Self::Item> {
2974 if let Some(item) = self.entry() {
2975 self.advance();
2976 Some(item)
2977 } else {
2978 None
2979 }
2980 }
2981}
2982
/// Seek targets for a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget {
    // Seek to the given path.
    Path(&'a Path),
    // Seek to the first position after the given path's subtree.
    PathSuccessor(&'a Path),
    // Seek to the n-th entry matching the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2993
2994impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2995 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2996 match self {
2997 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2998 TraversalTarget::PathSuccessor(path) => {
2999 if !cursor_location.max_path.starts_with(path) {
3000 Ordering::Equal
3001 } else {
3002 Ordering::Greater
3003 }
3004 }
3005 TraversalTarget::Count {
3006 count,
3007 include_dirs,
3008 include_ignored,
3009 } => Ord::cmp(
3010 count,
3011 &cursor_location.count(*include_dirs, *include_ignored),
3012 ),
3013 }
3014 }
3015}
3016
/// Iterates over the child entries of a single directory by repeatedly
/// skipping each yielded entry's subtree.
struct ChildEntriesIter<'a> {
    // The directory whose children are being enumerated.
    parent_path: &'a Path,
    // Underlying traversal, positioned within the parent's subtree.
    traversal: Traversal<'a>,
}
3021
3022impl<'a> Iterator for ChildEntriesIter<'a> {
3023 type Item = &'a Entry;
3024
3025 fn next(&mut self) -> Option<Self::Item> {
3026 if let Some(item) = self.traversal.entry() {
3027 if item.path.starts_with(&self.parent_path) {
3028 self.traversal.advance_to_sibling();
3029 return Some(item);
3030 }
3031 }
3032 None
3033 }
3034}
3035
3036impl<'a> From<&'a Entry> for proto::Entry {
3037 fn from(entry: &'a Entry) -> Self {
3038 Self {
3039 id: entry.id.to_proto(),
3040 is_dir: entry.is_dir(),
3041 path: entry.path.to_string_lossy().into(),
3042 inode: entry.inode,
3043 mtime: Some(entry.mtime.into()),
3044 is_symlink: entry.is_symlink,
3045 is_ignored: entry.is_ignored,
3046 }
3047 }
3048}
3049
3050impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3051 type Error = anyhow::Error;
3052
3053 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3054 if let Some(mtime) = entry.mtime {
3055 let kind = if entry.is_dir {
3056 EntryKind::Dir
3057 } else {
3058 let mut char_bag = *root_char_bag;
3059 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3060 EntryKind::File(char_bag)
3061 };
3062 let path: Arc<Path> = PathBuf::from(entry.path).into();
3063 Ok(Entry {
3064 id: ProjectEntryId::from_proto(entry.id),
3065 kind,
3066 path,
3067 inode: entry.inode,
3068 mtime: mtime.into(),
3069 is_symlink: entry.is_symlink,
3070 is_ignored: entry.is_ignored,
3071 })
3072 } else {
3073 Err(anyhow!(
3074 "missing mtime in remote worktree entry {:?}",
3075 entry.path
3076 ))
3077 }
3078 }
3079}
3080
3081#[cfg(test)]
3082mod tests {
3083 use super::*;
3084 use fs::repository::FakeGitRepository;
3085 use fs::{FakeFs, RealFs};
3086 use gpui::{executor::Deterministic, TestAppContext};
3087 use pretty_assertions::assert_eq;
3088 use rand::prelude::*;
3089 use serde_json::json;
3090 use std::{env, fmt::Write};
3091 use util::{http::FakeHttpClient, test::temp_tree};
3092
    /// A gitignored entry (`a/b`) is omitted from the visible traversal but
    /// included when ignored entries are requested.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Visible entries exclude the ignored `a/b`.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // Including ignored entries yields the full tree.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3150
    /// Symlinks pointing back at an ancestor (`lib/a/lib -> ..`) must not
    /// cause unbounded recursion: the symlinked directories are listed but not
    /// descended into, and a later rename is picked up by the rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back at its parent directory.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // The symlinked dirs appear exactly once, with no entries beneath them.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks should be reflected after a rescan.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3230
    /// Ignore statuses — from both the worktree's own `.gitignore` and one in
    /// an ancestor directory outside the worktree — are applied to existing
    /// entries and to files created after the initial scan.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        // Make sure any stale FS events from setup have been consumed.
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the initial scan get the same treatment.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3309
    /// Paths resolve to the nearest enclosing git repository (including nested
    /// dependency repos); changes inside `.git` bump the repo's scan id, and
    /// removing `.git` removes the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // `c.txt` is outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // A file in a nested repo resolves to the nested repo, not the outer one.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        // Touching a file inside `.git` should bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting `.git` should make the repository disappear.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3387
3388 #[test]
3389 fn test_changed_repos() {
3390 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3391 GitRepositoryEntry {
3392 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3393 scan_id,
3394 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3395 git_dir_path: git_dir_path.as_ref().into(),
3396 }
3397 }
3398
3399 let prev_repos: Vec<GitRepositoryEntry> = vec![
3400 fake_entry("/.git", 0),
3401 fake_entry("/a/.git", 0),
3402 fake_entry("/a/b/.git", 0),
3403 ];
3404
3405 let new_repos: Vec<GitRepositoryEntry> = vec![
3406 fake_entry("/a/.git", 1),
3407 fake_entry("/a/b/.git", 0),
3408 fake_entry("/a/c/.git", 0),
3409 ];
3410
3411 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3412
3413 // Deletion retained
3414 assert!(res
3415 .iter()
3416 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3417 .is_some());
3418
3419 // Update retained
3420 assert!(res
3421 .iter()
3422 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3423 .is_some());
3424
3425 // Addition retained
3426 assert!(res
3427 .iter()
3428 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3429 .is_some());
3430
3431 // Nochange, not retained
3432 assert!(res
3433 .iter()
3434 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3435 .is_none());
3436 }
3437
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        // Layout: ".gitignore" ignores "ignored-dir", so files written there
        // should be marked ignored while files in "tracked-dir" should not.
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        // Build a local worktree over the real filesystem and wait for the
        // initial scan (plus any pending fs events) before writing.
        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        // Write one file into each directory through the worktree API.
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        // Both entries exist in the snapshot; only the one under the
        // gitignored directory carries the `is_ignored` flag.
        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
3491
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Capture the snapshot before the entry is created; note we do NOT
        // wait for the initial scan to complete — the point of this test is
        // to create an entry while the scan is still running.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Create "a/e" (a directory, hence `true`) through the worktree API.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        // Let background work settle, then confirm the entry landed in the
        // snapshot as a directory.
        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Replaying the delta between the two snapshots onto the earlier one
        // must reproduce the later one exactly (including ignored entries).
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
3540
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Operation/seed counts are overridable via env vars for debugging.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Populate a fake fs with a random tree (probability 1.0 forces
        // insertions, so the tree only grows during setup).
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot taken while the initial scan is still in flight.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Mutate the worktree concurrently with the initial scan, checking
        // snapshot invariants after every mutation.
        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally verify that applying the delta-update to the
            // stale snapshot reproduces the current one.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Once the scan finishes, invariants must still hold and one final
        // delta-update must bring the stale snapshot fully up to date.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3617
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Operation/seed counts are overridable via env vars for debugging.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Populate a fake fs with a random tree (probability 1.0 forces
        // insertions during setup).
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            // `paths` mirrors the worktree's sorted path list; each event's
            // change set is replayed onto it and the result is compared
            // against the worktree's actual paths.
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Either the existing index of `path` or its
                        // insertion point — both arms yield a usable `ix`.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                // An Added path must not already be present.
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                // A Removed path must currently be present.
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                // An Updated path must exist and stays put.
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                // Insert only if not already present.
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    // The replayed list must now match the worktree exactly.
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Interleave fs mutations with partial event flushes so the scanner
        // observes changes in arbitrary batches; occasionally store a
        // snapshot to test delta updates against later.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Flush everything and let the scanner settle before final checks.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A fresh worktree scanning the same fs must converge to the same
        // snapshot as the incrementally-maintained one.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored snapshot, possibly stripped of ignored entries, must
        // reach the final state by applying one delta update.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Remove ignored entries so the snapshot matches what a
                // remote peer that excludes ignored files would hold.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
3772
    // Performs one random mutation on the worktree through its public API:
    // delete an entry (~1/3), rename an entry (~1/3), or create/overwrite an
    // entry (~1/3). Returns a task that resolves when the mutation completes.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        // Pick a random existing entry (excluding ignored ones) as the target.
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // Delete — never the root entry (empty path).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // Rename (34..=66; the `..=66` pattern covers the remainder up to
            // 66 since 0..=33 was matched above) — never the root entry.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                // Move the entry under some other entry's directory.
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into itself; fall back to a fresh
                // top-level name.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Create a new child (if the target is a dir) or overwrite the
            // target file with empty contents.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
3832
    // Performs one random mutation directly on the fake filesystem (bypassing
    // the worktree): create a file/dir with `insertion_probability`, write a
    // random `.gitignore` with probability 0.05, otherwise rename or delete
    // an existing path. Insertion is forced when the tree is empty apart
    // from the root directory.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition all existing paths under the root into files and dirs.
        // The root itself always lands in `dirs`, so `dirs` is never empty.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            // Insertion branch: create a new randomly-named dir or file
            // under a randomly chosen existing directory.
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Gitignore branch: write a `.gitignore` into a random directory
            // that ignores a random subset of the paths beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // `subdirs` always contains `ignore_dir_path` itself, so the
            // `0..subdirs.len()` range below is non-empty.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // One ignore pattern per line, relative to the ignore file's dir.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Rename/delete branch: pick a random file or non-root dir
            // (`dirs[1..]` excludes the root from being renamed or deleted).
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // New parent must not be inside the path being moved.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory wholesale: remove
                // it first, then rename onto its path.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
3991
3992 fn gen_name(rng: &mut impl Rng) -> String {
3993 (0..6)
3994 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3995 .map(char::from)
3996 .collect()
3997 }
3998
    // Test-only helpers for validating and dumping a `LocalSnapshot`.
    impl LocalSnapshot {
        // Panics if the snapshot's internal data structures are inconsistent
        // with one another.
        fn check_invariants(&self) {
            // `entries_by_path` and `entries_by_id` must contain exactly the
            // same (path, id) pairs. The id-keyed side is collected through a
            // BTreeSet so both sides end up in path order for comparison.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // The `files` / visible-`files` traversals must visit exactly the
            // file entries of `entries_by_path`, in the same order.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            // Both traversals must be exhausted — no extra files.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree from the root via `child_entries`, visiting each
            // directory's children in order; the resulting path sequence must
            // match the flat cursor order of `entries_by_path`.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The `entries` traversal must agree with the raw cursor order.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked gitignore must correspond to an existing entry
            // for both its parent directory and the `.gitignore` file itself.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        // Dumps the snapshot as sorted (path, inode, is_ignored) tuples,
        // optionally excluding ignored entries, for equality assertions.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4073}