1use super::{ignore::IgnoreStack, DiagnosticSummary};
2use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
3use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
4use anyhow::{anyhow, Context, Result};
5use client::{proto, Client};
6use clock::ReplicaId;
7use collections::{HashMap, VecDeque};
8use fs::LineEnding;
9use fs::{repository::GitRepository, Fs};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 Stream, StreamExt,
16};
17use fuzzy::CharBag;
18use git::{DOT_GIT, GITIGNORE};
19use gpui::{
20 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
21 Task,
22};
23use language::File as _;
24use language::{
25 proto::{
26 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
27 serialize_version,
28 },
29 Buffer, DiagnosticEntry, PointUtf16, Rope, RopeFingerprint, Unclipped,
30};
31use parking_lot::Mutex;
32use postage::{
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 sync::{atomic::AtomicUsize, Arc},
48 task::Poll,
49 time::{Duration, SystemTime},
50};
51use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
52use util::paths::HOME;
53use util::{ResultExt, TryFutureExt};
54
55#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
56pub struct WorktreeId(usize);
57
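/// A tree of entries rooted at a directory, either backed by the local filesystem
/// or mirrored from a remote collaborator.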
58#[allow(clippy::large_enum_variant)]
59pub enum Worktree {
60 Local(LocalWorktree),
61 Remote(RemoteWorktree),
62}
63
64pub struct LocalWorktree {
65 snapshot: LocalSnapshot,
66 background_snapshot: Arc<Mutex<LocalSnapshot>>,
67 background_changes: Arc<Mutex<HashMap<Arc<Path>, PathChange>>>,
68 last_scan_state_rx: watch::Receiver<ScanState>,
69 _background_scanner_task: Option<Task<()>>,
70 poll_task: Option<Task<()>>,
71 share: Option<ShareState>,
72 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
73 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
74 client: Arc<Client>,
75 fs: Arc<dyn Fs>,
76 visible: bool,
77}
78
79pub struct RemoteWorktree {
80 pub snapshot: Snapshot,
81 pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
82 project_id: u64,
83 client: Arc<Client>,
84 updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
85 snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
86 replica_id: ReplicaId,
87 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
88 visible: bool,
89 disconnected: bool,
90}
91
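/// A worktree's entries, indexed both by path and by entry id.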
92#[derive(Clone)]
93pub struct Snapshot {
94 id: WorktreeId,
95 abs_path: Arc<Path>,
96 root_name: String,
97 root_char_bag: CharBag,
98 entries_by_path: SumTree<Entry>,
99 entries_by_id: SumTree<PathEntry>,
100 scan_id: usize,
101 completed_scan_id: usize,
102}
103
104#[derive(Clone)]
105pub struct GitRepositoryEntry {
106 pub(crate) repo: Arc<Mutex<dyn GitRepository>>,
107
108 pub(crate) scan_id: usize,
    // Path to the folder containing the .git file or directory
110 pub(crate) content_path: Arc<Path>,
111 // Path to the actual .git folder.
112 // Note: if .git is a file, this points to the folder indicated by the .git file
113 pub(crate) git_dir_path: Arc<Path>,
114}
115
116impl std::fmt::Debug for GitRepositoryEntry {
117 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
118 f.debug_struct("GitRepositoryEntry")
119 .field("content_path", &self.content_path)
120 .field("git_dir_path", &self.git_dir_path)
121 .field("libgit_repository", &"LibGitRepository")
122 .finish()
123 }
124}
125
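/// A `Snapshot` extended with the state needed to scan the local filesystem:
/// cached `.gitignore` rules, git repositories, and removed entry ids kept for reuse.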
126pub struct LocalSnapshot {
127 ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
128 git_repositories: Vec<GitRepositoryEntry>,
129 removed_entry_ids: HashMap<u64, ProjectEntryId>,
130 next_entry_id: Arc<AtomicUsize>,
131 snapshot: Snapshot,
132}
133
134impl Clone for LocalSnapshot {
135 fn clone(&self) -> Self {
136 Self {
137 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
138 git_repositories: self.git_repositories.iter().cloned().collect(),
139 removed_entry_ids: self.removed_entry_ids.clone(),
140 next_entry_id: self.next_entry_id.clone(),
141 snapshot: self.snapshot.clone(),
142 }
143 }
144}
145
146impl Deref for LocalSnapshot {
147 type Target = Snapshot;
148
149 fn deref(&self) -> &Self::Target {
150 &self.snapshot
151 }
152}
153
154impl DerefMut for LocalSnapshot {
155 fn deref_mut(&mut self) -> &mut Self::Target {
156 &mut self.snapshot
157 }
158}
159
160#[derive(Clone, Debug)]
161enum ScanState {
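    /// The worktree is not currently scanning the filesystem.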
162 Idle,
163 /// The worktree is performing its initial scan of the filesystem.
164 Initializing,
165 /// The worktree is updating in response to filesystem events.
166 Updating,
167 Err(Arc<anyhow::Error>),
168}
169
170struct ShareState {
171 project_id: u64,
172 snapshots_tx: watch::Sender<LocalSnapshot>,
173 resume_updates: watch::Sender<()>,
174 _maintain_remote_snapshot: Task<Option<()>>,
175}
176
177pub enum Event {
178 UpdatedEntries(HashMap<Arc<Path>, PathChange>),
179 UpdatedGitRepositories(Vec<GitRepositoryEntry>),
180}
181
182impl Entity for Worktree {
183 type Event = Event;
184}
185
186impl Worktree {
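    /// Creates a worktree backed by a directory on the local filesystem and spawns a
    /// background task that scans it and watches it for changes.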
187 pub async fn local(
188 client: Arc<Client>,
189 path: impl Into<Arc<Path>>,
190 visible: bool,
191 fs: Arc<dyn Fs>,
192 next_entry_id: Arc<AtomicUsize>,
193 cx: &mut AsyncAppContext,
194 ) -> Result<ModelHandle<Self>> {
195 let (tree, scan_states_tx) =
196 LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
197 tree.update(cx, |tree, cx| {
198 let tree = tree.as_local_mut().unwrap();
199 let abs_path = tree.abs_path().clone();
200 let background_snapshot = tree.background_snapshot.clone();
201 let background_changes = tree.background_changes.clone();
202 let background = cx.background().clone();
203 tree._background_scanner_task = Some(cx.background().spawn(async move {
204 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
205 let scanner = BackgroundScanner::new(
206 background_snapshot,
207 background_changes,
208 scan_states_tx,
209 fs,
210 background,
211 );
212 scanner.run(events).await;
213 }));
214 });
215 Ok(tree)
216 }
217
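    /// Creates a worktree that mirrors a worktree shared by a remote collaborator.
    /// Incoming updates are applied to a background snapshot and then polled into the
    /// foreground model.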
218 pub fn remote(
219 project_remote_id: u64,
220 replica_id: ReplicaId,
221 worktree: proto::WorktreeMetadata,
222 client: Arc<Client>,
223 cx: &mut MutableAppContext,
224 ) -> ModelHandle<Self> {
225 let remote_id = worktree.id;
226 let root_char_bag: CharBag = worktree
227 .root_name
228 .chars()
229 .map(|c| c.to_ascii_lowercase())
230 .collect();
231 let root_name = worktree.root_name.clone();
232 let visible = worktree.visible;
233
234 let abs_path = PathBuf::from(worktree.abs_path);
235 let snapshot = Snapshot {
236 id: WorktreeId(remote_id as usize),
237 abs_path: Arc::from(abs_path.deref()),
238 root_name,
239 root_char_bag,
240 entries_by_path: Default::default(),
241 entries_by_id: Default::default(),
242 scan_id: 0,
243 completed_scan_id: 0,
244 };
245
246 let (updates_tx, mut updates_rx) = mpsc::unbounded();
247 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
248 let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
249 let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
250 Worktree::Remote(RemoteWorktree {
251 project_id: project_remote_id,
252 replica_id,
253 snapshot: snapshot.clone(),
254 background_snapshot: background_snapshot.clone(),
255 updates_tx: Some(updates_tx),
256 snapshot_subscriptions: Default::default(),
257 client: client.clone(),
258 diagnostic_summaries: Default::default(),
259 visible,
260 disconnected: false,
261 })
262 });
263
264 cx.background()
265 .spawn(async move {
266 while let Some(update) = updates_rx.next().await {
267 if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
268 log::error!("error applying worktree update: {}", error);
269 }
270 snapshot_updated_tx.send(()).await.ok();
271 }
272 })
273 .detach();
274
275 cx.spawn(|mut cx| {
276 let this = worktree_handle.downgrade();
277 async move {
278 while (snapshot_updated_rx.recv().await).is_some() {
279 if let Some(this) = this.upgrade(&cx) {
280 this.update(&mut cx, |this, cx| {
281 this.poll_snapshot(cx);
282 let this = this.as_remote_mut().unwrap();
283 while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
284 if this.observed_snapshot(*scan_id) {
285 let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
286 let _ = tx.send(());
287 } else {
288 break;
289 }
290 }
291 });
292 } else {
293 break;
294 }
295 }
296 }
297 })
298 .detach();
299
300 worktree_handle
301 }
302
303 pub fn as_local(&self) -> Option<&LocalWorktree> {
304 if let Worktree::Local(worktree) = self {
305 Some(worktree)
306 } else {
307 None
308 }
309 }
310
311 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
312 if let Worktree::Remote(worktree) = self {
313 Some(worktree)
314 } else {
315 None
316 }
317 }
318
319 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
320 if let Worktree::Local(worktree) = self {
321 Some(worktree)
322 } else {
323 None
324 }
325 }
326
327 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
328 if let Worktree::Remote(worktree) = self {
329 Some(worktree)
330 } else {
331 None
332 }
333 }
334
335 pub fn is_local(&self) -> bool {
336 matches!(self, Worktree::Local(_))
337 }
338
339 pub fn is_remote(&self) -> bool {
340 !self.is_local()
341 }
342
343 pub fn snapshot(&self) -> Snapshot {
344 match self {
345 Worktree::Local(worktree) => worktree.snapshot().snapshot,
346 Worktree::Remote(worktree) => worktree.snapshot(),
347 }
348 }
349
350 pub fn scan_id(&self) -> usize {
351 match self {
352 Worktree::Local(worktree) => worktree.snapshot.scan_id,
353 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
354 }
355 }
356
357 pub fn completed_scan_id(&self) -> usize {
358 match self {
359 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
360 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
361 }
362 }
363
364 pub fn is_visible(&self) -> bool {
365 match self {
366 Worktree::Local(worktree) => worktree.visible,
367 Worktree::Remote(worktree) => worktree.visible,
368 }
369 }
370
371 pub fn replica_id(&self) -> ReplicaId {
372 match self {
373 Worktree::Local(_) => 0,
374 Worktree::Remote(worktree) => worktree.replica_id,
375 }
376 }
377
378 pub fn diagnostic_summaries(
379 &self,
380 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
381 match self {
382 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
383 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
384 }
385 .iter()
386 .map(|(path, summary)| (path.0.clone(), *summary))
387 }
388
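    /// Brings the foreground snapshot up to date with the background snapshot that is
    /// maintained by the scanner (local) or by remote updates (remote).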
389 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
390 match self {
391 Self::Local(worktree) => worktree.poll_snapshot(false, cx),
392 Self::Remote(worktree) => worktree.poll_snapshot(cx),
393 };
394 }
395
396 pub fn abs_path(&self) -> Arc<Path> {
397 match self {
398 Worktree::Local(worktree) => worktree.abs_path.clone(),
399 Worktree::Remote(worktree) => worktree.abs_path.clone(),
400 }
401 }
402}
403
404impl LocalWorktree {
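    /// Builds the initial `LocalWorktree` model: stats the root path, seeds the snapshot
    /// with the root entry, and returns the sender the background scanner uses to report
    /// scan-state changes.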
405 async fn create(
406 client: Arc<Client>,
407 path: impl Into<Arc<Path>>,
408 visible: bool,
409 fs: Arc<dyn Fs>,
410 next_entry_id: Arc<AtomicUsize>,
411 cx: &mut AsyncAppContext,
412 ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
413 let abs_path = path.into();
414 let path: Arc<Path> = Arc::from(Path::new(""));
415
416 // After determining whether the root entry is a file or a directory, populate the
417 // snapshot's "root name", which will be used for the purpose of fuzzy matching.
418 let root_name = abs_path
419 .file_name()
420 .map_or(String::new(), |f| f.to_string_lossy().to_string());
421 let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
422 let metadata = fs
423 .metadata(&abs_path)
424 .await
425 .context("failed to stat worktree path")?;
426
427 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
428 let (mut last_scan_state_tx, last_scan_state_rx) =
429 watch::channel_with(ScanState::Initializing);
430 let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
431 let mut snapshot = LocalSnapshot {
432 ignores_by_parent_abs_path: Default::default(),
433 git_repositories: Default::default(),
434 removed_entry_ids: Default::default(),
435 next_entry_id,
436 snapshot: Snapshot {
437 id: WorktreeId::from_usize(cx.model_id()),
438 abs_path,
439 root_name: root_name.clone(),
440 root_char_bag,
441 entries_by_path: Default::default(),
442 entries_by_id: Default::default(),
443 scan_id: 0,
444 completed_scan_id: 0,
445 },
446 };
447 if let Some(metadata) = metadata {
448 let entry = Entry::new(
449 path,
450 &metadata,
451 &snapshot.next_entry_id,
452 snapshot.root_char_bag,
453 );
454 snapshot.insert_entry(entry, fs.as_ref());
455 }
456
457 let tree = Self {
458 snapshot: snapshot.clone(),
459 background_snapshot: Arc::new(Mutex::new(snapshot)),
460 background_changes: Arc::new(Mutex::new(HashMap::default())),
461 last_scan_state_rx,
462 _background_scanner_task: None,
463 share: None,
464 poll_task: None,
465 diagnostics: Default::default(),
466 diagnostic_summaries: Default::default(),
467 client,
468 fs,
469 visible,
470 };
471
472 cx.spawn_weak(|this, mut cx| async move {
473 while let Some(scan_state) = scan_states_rx.next().await {
474 if let Some(this) = this.upgrade(&cx) {
475 last_scan_state_tx.blocking_send(scan_state).ok();
476 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
477 } else {
478 break;
479 }
480 }
481 })
482 .detach();
483
484 Worktree::Local(tree)
485 });
486
487 Ok((tree, scan_states_tx))
488 }
489
490 pub fn contains_abs_path(&self, path: &Path) -> bool {
491 path.starts_with(&self.abs_path)
492 }
493
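    /// Converts a worktree-relative path into an absolute path. An empty path refers to
    /// the worktree root itself.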
494 fn absolutize(&self, path: &Path) -> PathBuf {
495 if path.file_name().is_some() {
496 self.abs_path.join(path)
497 } else {
498 self.abs_path.to_path_buf()
499 }
500 }
501
502 pub(crate) fn load_buffer(
503 &mut self,
504 path: &Path,
505 cx: &mut ModelContext<Worktree>,
506 ) -> Task<Result<ModelHandle<Buffer>>> {
507 let path = Arc::from(path);
508 cx.spawn(move |this, mut cx| async move {
509 let (file, contents, diff_base) = this
510 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
511 .await?;
512 Ok(cx.add_model(|cx| {
513 let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
514 buffer.git_diff_recalc(cx);
515 buffer
516 }))
517 })
518 }
519
520 pub fn diagnostics_for_path(
521 &self,
522 path: &Path,
523 ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
524 self.diagnostics.get(path).cloned()
525 }
526
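    /// Replaces the stored diagnostics for a path and, if the worktree is shared, sends
    /// the new summary to collaborators. Returns whether the summary changed.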
527 pub fn update_diagnostics(
528 &mut self,
529 language_server_id: usize,
530 worktree_path: Arc<Path>,
531 diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
532 _: &mut ModelContext<Worktree>,
533 ) -> Result<bool> {
534 self.diagnostics.remove(&worktree_path);
535 let old_summary = self
536 .diagnostic_summaries
537 .remove(&PathKey(worktree_path.clone()))
538 .unwrap_or_default();
539 let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
540 if !new_summary.is_empty() {
541 self.diagnostic_summaries
542 .insert(PathKey(worktree_path.clone()), new_summary);
543 self.diagnostics.insert(worktree_path.clone(), diagnostics);
544 }
545
546 let updated = !old_summary.is_empty() || !new_summary.is_empty();
547 if updated {
548 if let Some(share) = self.share.as_ref() {
549 self.client
550 .send(proto::UpdateDiagnosticSummary {
551 project_id: share.project_id,
552 worktree_id: self.id().to_proto(),
553 summary: Some(proto::DiagnosticSummary {
554 path: worktree_path.to_string_lossy().to_string(),
555 language_server_id: language_server_id as u64,
556 error_count: new_summary.error_count as u32,
557 warning_count: new_summary.warning_count as u32,
558 }),
559 })
560 .log_err();
561 }
562 }
563
564 Ok(updated)
565 }
566
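    /// Publishes the background snapshot to the foreground and emits entry and git
    /// repository change events. While the initial scan is still in progress, polling is
    /// re-scheduled until the scanner becomes idle.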
567 fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
568 self.poll_task.take();
569
570 match self.scan_state() {
571 ScanState::Idle => {
572 let new_snapshot = self.background_snapshot.lock().clone();
573 let changes = mem::take(&mut *self.background_changes.lock());
574 let updated_repos = Self::changed_repos(
575 &self.snapshot.git_repositories,
576 &new_snapshot.git_repositories,
577 );
578 self.snapshot = new_snapshot;
579
580 if let Some(share) = self.share.as_mut() {
581 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
582 }
583
584 cx.emit(Event::UpdatedEntries(changes));
585
586 if !updated_repos.is_empty() {
587 cx.emit(Event::UpdatedGitRepositories(updated_repos));
588 }
589 }
590
591 ScanState::Initializing => {
592 let is_fake_fs = self.fs.is_fake();
593
594 let new_snapshot = self.background_snapshot.lock().clone();
595 let updated_repos = Self::changed_repos(
596 &self.snapshot.git_repositories,
597 &new_snapshot.git_repositories,
598 );
599 self.snapshot = new_snapshot;
600
601 self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
602 if is_fake_fs {
603 #[cfg(any(test, feature = "test-support"))]
604 cx.background().simulate_random_delay().await;
605 } else {
606 smol::Timer::after(Duration::from_millis(100)).await;
607 }
608 if let Some(this) = this.upgrade(&cx) {
609 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
610 }
611 }));
612
613 cx.emit(Event::UpdatedEntries(Default::default()));
614
615 if !updated_repos.is_empty() {
616 cx.emit(Event::UpdatedGitRepositories(updated_repos));
617 }
618 }
619
620 _ => {
621 if force {
622 self.snapshot = self.background_snapshot.lock().clone();
623 }
624 }
625 }
626
627 cx.notify();
628 }
629
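    /// Returns the git repository entries that differ between two snapshots, comparing
    /// them by `.git` directory path and scan id.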
630 fn changed_repos(
631 old_repos: &[GitRepositoryEntry],
632 new_repos: &[GitRepositoryEntry],
633 ) -> Vec<GitRepositoryEntry> {
634 fn diff<'a>(
635 a: &'a [GitRepositoryEntry],
636 b: &'a [GitRepositoryEntry],
637 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
638 ) {
639 for a_repo in a {
640 let matched = b.iter().find(|b_repo| {
641 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
642 });
643
644 if matched.is_none() {
645 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
646 }
647 }
648 }
649
650 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
651
652 diff(old_repos, new_repos, &mut updated);
653 diff(new_repos, old_repos, &mut updated);
654
655 updated.into_values().collect()
656 }
657
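    /// Resolves once the background scanner is no longer initializing or updating.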
658 pub fn scan_complete(&self) -> impl Future<Output = ()> {
659 let mut scan_state_rx = self.last_scan_state_rx.clone();
660 async move {
661 let mut scan_state = Some(scan_state_rx.borrow().clone());
662 while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
663 scan_state = scan_state_rx.recv().await;
664 }
665 }
666 }
667
668 fn scan_state(&self) -> ScanState {
669 self.last_scan_state_rx.borrow().clone()
670 }
671
672 pub fn snapshot(&self) -> LocalSnapshot {
673 self.snapshot.clone()
674 }
675
676 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
677 proto::WorktreeMetadata {
678 id: self.id().to_proto(),
679 root_name: self.root_name().to_string(),
680 visible: self.visible,
681 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
682 }
683 }
684
685 fn load(
686 &self,
687 path: &Path,
688 cx: &mut ModelContext<Worktree>,
689 ) -> Task<Result<(File, String, Option<String>)>> {
690 let handle = cx.handle();
691 let path = Arc::from(path);
692 let abs_path = self.absolutize(&path);
693 let fs = self.fs.clone();
694 let snapshot = self.snapshot();
695
696 cx.spawn(|this, mut cx| async move {
697 let text = fs.load(&abs_path).await?;
698
699 let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
700 if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
701 let repo_relative = repo_relative.to_owned();
702 cx.background()
703 .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
704 .await
705 } else {
706 None
707 }
708 } else {
709 None
710 };
711
712 // Eagerly populate the snapshot with an updated entry for the loaded file
713 let entry = this
714 .update(&mut cx, |this, cx| {
715 this.as_local()
716 .unwrap()
717 .refresh_entry(path, abs_path, None, cx)
718 })
719 .await?;
720
721 Ok((
722 File {
723 entry_id: entry.id,
724 worktree: handle,
725 path: entry.path,
726 mtime: entry.mtime,
727 is_local: true,
728 is_deleted: false,
729 },
730 text,
731 diff_base,
732 ))
733 })
734 }
735
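    /// Writes the buffer's contents to `path`, refreshes the corresponding entry, and
    /// notifies collaborators that the buffer was saved.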
736 pub fn save_buffer(
737 &self,
738 buffer_handle: ModelHandle<Buffer>,
739 path: Arc<Path>,
740 has_changed_file: bool,
741 cx: &mut ModelContext<Worktree>,
742 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
743 let handle = cx.handle();
744 let buffer = buffer_handle.read(cx);
745
746 let rpc = self.client.clone();
747 let buffer_id = buffer.remote_id();
748 let project_id = self.share.as_ref().map(|share| share.project_id);
749
750 let text = buffer.as_rope().clone();
751 let fingerprint = text.fingerprint();
752 let version = buffer.version();
753 let save = self.write_file(path, text, buffer.line_ending(), cx);
754
755 cx.as_mut().spawn(|mut cx| async move {
756 let entry = save.await?;
757
758 if has_changed_file {
759 let new_file = Arc::new(File {
760 entry_id: entry.id,
761 worktree: handle,
762 path: entry.path,
763 mtime: entry.mtime,
764 is_local: true,
765 is_deleted: false,
766 });
767
768 if let Some(project_id) = project_id {
769 rpc.send(proto::UpdateBufferFile {
770 project_id,
771 buffer_id,
772 file: Some(new_file.to_proto()),
773 })
774 .log_err();
775 }
776
                buffer_handle.update(&mut cx, |buffer, cx| {
                    buffer.file_updated(new_file, cx).detach();
                });
782 }
783
784 if let Some(project_id) = project_id {
785 rpc.send(proto::BufferSaved {
786 project_id,
787 buffer_id,
788 version: serialize_version(&version),
789 mtime: Some(entry.mtime.into()),
790 fingerprint: serialize_fingerprint(fingerprint),
791 })?;
792 }
793
794 buffer_handle.update(&mut cx, |buffer, cx| {
795 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
796 });
797
798 Ok((version, fingerprint, entry.mtime))
799 })
800 }
801
802 pub fn create_entry(
803 &self,
804 path: impl Into<Arc<Path>>,
805 is_dir: bool,
806 cx: &mut ModelContext<Worktree>,
807 ) -> Task<Result<Entry>> {
808 self.write_entry_internal(
809 path,
810 if is_dir {
811 None
812 } else {
813 Some(Default::default())
814 },
815 cx,
816 )
817 }
818
819 pub fn write_file(
820 &self,
821 path: impl Into<Arc<Path>>,
822 text: Rope,
823 line_ending: LineEnding,
824 cx: &mut ModelContext<Worktree>,
825 ) -> Task<Result<Entry>> {
826 self.write_entry_internal(path, Some((text, line_ending)), cx)
827 }
828
829 pub fn delete_entry(
830 &self,
831 entry_id: ProjectEntryId,
832 cx: &mut ModelContext<Worktree>,
833 ) -> Option<Task<Result<()>>> {
834 let entry = self.entry_for_id(entry_id)?.clone();
835 let abs_path = self.absolutize(&entry.path);
836 let delete = cx.background().spawn({
837 let fs = self.fs.clone();
838 let abs_path = abs_path;
839 async move {
840 if entry.is_file() {
841 fs.remove_file(&abs_path, Default::default()).await
842 } else {
843 fs.remove_dir(
844 &abs_path,
845 RemoveOptions {
846 recursive: true,
847 ignore_if_not_exists: false,
848 },
849 )
850 .await
851 }
852 }
853 });
854
855 Some(cx.spawn(|this, mut cx| async move {
856 delete.await?;
857 this.update(&mut cx, |this, cx| {
858 let this = this.as_local_mut().unwrap();
859 {
860 let mut snapshot = this.background_snapshot.lock();
861 snapshot.delete_entry(entry_id);
862 }
863 this.poll_snapshot(true, cx);
864 });
865 Ok(())
866 }))
867 }
868
869 pub fn rename_entry(
870 &self,
871 entry_id: ProjectEntryId,
872 new_path: impl Into<Arc<Path>>,
873 cx: &mut ModelContext<Worktree>,
874 ) -> Option<Task<Result<Entry>>> {
875 let old_path = self.entry_for_id(entry_id)?.path.clone();
876 let new_path = new_path.into();
877 let abs_old_path = self.absolutize(&old_path);
878 let abs_new_path = self.absolutize(new_path.as_ref());
879 let rename = cx.background().spawn({
880 let fs = self.fs.clone();
881 let abs_new_path = abs_new_path.clone();
882 async move {
883 fs.rename(&abs_old_path, &abs_new_path, Default::default())
884 .await
885 }
886 });
887
888 Some(cx.spawn(|this, mut cx| async move {
889 rename.await?;
890 let entry = this
891 .update(&mut cx, |this, cx| {
892 this.as_local_mut().unwrap().refresh_entry(
893 new_path.clone(),
894 abs_new_path,
895 Some(old_path),
896 cx,
897 )
898 })
899 .await?;
900 Ok(entry)
901 }))
902 }
903
904 pub fn copy_entry(
905 &self,
906 entry_id: ProjectEntryId,
907 new_path: impl Into<Arc<Path>>,
908 cx: &mut ModelContext<Worktree>,
909 ) -> Option<Task<Result<Entry>>> {
910 let old_path = self.entry_for_id(entry_id)?.path.clone();
911 let new_path = new_path.into();
912 let abs_old_path = self.absolutize(&old_path);
913 let abs_new_path = self.absolutize(&new_path);
914 let copy = cx.background().spawn({
915 let fs = self.fs.clone();
916 let abs_new_path = abs_new_path.clone();
917 async move {
918 copy_recursive(
919 fs.as_ref(),
920 &abs_old_path,
921 &abs_new_path,
922 Default::default(),
923 )
924 .await
925 }
926 });
927
928 Some(cx.spawn(|this, mut cx| async move {
929 copy.await?;
930 let entry = this
931 .update(&mut cx, |this, cx| {
932 this.as_local_mut().unwrap().refresh_entry(
933 new_path.clone(),
934 abs_new_path,
935 None,
936 cx,
937 )
938 })
939 .await?;
940 Ok(entry)
941 }))
942 }
943
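    /// Creates a file with the given text, or a directory if no text is provided, and
    /// then refreshes the corresponding entry in the snapshot.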
944 fn write_entry_internal(
945 &self,
946 path: impl Into<Arc<Path>>,
947 text_if_file: Option<(Rope, LineEnding)>,
948 cx: &mut ModelContext<Worktree>,
949 ) -> Task<Result<Entry>> {
950 let path = path.into();
951 let abs_path = self.absolutize(&path);
952 let write = cx.background().spawn({
953 let fs = self.fs.clone();
954 let abs_path = abs_path.clone();
955 async move {
956 if let Some((text, line_ending)) = text_if_file {
957 fs.save(&abs_path, &text, line_ending).await
958 } else {
959 fs.create_dir(&abs_path).await
960 }
961 }
962 });
963
964 cx.spawn(|this, mut cx| async move {
965 write.await?;
966 let entry = this
967 .update(&mut cx, |this, cx| {
968 this.as_local_mut()
969 .unwrap()
970 .refresh_entry(path, abs_path, None, cx)
971 })
972 .await?;
973 Ok(entry)
974 })
975 }
976
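    /// Re-stats `abs_path` and inserts an up-to-date entry for it into the background
    /// snapshot (removing `old_path` if the entry was renamed), recording the change so
    /// it is reported on the next poll.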
977 fn refresh_entry(
978 &self,
979 path: Arc<Path>,
980 abs_path: PathBuf,
981 old_path: Option<Arc<Path>>,
982 cx: &mut ModelContext<Worktree>,
983 ) -> Task<Result<Entry>> {
984 let fs = self.fs.clone();
985 let root_char_bag;
986 let next_entry_id;
987 {
988 let snapshot = self.background_snapshot.lock();
989 root_char_bag = snapshot.root_char_bag;
990 next_entry_id = snapshot.next_entry_id.clone();
991 }
992 cx.spawn_weak(|this, mut cx| async move {
993 let metadata = fs
994 .metadata(&abs_path)
995 .await?
996 .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
997 let this = this
998 .upgrade(&cx)
999 .ok_or_else(|| anyhow!("worktree was dropped"))?;
1000 this.update(&mut cx, |this, cx| {
1001 let this = this.as_local_mut().unwrap();
1002 let inserted_entry;
1003 {
1004 let mut snapshot = this.background_snapshot.lock();
1005 let mut changes = this.background_changes.lock();
1006 let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
1007 entry.is_ignored = snapshot
1008 .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
1009 .is_abs_path_ignored(&abs_path, entry.is_dir());
1010 if let Some(old_path) = old_path {
1011 snapshot.remove_path(&old_path);
1012 changes.insert(old_path.clone(), PathChange::Removed);
1013 }
1014 snapshot.scan_started();
1015 let exists = snapshot.entry_for_path(&entry.path).is_some();
1016 inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
1017 changes.insert(
1018 inserted_entry.path.clone(),
1019 if exists {
1020 PathChange::Updated
1021 } else {
1022 PathChange::Added
1023 },
1024 );
1025 snapshot.scan_completed();
1026 }
1027 this.poll_snapshot(true, cx);
1028 Ok(inserted_entry)
1029 })
1030 })
1031 }
1032
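    /// Starts sharing this worktree with collaborators in the given project, streaming
    /// snapshot updates until sharing stops. The returned task resolves once the initial
    /// snapshot has been sent.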
1033 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1034 let (share_tx, share_rx) = oneshot::channel();
1035
1036 if let Some(share) = self.share.as_mut() {
1037 let _ = share_tx.send(());
1038 *share.resume_updates.borrow_mut() = ();
1039 } else {
1040 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
1041 let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
1042 let worktree_id = cx.model_id() as u64;
1043
1044 for (path, summary) in self.diagnostic_summaries.iter() {
1045 if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1046 project_id,
1047 worktree_id,
1048 summary: Some(summary.to_proto(&path.0)),
1049 }) {
1050 return Task::ready(Err(e));
1051 }
1052 }
1053
1054 let _maintain_remote_snapshot = cx.background().spawn({
1055 let client = self.client.clone();
1056 async move {
1057 let mut share_tx = Some(share_tx);
1058 let mut prev_snapshot = LocalSnapshot {
1059 ignores_by_parent_abs_path: Default::default(),
1060 git_repositories: Default::default(),
1061 removed_entry_ids: Default::default(),
1062 next_entry_id: Default::default(),
1063 snapshot: Snapshot {
1064 id: WorktreeId(worktree_id as usize),
1065 abs_path: Path::new("").into(),
1066 root_name: Default::default(),
1067 root_char_bag: Default::default(),
1068 entries_by_path: Default::default(),
1069 entries_by_id: Default::default(),
1070 scan_id: 0,
1071 completed_scan_id: 0,
1072 },
1073 };
1074 while let Some(snapshot) = snapshots_rx.recv().await {
1075 #[cfg(any(test, feature = "test-support"))]
1076 const MAX_CHUNK_SIZE: usize = 2;
1077 #[cfg(not(any(test, feature = "test-support")))]
1078 const MAX_CHUNK_SIZE: usize = 256;
1079
1080 let update =
1081 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1082 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1083 let _ = resume_updates_rx.try_recv();
1084 while let Err(error) = client.request(update.clone()).await {
1085 log::error!("failed to send worktree update: {}", error);
1086 log::info!("waiting to resume updates");
1087 if resume_updates_rx.next().await.is_none() {
1088 return Ok(());
1089 }
1090 }
1091 }
1092
1093 if let Some(share_tx) = share_tx.take() {
1094 let _ = share_tx.send(());
1095 }
1096
1097 prev_snapshot = snapshot;
1098 }
1099
1100 Ok::<_, anyhow::Error>(())
1101 }
1102 .log_err()
1103 });
1104
1105 self.share = Some(ShareState {
1106 project_id,
1107 snapshots_tx,
1108 resume_updates: resume_updates_tx,
1109 _maintain_remote_snapshot,
1110 });
1111 }
1112
1113 cx.foreground()
1114 .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1115 }
1116
1117 pub fn unshare(&mut self) {
1118 self.share.take();
1119 }
1120
1121 pub fn is_shared(&self) -> bool {
1122 self.share.is_some()
1123 }
1124}
1125
1126impl RemoteWorktree {
1127 fn snapshot(&self) -> Snapshot {
1128 self.snapshot.clone()
1129 }
1130
1131 fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
1132 self.snapshot = self.background_snapshot.lock().clone();
1133 cx.emit(Event::UpdatedEntries(Default::default()));
1134 cx.notify();
1135 }
1136
1137 pub fn disconnected_from_host(&mut self) {
1138 self.updates_tx.take();
1139 self.snapshot_subscriptions.clear();
1140 self.disconnected = true;
1141 }
1142
1143 pub fn save_buffer(
1144 &self,
1145 buffer_handle: ModelHandle<Buffer>,
1146 cx: &mut ModelContext<Worktree>,
1147 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1148 let buffer = buffer_handle.read(cx);
1149 let buffer_id = buffer.remote_id();
1150 let version = buffer.version();
1151 let rpc = self.client.clone();
1152 let project_id = self.project_id;
1153 cx.as_mut().spawn(|mut cx| async move {
1154 let response = rpc
1155 .request(proto::SaveBuffer {
1156 project_id,
1157 buffer_id,
1158 version: serialize_version(&version),
1159 })
1160 .await?;
1161 let version = deserialize_version(response.version);
1162 let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1163 let mtime = response
1164 .mtime
1165 .ok_or_else(|| anyhow!("missing mtime"))?
1166 .into();
1167
1168 buffer_handle.update(&mut cx, |buffer, cx| {
1169 buffer.did_save(version.clone(), fingerprint, mtime, cx);
1170 });
1171
1172 Ok((version, fingerprint, mtime))
1173 })
1174 }
1175
1176 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1177 if let Some(updates_tx) = &self.updates_tx {
1178 updates_tx
1179 .unbounded_send(update)
1180 .expect("consumer runs to completion");
1181 }
1182 }
1183
1184 fn observed_snapshot(&self, scan_id: usize) -> bool {
1185 self.completed_scan_id >= scan_id
1186 }
1187
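    /// Returns a future that resolves once this remote worktree has observed a snapshot
    /// with at least the given scan id, or fails if the host has disconnected.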
1188 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1189 let (tx, rx) = oneshot::channel();
1190 if self.observed_snapshot(scan_id) {
1191 let _ = tx.send(());
1192 } else if self.disconnected {
1193 drop(tx);
1194 } else {
1195 match self
1196 .snapshot_subscriptions
1197 .binary_search_by_key(&scan_id, |probe| probe.0)
1198 {
1199 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1200 }
1201 }
1202
1203 async move {
1204 rx.await?;
1205 Ok(())
1206 }
1207 }
1208
1209 pub fn update_diagnostic_summary(
1210 &mut self,
1211 path: Arc<Path>,
1212 summary: &proto::DiagnosticSummary,
1213 ) {
1214 let summary = DiagnosticSummary {
1215 language_server_id: summary.language_server_id as usize,
1216 error_count: summary.error_count as usize,
1217 warning_count: summary.warning_count as usize,
1218 };
1219 if summary.is_empty() {
1220 self.diagnostic_summaries.remove(&PathKey(path));
1221 } else {
1222 self.diagnostic_summaries.insert(PathKey(path), summary);
1223 }
1224 }
1225
1226 pub fn insert_entry(
1227 &mut self,
1228 entry: proto::Entry,
1229 scan_id: usize,
1230 cx: &mut ModelContext<Worktree>,
1231 ) -> Task<Result<Entry>> {
1232 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1233 cx.spawn(|this, mut cx| async move {
1234 wait_for_snapshot.await?;
1235 this.update(&mut cx, |worktree, _| {
1236 let worktree = worktree.as_remote_mut().unwrap();
1237 let mut snapshot = worktree.background_snapshot.lock();
1238 let entry = snapshot.insert_entry(entry);
1239 worktree.snapshot = snapshot.clone();
1240 entry
1241 })
1242 })
1243 }
1244
1245 pub(crate) fn delete_entry(
1246 &mut self,
1247 id: ProjectEntryId,
1248 scan_id: usize,
1249 cx: &mut ModelContext<Worktree>,
1250 ) -> Task<Result<()>> {
1251 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1252 cx.spawn(|this, mut cx| async move {
1253 wait_for_snapshot.await?;
1254 this.update(&mut cx, |worktree, _| {
1255 let worktree = worktree.as_remote_mut().unwrap();
1256 let mut snapshot = worktree.background_snapshot.lock();
1257 snapshot.delete_entry(id);
1258 worktree.snapshot = snapshot.clone();
1259 });
1260 Ok(())
1261 })
1262 }
1263}
1264
1265impl Snapshot {
1266 pub fn id(&self) -> WorktreeId {
1267 self.id
1268 }
1269
1270 pub fn abs_path(&self) -> &Arc<Path> {
1271 &self.abs_path
1272 }
1273
1274 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1275 self.entries_by_id.get(&entry_id, &()).is_some()
1276 }
1277
1278 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1279 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1280 let old_entry = self.entries_by_id.insert_or_replace(
1281 PathEntry {
1282 id: entry.id,
1283 path: entry.path.clone(),
1284 is_ignored: entry.is_ignored,
1285 scan_id: 0,
1286 },
1287 &(),
1288 );
1289 if let Some(old_entry) = old_entry {
1290 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1291 }
1292 self.entries_by_path.insert_or_replace(entry.clone(), &());
1293 Ok(entry)
1294 }
1295
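    /// Removes an entry and all of its descendants from the snapshot, returning false if
    /// the entry id is unknown.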
1296 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
1297 if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
1298 self.entries_by_path = {
1299 let mut cursor = self.entries_by_path.cursor();
1300 let mut new_entries_by_path =
1301 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1302 while let Some(entry) = cursor.item() {
1303 if entry.path.starts_with(&removed_entry.path) {
1304 self.entries_by_id.remove(&entry.id, &());
1305 cursor.next(&());
1306 } else {
1307 break;
1308 }
1309 }
1310 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1311 new_entries_by_path
1312 };
1313
1314 true
1315 } else {
1316 false
1317 }
1318 }
1319
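    /// Applies an update received from the host, editing both the by-path and by-id
    /// trees and advancing the scan id.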
1320 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1321 let mut entries_by_path_edits = Vec::new();
1322 let mut entries_by_id_edits = Vec::new();
1323 for entry_id in update.removed_entries {
1324 let entry = self
1325 .entry_for_id(ProjectEntryId::from_proto(entry_id))
1326 .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
1327 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1328 entries_by_id_edits.push(Edit::Remove(entry.id));
1329 }
1330
1331 for entry in update.updated_entries {
1332 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1333 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1334 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1335 }
1336 entries_by_id_edits.push(Edit::Insert(PathEntry {
1337 id: entry.id,
1338 path: entry.path.clone(),
1339 is_ignored: entry.is_ignored,
1340 scan_id: 0,
1341 }));
1342 entries_by_path_edits.push(Edit::Insert(entry));
1343 }
1344
1345 self.entries_by_path.edit(entries_by_path_edits, &());
1346 self.entries_by_id.edit(entries_by_id_edits, &());
1347 self.scan_id = update.scan_id as usize;
1348 if update.is_last_update {
1349 self.completed_scan_id = update.scan_id as usize;
1350 }
1351
1352 Ok(())
1353 }
1354
1355 pub fn file_count(&self) -> usize {
1356 self.entries_by_path.summary().file_count
1357 }
1358
1359 pub fn visible_file_count(&self) -> usize {
1360 self.entries_by_path.summary().visible_file_count
1361 }
1362
1363 fn traverse_from_offset(
1364 &self,
1365 include_dirs: bool,
1366 include_ignored: bool,
1367 start_offset: usize,
1368 ) -> Traversal {
1369 let mut cursor = self.entries_by_path.cursor();
1370 cursor.seek(
1371 &TraversalTarget::Count {
1372 count: start_offset,
1373 include_dirs,
1374 include_ignored,
1375 },
1376 Bias::Right,
1377 &(),
1378 );
1379 Traversal {
1380 cursor,
1381 include_dirs,
1382 include_ignored,
1383 }
1384 }
1385
1386 fn traverse_from_path(
1387 &self,
1388 include_dirs: bool,
1389 include_ignored: bool,
1390 path: &Path,
1391 ) -> Traversal {
1392 let mut cursor = self.entries_by_path.cursor();
1393 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1394 Traversal {
1395 cursor,
1396 include_dirs,
1397 include_ignored,
1398 }
1399 }
1400
1401 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1402 self.traverse_from_offset(false, include_ignored, start)
1403 }
1404
1405 pub fn entries(&self, include_ignored: bool) -> Traversal {
1406 self.traverse_from_offset(true, include_ignored, 0)
1407 }
1408
1409 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1410 let empty_path = Path::new("");
1411 self.entries_by_path
1412 .cursor::<()>()
1413 .filter(move |entry| entry.path.as_ref() != empty_path)
1414 .map(|entry| &entry.path)
1415 }
1416
1417 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1418 let mut cursor = self.entries_by_path.cursor();
1419 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1420 let traversal = Traversal {
1421 cursor,
1422 include_dirs: true,
1423 include_ignored: true,
1424 };
1425 ChildEntriesIter {
1426 traversal,
1427 parent_path,
1428 }
1429 }
1430
1431 pub fn root_entry(&self) -> Option<&Entry> {
1432 self.entry_for_path("")
1433 }
1434
1435 pub fn root_name(&self) -> &str {
1436 &self.root_name
1437 }
1438
1439 pub fn scan_started(&mut self) {
1440 self.scan_id += 1;
1441 }
1442
1443 pub fn scan_completed(&mut self) {
1444 self.completed_scan_id = self.scan_id;
1445 }
1446
1447 pub fn scan_id(&self) -> usize {
1448 self.scan_id
1449 }
1450
1451 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1452 let path = path.as_ref();
1453 self.traverse_from_path(true, true, path)
1454 .entry()
1455 .and_then(|entry| {
1456 if entry.path.as_ref() == path {
1457 Some(entry)
1458 } else {
1459 None
1460 }
1461 })
1462 }
1463
1464 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1465 let entry = self.entries_by_id.get(&id, &())?;
1466 self.entry_for_path(&entry.path)
1467 }
1468
1469 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1470 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1471 }
1472}
1473
1474impl LocalSnapshot {
1475 // Gives the most specific git repository for a given path
1476 pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
1477 self.git_repositories
1478 .iter()
            .rev() // git_repositories is ordered lexicographically
1480 .find(|repo| repo.manages(path))
1481 .cloned()
1482 }
1483
1484 pub(crate) fn in_dot_git(&mut self, path: &Path) -> Option<&mut GitRepositoryEntry> {
1485 // Git repositories cannot be nested, so we don't need to reverse the order
1486 self.git_repositories
1487 .iter_mut()
1488 .find(|repo| repo.in_dot_git(path))
1489 }
1490
1491 #[cfg(test)]
1492 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1493 let root_name = self.root_name.clone();
1494 proto::UpdateWorktree {
1495 project_id,
1496 worktree_id: self.id().to_proto(),
1497 abs_path: self.abs_path().to_string_lossy().into(),
1498 root_name,
1499 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1500 removed_entries: Default::default(),
1501 scan_id: self.scan_id as u64,
1502 is_last_update: true,
1503 }
1504 }
1505
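    /// Computes the entries that were added, updated, or removed relative to `other`,
    /// packaged as a worktree update for collaborators.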
1506 pub(crate) fn build_update(
1507 &self,
1508 other: &Self,
1509 project_id: u64,
1510 worktree_id: u64,
1511 include_ignored: bool,
1512 ) -> proto::UpdateWorktree {
1513 let mut updated_entries = Vec::new();
1514 let mut removed_entries = Vec::new();
1515 let mut self_entries = self
1516 .entries_by_id
1517 .cursor::<()>()
1518 .filter(|e| include_ignored || !e.is_ignored)
1519 .peekable();
1520 let mut other_entries = other
1521 .entries_by_id
1522 .cursor::<()>()
1523 .filter(|e| include_ignored || !e.is_ignored)
1524 .peekable();
1525 loop {
1526 match (self_entries.peek(), other_entries.peek()) {
1527 (Some(self_entry), Some(other_entry)) => {
1528 match Ord::cmp(&self_entry.id, &other_entry.id) {
1529 Ordering::Less => {
1530 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1531 updated_entries.push(entry);
1532 self_entries.next();
1533 }
1534 Ordering::Equal => {
1535 if self_entry.scan_id != other_entry.scan_id {
1536 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1537 updated_entries.push(entry);
1538 }
1539
1540 self_entries.next();
1541 other_entries.next();
1542 }
1543 Ordering::Greater => {
1544 removed_entries.push(other_entry.id.to_proto());
1545 other_entries.next();
1546 }
1547 }
1548 }
1549 (Some(self_entry), None) => {
1550 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1551 updated_entries.push(entry);
1552 self_entries.next();
1553 }
1554 (None, Some(other_entry)) => {
1555 removed_entries.push(other_entry.id.to_proto());
1556 other_entries.next();
1557 }
1558 (None, None) => break,
1559 }
1560 }
1561
1562 proto::UpdateWorktree {
1563 project_id,
1564 worktree_id,
1565 abs_path: self.abs_path().to_string_lossy().into(),
1566 root_name: self.root_name().to_string(),
1567 updated_entries,
1568 removed_entries,
1569 scan_id: self.scan_id as u64,
1570 is_last_update: self.completed_scan_id == self.scan_id,
1571 }
1572 }
1573
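    /// Inserts an entry into both trees, reusing an existing entry id where possible. If
    /// the entry is a `.gitignore` file, its rules are parsed and cached.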
1574 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1575 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1576 let abs_path = self.abs_path.join(&entry.path);
1577 match smol::block_on(build_gitignore(&abs_path, fs)) {
1578 Ok(ignore) => {
1579 self.ignores_by_parent_abs_path.insert(
1580 abs_path.parent().unwrap().into(),
1581 (Arc::new(ignore), self.scan_id),
1582 );
1583 }
1584 Err(error) => {
1585 log::error!(
1586 "error loading .gitignore file {:?} - {:?}",
1587 &entry.path,
1588 error
1589 );
1590 }
1591 }
1592 }
1593
1594 self.reuse_entry_id(&mut entry);
1595
1596 if entry.kind == EntryKind::PendingDir {
1597 if let Some(existing_entry) =
1598 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1599 {
1600 entry.kind = existing_entry.kind;
1601 }
1602 }
1603
1604 let scan_id = self.scan_id;
1605 self.entries_by_path.insert_or_replace(entry.clone(), &());
1606 self.entries_by_id.insert_or_replace(
1607 PathEntry {
1608 id: entry.id,
1609 path: entry.path.clone(),
1610 is_ignored: entry.is_ignored,
1611 scan_id,
1612 },
1613 &(),
1614 );
1615
1616 entry
1617 }
1618
1619 fn populate_dir(
1620 &mut self,
1621 parent_path: Arc<Path>,
1622 entries: impl IntoIterator<Item = Entry>,
1623 ignore: Option<Arc<Gitignore>>,
1624 fs: &dyn Fs,
1625 ) {
1626 let mut parent_entry = if let Some(parent_entry) =
1627 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1628 {
1629 parent_entry.clone()
1630 } else {
1631 log::warn!(
1632 "populating a directory {:?} that has been removed",
1633 parent_path
1634 );
1635 return;
1636 };
1637
1638 if let Some(ignore) = ignore {
1639 self.ignores_by_parent_abs_path.insert(
1640 self.abs_path.join(&parent_path).into(),
1641 (ignore, self.scan_id),
1642 );
1643 }
1644 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1645 parent_entry.kind = EntryKind::Dir;
1646 } else {
1647 unreachable!();
1648 }
1649
1650 if parent_path.file_name() == Some(&DOT_GIT) {
1651 let abs_path = self.abs_path.join(&parent_path);
1652 let content_path: Arc<Path> = parent_path.parent().unwrap().into();
1653 if let Err(ix) = self
1654 .git_repositories
1655 .binary_search_by_key(&&content_path, |repo| &repo.content_path)
1656 {
1657 if let Some(repo) = fs.open_repo(abs_path.as_path()) {
1658 self.git_repositories.insert(
1659 ix,
1660 GitRepositoryEntry {
1661 repo,
1662 scan_id: 0,
1663 content_path,
1664 git_dir_path: parent_path,
1665 },
1666 );
1667 }
1668 }
1669 }
1670
1671 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1672 let mut entries_by_id_edits = Vec::new();
1673
1674 for mut entry in entries {
1675 self.reuse_entry_id(&mut entry);
1676 entries_by_id_edits.push(Edit::Insert(PathEntry {
1677 id: entry.id,
1678 path: entry.path.clone(),
1679 is_ignored: entry.is_ignored,
1680 scan_id: self.scan_id,
1681 }));
1682 entries_by_path_edits.push(Edit::Insert(entry));
1683 }
1684
1685 self.entries_by_path.edit(entries_by_path_edits, &());
1686 self.entries_by_id.edit(entries_by_id_edits, &());
1687 }
1688
1689 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1690 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1691 entry.id = removed_entry_id;
1692 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1693 entry.id = existing_entry.id;
1694 }
1695 }
1696
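    /// Removes a path and its descendants, remembering the removed entry ids (keyed by
    /// inode) so they can be reused if the same files reappear, and bumping the scan id
    /// of any affected `.gitignore` file or git repository.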
1697 fn remove_path(&mut self, path: &Path) {
1698 let mut new_entries;
1699 let removed_entries;
1700 {
1701 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1702 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1703 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1704 new_entries.push_tree(cursor.suffix(&()), &());
1705 }
1706 self.entries_by_path = new_entries;
1707
1708 let mut entries_by_id_edits = Vec::new();
1709 for entry in removed_entries.cursor::<()>() {
1710 let removed_entry_id = self
1711 .removed_entry_ids
1712 .entry(entry.inode)
1713 .or_insert(entry.id);
1714 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1715 entries_by_id_edits.push(Edit::Remove(entry.id));
1716 }
1717 self.entries_by_id.edit(entries_by_id_edits, &());
1718
1719 if path.file_name() == Some(&GITIGNORE) {
1720 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1721 if let Some((_, scan_id)) = self
1722 .ignores_by_parent_abs_path
1723 .get_mut(abs_parent_path.as_path())
1724 {
1725 *scan_id = self.snapshot.scan_id;
1726 }
1727 } else if path.file_name() == Some(&DOT_GIT) {
1728 let parent_path = path.parent().unwrap();
1729 if let Ok(ix) = self
1730 .git_repositories
1731 .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
1732 {
1733 self.git_repositories[ix].scan_id = self.snapshot.scan_id;
1734 }
1735 }
1736 }
1737
1738 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1739 let mut inodes = TreeSet::default();
1740 for ancestor in path.ancestors().skip(1) {
1741 if let Some(entry) = self.entry_for_path(ancestor) {
1742 inodes.insert(entry.inode);
1743 }
1744 }
1745 inodes
1746 }
1747
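    /// Builds the stack of `.gitignore` rules that apply to `abs_path`, walking its
    /// ancestor directories from the outermost inward.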
1748 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1749 let mut new_ignores = Vec::new();
1750 for ancestor in abs_path.ancestors().skip(1) {
1751 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1752 new_ignores.push((ancestor, Some(ignore.clone())));
1753 } else {
1754 new_ignores.push((ancestor, None));
1755 }
1756 }
1757
1758 let mut ignore_stack = IgnoreStack::none();
1759 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1760 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1761 ignore_stack = IgnoreStack::all();
1762 break;
1763 } else if let Some(ignore) = ignore {
1764 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1765 }
1766 }
1767
1768 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1769 ignore_stack = IgnoreStack::all();
1770 }
1771
1772 ignore_stack
1773 }
1774
1775 pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
1776 &self.git_repositories
1777 }
1778}
1779
1780impl GitRepositoryEntry {
1781 // Note that these paths should be relative to the worktree root.
1782 pub(crate) fn manages(&self, path: &Path) -> bool {
1783 path.starts_with(self.content_path.as_ref())
1784 }
1785
    // Note that this path should be relative to the worktree root.
1787 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1788 path.starts_with(self.git_dir_path.as_ref())
1789 }
1790}
1791
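/// Loads a `.gitignore` file from disk and parses it into a matcher rooted at the file's
/// parent directory.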
1792async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1793 let contents = fs.load(abs_path).await?;
1794 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1795 let mut builder = GitignoreBuilder::new(parent);
1796 for line in contents.lines() {
1797 builder.add_line(Some(abs_path.into()), line)?;
1798 }
1799 Ok(builder.build()?)
1800}
1801
1802impl WorktreeId {
1803 pub fn from_usize(handle_id: usize) -> Self {
1804 Self(handle_id)
1805 }
1806
1807 pub(crate) fn from_proto(id: u64) -> Self {
1808 Self(id as usize)
1809 }
1810
1811 pub fn to_proto(&self) -> u64 {
1812 self.0 as u64
1813 }
1814
1815 pub fn to_usize(&self) -> usize {
1816 self.0
1817 }
1818}
1819
1820impl fmt::Display for WorktreeId {
1821 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1822 self.0.fmt(f)
1823 }
1824}
1825
1826impl Deref for Worktree {
1827 type Target = Snapshot;
1828
1829 fn deref(&self) -> &Self::Target {
1830 match self {
1831 Worktree::Local(worktree) => &worktree.snapshot,
1832 Worktree::Remote(worktree) => &worktree.snapshot,
1833 }
1834 }
1835}
1836
1837impl Deref for LocalWorktree {
1838 type Target = LocalSnapshot;
1839
1840 fn deref(&self) -> &Self::Target {
1841 &self.snapshot
1842 }
1843}
1844
1845impl Deref for RemoteWorktree {
1846 type Target = Snapshot;
1847
1848 fn deref(&self) -> &Self::Target {
1849 &self.snapshot
1850 }
1851}
1852
1853impl fmt::Debug for LocalWorktree {
1854 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1855 self.snapshot.fmt(f)
1856 }
1857}
1858
1859impl fmt::Debug for Snapshot {
1860 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1861 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1862 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1863
1864 impl<'a> fmt::Debug for EntriesByPath<'a> {
1865 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1866 f.debug_map()
1867 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1868 .finish()
1869 }
1870 }
1871
1872 impl<'a> fmt::Debug for EntriesById<'a> {
1873 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1874 f.debug_list().entries(self.0.iter()).finish()
1875 }
1876 }
1877
1878 f.debug_struct("Snapshot")
1879 .field("id", &self.id)
1880 .field("root_name", &self.root_name)
1881 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1882 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1883 .finish()
1884 }
1885}
1886
1887#[derive(Clone, PartialEq)]
1888pub struct File {
1889 pub worktree: ModelHandle<Worktree>,
1890 pub path: Arc<Path>,
1891 pub mtime: SystemTime,
1892 pub(crate) entry_id: ProjectEntryId,
1893 pub(crate) is_local: bool,
1894 pub(crate) is_deleted: bool,
1895}
1896
1897impl language::File for File {
1898 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1899 if self.is_local {
1900 Some(self)
1901 } else {
1902 None
1903 }
1904 }
1905
1906 fn mtime(&self) -> SystemTime {
1907 self.mtime
1908 }
1909
1910 fn path(&self) -> &Arc<Path> {
1911 &self.path
1912 }
1913
1914 fn full_path(&self, cx: &AppContext) -> PathBuf {
1915 let mut full_path = PathBuf::new();
1916 let worktree = self.worktree.read(cx);
1917
1918 if worktree.is_visible() {
1919 full_path.push(worktree.root_name());
1920 } else {
1921 let path = worktree.abs_path();
1922
1923 if worktree.is_local() && path.starts_with(HOME.as_path()) {
1924 full_path.push("~");
1925 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
1926 } else {
1927 full_path.push(path)
1928 }
1929 }
1930
1931 if self.path.components().next().is_some() {
1932 full_path.push(&self.path);
1933 }
1934
1935 full_path
1936 }
1937
1938 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1939 /// of its worktree, then this method will return the name of the worktree itself.
1940 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1941 self.path
1942 .file_name()
1943 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1944 }
1945
1946 fn is_deleted(&self) -> bool {
1947 self.is_deleted
1948 }
1949
1950 fn as_any(&self) -> &dyn Any {
1951 self
1952 }
1953
1954 fn to_proto(&self) -> rpc::proto::File {
1955 rpc::proto::File {
1956 worktree_id: self.worktree.id() as u64,
1957 entry_id: self.entry_id.to_proto(),
1958 path: self.path.to_string_lossy().into(),
1959 mtime: Some(self.mtime.into()),
1960 is_deleted: self.is_deleted,
1961 }
1962 }
1963}
1964
1965impl language::LocalFile for File {
1966 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1967 self.worktree
1968 .read(cx)
1969 .as_local()
1970 .unwrap()
1971 .abs_path
1972 .join(&self.path)
1973 }
1974
1975 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1976 let worktree = self.worktree.read(cx).as_local().unwrap();
1977 let abs_path = worktree.absolutize(&self.path);
1978 let fs = worktree.fs.clone();
1979 cx.background()
1980 .spawn(async move { fs.load(&abs_path).await })
1981 }
1982
1983 fn buffer_reloaded(
1984 &self,
1985 buffer_id: u64,
1986 version: &clock::Global,
1987 fingerprint: RopeFingerprint,
1988 line_ending: LineEnding,
1989 mtime: SystemTime,
1990 cx: &mut MutableAppContext,
1991 ) {
1992 let worktree = self.worktree.read(cx).as_local().unwrap();
1993 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1994 worktree
1995 .client
1996 .send(proto::BufferReloaded {
1997 project_id,
1998 buffer_id,
1999 version: serialize_version(version),
2000 mtime: Some(mtime.into()),
2001 fingerprint: serialize_fingerprint(fingerprint),
2002 line_ending: serialize_line_ending(line_ending) as i32,
2003 })
2004 .log_err();
2005 }
2006 }
2007}
2008
2009impl File {
2010 pub fn from_proto(
2011 proto: rpc::proto::File,
2012 worktree: ModelHandle<Worktree>,
2013 cx: &AppContext,
2014 ) -> Result<Self> {
2015 let worktree_id = worktree
2016 .read(cx)
2017 .as_remote()
2018 .ok_or_else(|| anyhow!("not remote"))?
2019 .id();
2020
2021 if worktree_id.to_proto() != proto.worktree_id {
2022 return Err(anyhow!("worktree id does not match file"));
2023 }
2024
2025 Ok(Self {
2026 worktree,
2027 path: Path::new(&proto.path).into(),
2028 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2029 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2030 is_local: false,
2031 is_deleted: proto.is_deleted,
2032 })
2033 }
2034
2035 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2036 file.and_then(|f| f.as_any().downcast_ref())
2037 }
2038
2039 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2040 self.worktree.read(cx).id()
2041 }
2042
2043 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2044 if self.is_deleted {
2045 None
2046 } else {
2047 Some(self.entry_id)
2048 }
2049 }
2050}
2051
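/// A single file or directory in a worktree snapshot, stored in the
/// `entries_by_path` sum tree and keyed by its path relative to the worktree root.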
2052#[derive(Clone, Debug, PartialEq, Eq)]
2053pub struct Entry {
2054 pub id: ProjectEntryId,
2055 pub kind: EntryKind,
2056 pub path: Arc<Path>,
2057 pub inode: u64,
2058 pub mtime: SystemTime,
2059 pub is_symlink: bool,
2060 pub is_ignored: bool,
2061}
2062
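/// The kind of a worktree entry. Directories are created as `PendingDir` and are
/// promoted to `Dir` once the background scanner has populated their children.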
2063#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2064pub enum EntryKind {
2065 PendingDir,
2066 Dir,
2067 File(CharBag),
2068}
2069
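/// The kind of change detected for a path when comparing two snapshots of the worktree.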
2070#[derive(Clone, Copy, Debug)]
2071pub enum PathChange {
2072 Added,
2073 Removed,
2074 Updated,
2075}
2076
2077impl Entry {
2078 fn new(
2079 path: Arc<Path>,
2080 metadata: &fs::Metadata,
2081 next_entry_id: &AtomicUsize,
2082 root_char_bag: CharBag,
2083 ) -> Self {
2084 Self {
2085 id: ProjectEntryId::new(next_entry_id),
2086 kind: if metadata.is_dir {
2087 EntryKind::PendingDir
2088 } else {
2089 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2090 },
2091 path,
2092 inode: metadata.inode,
2093 mtime: metadata.mtime,
2094 is_symlink: metadata.is_symlink,
2095 is_ignored: false,
2096 }
2097 }
2098
2099 pub fn is_dir(&self) -> bool {
2100 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2101 }
2102
2103 pub fn is_file(&self) -> bool {
2104 matches!(self.kind, EntryKind::File(_))
2105 }
2106}
2107
2108impl sum_tree::Item for Entry {
2109 type Summary = EntrySummary;
2110
2111 fn summary(&self) -> Self::Summary {
2112 let visible_count = if self.is_ignored { 0 } else { 1 };
2113 let file_count;
2114 let visible_file_count;
2115 if self.is_file() {
2116 file_count = 1;
2117 visible_file_count = visible_count;
2118 } else {
2119 file_count = 0;
2120 visible_file_count = 0;
2121 }
2122
2123 EntrySummary {
2124 max_path: self.path.clone(),
2125 count: 1,
2126 visible_count,
2127 file_count,
2128 visible_file_count,
2129 }
2130 }
2131}
2132
2133impl sum_tree::KeyedItem for Entry {
2134 type Key = PathKey;
2135
2136 fn key(&self) -> Self::Key {
2137 PathKey(self.path.clone())
2138 }
2139}
2140
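/// Summary data aggregated over a range of `Entry` items in the `entries_by_path`
/// tree: the maximum path in the range plus counts of total, visible (non-ignored),
/// file, and visible-file entries.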
2141#[derive(Clone, Debug)]
2142pub struct EntrySummary {
2143 max_path: Arc<Path>,
2144 count: usize,
2145 visible_count: usize,
2146 file_count: usize,
2147 visible_file_count: usize,
2148}
2149
2150impl Default for EntrySummary {
2151 fn default() -> Self {
2152 Self {
2153 max_path: Arc::from(Path::new("")),
2154 count: 0,
2155 visible_count: 0,
2156 file_count: 0,
2157 visible_file_count: 0,
2158 }
2159 }
2160}
2161
2162impl sum_tree::Summary for EntrySummary {
2163 type Context = ();
2164
2165 fn add_summary(&mut self, rhs: &Self, _: &()) {
2166 self.max_path = rhs.max_path.clone();
2167 self.count += rhs.count;
2168 self.visible_count += rhs.visible_count;
2169 self.file_count += rhs.file_count;
2170 self.visible_file_count += rhs.visible_file_count;
2171 }
2172}
2173
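/// A lightweight record of an entry stored in the `entries_by_id` tree, keyed by
/// `ProjectEntryId` so entries can be located without knowing their path.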
2174#[derive(Clone, Debug)]
2175struct PathEntry {
2176 id: ProjectEntryId,
2177 path: Arc<Path>,
2178 is_ignored: bool,
2179 scan_id: usize,
2180}
2181
2182impl sum_tree::Item for PathEntry {
2183 type Summary = PathEntrySummary;
2184
2185 fn summary(&self) -> Self::Summary {
2186 PathEntrySummary { max_id: self.id }
2187 }
2188}
2189
2190impl sum_tree::KeyedItem for PathEntry {
2191 type Key = ProjectEntryId;
2192
2193 fn key(&self) -> Self::Key {
2194 self.id
2195 }
2196}
2197
2198#[derive(Clone, Debug, Default)]
2199struct PathEntrySummary {
2200 max_id: ProjectEntryId,
2201}
2202
2203impl sum_tree::Summary for PathEntrySummary {
2204 type Context = ();
2205
2206 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2207 self.max_id = summary.max_id;
2208 }
2209}
2210
2211impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2212 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2213 *self = summary.max_id;
2214 }
2215}
2216
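/// A key identifying an `Entry` by its worktree-relative path, used to seek and edit
/// the `entries_by_path` tree.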
2217#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2218pub struct PathKey(Arc<Path>);
2219
2220impl Default for PathKey {
2221 fn default() -> Self {
2222 Self(Path::new("").into())
2223 }
2224}
2225
2226impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2227 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2228 self.0 = summary.max_path.clone();
2229 }
2230}
2231
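/// Maintains the worktree's `LocalSnapshot` on a background task: performs the initial
/// directory scan, then applies batches of file-system events, reporting progress to the
/// foreground via `ScanState` messages on `notify` and recording path changes in `changes`.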
2232struct BackgroundScanner {
2233 fs: Arc<dyn Fs>,
2234 snapshot: Arc<Mutex<LocalSnapshot>>,
2235 changes: Arc<Mutex<HashMap<Arc<Path>, PathChange>>>,
2236 notify: UnboundedSender<ScanState>,
2237 executor: Arc<executor::Background>,
2238}
2239
2240impl BackgroundScanner {
2241 fn new(
2242 snapshot: Arc<Mutex<LocalSnapshot>>,
2243 changes: Arc<Mutex<HashMap<Arc<Path>, PathChange>>>,
2244 notify: UnboundedSender<ScanState>,
2245 fs: Arc<dyn Fs>,
2246 executor: Arc<executor::Background>,
2247 ) -> Self {
2248 Self {
2249 fs,
2250 snapshot,
2251 changes,
2252 notify,
2253 executor,
2254 }
2255 }
2256
2257 fn abs_path(&self) -> Arc<Path> {
2258 self.snapshot.lock().abs_path.clone()
2259 }
2260
2261 fn snapshot(&self) -> LocalSnapshot {
2262 self.snapshot.lock().clone()
2263 }
2264
2265 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2266 if self.notify.unbounded_send(ScanState::Initializing).is_err() {
2267 return;
2268 }
2269
2270 if let Err(err) = self.scan_dirs().await {
2271 if self
2272 .notify
2273 .unbounded_send(ScanState::Err(Arc::new(err)))
2274 .is_err()
2275 {
2276 return;
2277 }
2278 }
2279
2280 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2281 return;
2282 }
2283
2284 futures::pin_mut!(events_rx);
2285
2286 while let Some(mut events) = events_rx.next().await {
2287 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
2288 events.extend(additional_events);
2289 }
2290
2291 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2292 break;
2293 }
2294
2295 if !self.process_events(events).await {
2296 break;
2297 }
2298
2299 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2300 break;
2301 }
2302 }
2303 }
2304
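/// Performs the initial full scan of the worktree: loads `.gitignore` files from the
/// root's ancestors, seeds the scan queue with the root directory, and processes the
/// queue with one task per CPU.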
2305 async fn scan_dirs(&mut self) -> Result<()> {
2306 let root_char_bag;
2307 let root_abs_path;
2308 let root_inode;
2309 let is_dir;
2310 let next_entry_id;
2311 {
2312 let mut snapshot = self.snapshot.lock();
2313 snapshot.scan_started();
2314 root_char_bag = snapshot.root_char_bag;
2315 root_abs_path = snapshot.abs_path.clone();
2316 root_inode = snapshot.root_entry().map(|e| e.inode);
2317 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
2318 next_entry_id = snapshot.next_entry_id.clone();
2319 };
2320
2321 // Populate ignores above the root.
2322 for ancestor in root_abs_path.ancestors().skip(1) {
2323 if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2324 {
2325 self.snapshot
2326 .lock()
2327 .ignores_by_parent_abs_path
2328 .insert(ancestor.into(), (ignore.into(), 0));
2329 }
2330 }
2331
2332 let ignore_stack = {
2333 let mut snapshot = self.snapshot.lock();
2334 let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2335 if ignore_stack.is_all() {
2336 if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2337 root_entry.is_ignored = true;
2338 snapshot.insert_entry(root_entry, self.fs.as_ref());
2339 }
2340 }
2341 ignore_stack
2342 };
2343
2344 if is_dir {
2345 let path: Arc<Path> = Arc::from(Path::new(""));
2346 let mut ancestor_inodes = TreeSet::default();
2347 if let Some(root_inode) = root_inode {
2348 ancestor_inodes.insert(root_inode);
2349 }
2350
2351 let (tx, rx) = channel::unbounded();
2352 self.executor
2353 .block(tx.send(ScanJob {
2354 abs_path: root_abs_path.to_path_buf(),
2355 path,
2356 ignore_stack,
2357 ancestor_inodes,
2358 scan_queue: tx.clone(),
2359 }))
2360 .unwrap();
2361 drop(tx);
2362
2363 self.executor
2364 .scoped(|scope| {
2365 for _ in 0..self.executor.num_cpus() {
2366 scope.spawn(async {
2367 while let Ok(job) = rx.recv().await {
2368 if let Err(err) = self
2369 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2370 .await
2371 {
2372 log::error!("error scanning {:?}: {}", job.abs_path, err);
2373 }
2374 }
2375 });
2376 }
2377 })
2378 .await;
2379
2380 self.snapshot.lock().scan_completed();
2381 }
2382
2383 Ok(())
2384 }
2385
2386 async fn scan_dir(
2387 &self,
2388 root_char_bag: CharBag,
2389 next_entry_id: Arc<AtomicUsize>,
2390 job: &ScanJob,
2391 ) -> Result<()> {
2392 let mut new_entries: Vec<Entry> = Vec::new();
2393 let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2394 let mut ignore_stack = job.ignore_stack.clone();
2395 let mut new_ignore = None;
2396
2397 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2398 while let Some(child_abs_path) = child_paths.next().await {
2399 let child_abs_path = match child_abs_path {
2400 Ok(child_abs_path) => child_abs_path,
2401 Err(error) => {
2402 log::error!("error processing entry {:?}", error);
2403 continue;
2404 }
2405 };
2406
2407 let child_name = child_abs_path.file_name().unwrap();
2408 let child_path: Arc<Path> = job.path.join(child_name).into();
2409 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2410 Ok(Some(metadata)) => metadata,
2411 Ok(None) => continue,
2412 Err(err) => {
2413 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2414 continue;
2415 }
2416 };
2417
2418 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2419 if child_name == *GITIGNORE {
2420 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2421 Ok(ignore) => {
2422 let ignore = Arc::new(ignore);
2423 ignore_stack =
2424 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2425 new_ignore = Some(ignore);
2426 }
2427 Err(error) => {
2428 log::error!(
2429 "error loading .gitignore file {:?} - {:?}",
2430 child_name,
2431 error
2432 );
2433 }
2434 }
2435
2436 // Update the ignore status of any child entries we've already processed to reflect the
2437 // ignore file in the current directory. Because `.gitignore` starts with a `.`, it is
2438 // usually one of the first children we encounter, so there should rarely be many entries
2439 // to revisit. Update the ignore stack associated with any new jobs as well.
2440 let mut new_jobs = new_jobs.iter_mut();
2441 for entry in &mut new_entries {
2442 let entry_abs_path = self.abs_path().join(&entry.path);
2443 entry.is_ignored =
2444 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2445
2446 if entry.is_dir() {
2447 if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2448 job.ignore_stack = if entry.is_ignored {
2449 IgnoreStack::all()
2450 } else {
2451 ignore_stack.clone()
2452 };
2453 }
2454 }
2455 }
2456 }
2457
2458 let mut child_entry = Entry::new(
2459 child_path.clone(),
2460 &child_metadata,
2461 &next_entry_id,
2462 root_char_bag,
2463 );
2464
2465 if child_entry.is_dir() {
2466 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2467 child_entry.is_ignored = is_ignored;
2468
2469 // Avoid recursing forever when we encounter a recursive symlink.
2470 if !job.ancestor_inodes.contains(&child_entry.inode) {
2471 let mut ancestor_inodes = job.ancestor_inodes.clone();
2472 ancestor_inodes.insert(child_entry.inode);
2473
2474 new_jobs.push(Some(ScanJob {
2475 abs_path: child_abs_path,
2476 path: child_path,
2477 ignore_stack: if is_ignored {
2478 IgnoreStack::all()
2479 } else {
2480 ignore_stack.clone()
2481 },
2482 ancestor_inodes,
2483 scan_queue: job.scan_queue.clone(),
2484 }));
2485 } else {
2486 new_jobs.push(None);
2487 }
2488 } else {
2489 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2490 }
2491
2492 new_entries.push(child_entry);
2493 }
2494
2495 self.snapshot.lock().populate_dir(
2496 job.path.clone(),
2497 new_entries,
2498 new_ignore,
2499 self.fs.as_ref(),
2500 );
2501
2502 for new_job in new_jobs {
2503 if let Some(new_job) = new_job {
2504 job.scan_queue.send(new_job).await.unwrap();
2505 }
2506 }
2507
2508 Ok(())
2509 }
2510
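/// Processes a batch of file-system events: removes stale entries, re-inserts entries for
/// the affected paths, rescans any newly created directories, refreshes ignore statuses and
/// git repositories, and records the resulting `PathChange`s. Returns `false` if the
/// worktree root can no longer be canonicalized.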
2511 async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2512 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2513 events.dedup_by(|a, b| a.path.starts_with(&b.path));
2514
2515 let root_char_bag;
2516 let root_abs_path;
2517 let next_entry_id;
2518 let prev_snapshot;
2519 {
2520 let mut snapshot = self.snapshot.lock();
2521 prev_snapshot = snapshot.snapshot.clone();
2522 root_char_bag = snapshot.root_char_bag;
2523 root_abs_path = snapshot.abs_path.clone();
2524 next_entry_id = snapshot.next_entry_id.clone();
2525 snapshot.scan_started();
2526 }
2527
2528 let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
2529 path
2530 } else {
2531 return false;
2532 };
2533 let metadata = futures::future::join_all(
2534 events
2535 .iter()
2536 .map(|event| self.fs.metadata(&event.path))
2537 .collect::<Vec<_>>(),
2538 )
2539 .await;
2540
2541 // Hold the snapshot lock while removing and re-inserting the entries affected
2542 // by each event. This way, the snapshot is not observable to the foreground
2543 // thread while this operation is in progress.
2544 let mut event_paths = Vec::with_capacity(events.len());
2545 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2546 {
2547 let mut snapshot = self.snapshot.lock();
2548 for event in &events {
2549 if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
2550 snapshot.remove_path(path);
2551 }
2552 }
2553
2554 for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
2555 let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
2556 Ok(path) => Arc::from(path.to_path_buf()),
2557 Err(_) => {
2558 log::error!(
2559 "unexpected event {:?} for root path {:?}",
2560 event.path,
2561 root_canonical_path
2562 );
2563 continue;
2564 }
2565 };
2566 event_paths.push(path.clone());
2567 let abs_path = root_abs_path.join(&path);
2568
2569 match metadata {
2570 Ok(Some(metadata)) => {
2571 let ignore_stack =
2572 snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2573 let mut fs_entry = Entry::new(
2574 path.clone(),
2575 &metadata,
2576 snapshot.next_entry_id.as_ref(),
2577 snapshot.root_char_bag,
2578 );
2579 fs_entry.is_ignored = ignore_stack.is_all();
2580 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2581
2582 let scan_id = snapshot.scan_id;
2583 if let Some(repo) = snapshot.in_dot_git(&path) {
2584 repo.repo.lock().reload_index();
2585 repo.scan_id = scan_id;
2586 }
2587
2588 let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2589 if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2590 ancestor_inodes.insert(metadata.inode);
2591 self.executor
2592 .block(scan_queue_tx.send(ScanJob {
2593 abs_path,
2594 path,
2595 ignore_stack,
2596 ancestor_inodes,
2597 scan_queue: scan_queue_tx.clone(),
2598 }))
2599 .unwrap();
2600 }
2601 }
2602 Ok(None) => {}
2603 Err(err) => {
2604 // TODO - create a special 'error' entry in the entries tree to mark this
2605 log::error!("error reading file on event {:?}", err);
2606 }
2607 }
2608 }
2609 drop(scan_queue_tx);
2610 }
2611
2612 // Scan any directories that were created as part of this event batch.
2613 self.executor
2614 .scoped(|scope| {
2615 for _ in 0..self.executor.num_cpus() {
2616 scope.spawn(async {
2617 while let Ok(job) = scan_queue_rx.recv().await {
2618 if let Err(err) = self
2619 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2620 .await
2621 {
2622 log::error!("error scanning {:?}: {}", job.abs_path, err);
2623 }
2624 }
2625 });
2626 }
2627 })
2628 .await;
2629
2630 // Attempt to detect renames only over a single batch of file-system events.
2631 self.snapshot.lock().removed_entry_ids.clear();
2632
2633 self.update_ignore_statuses().await;
2634 self.update_git_repositories();
2635 self.build_change_set(prev_snapshot, event_paths);
2636 self.snapshot.lock().scan_completed();
2637 true
2638 }
2639
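/// Drops `.gitignore` records whose files no longer exist and, for any `.gitignore`
/// that changed during this scan, recomputes the ignore status of the entries beneath
/// it, fanning the work out across the executor.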
2640 async fn update_ignore_statuses(&self) {
2641 let mut snapshot = self.snapshot();
2642
2643 let mut ignores_to_update = Vec::new();
2644 let mut ignores_to_delete = Vec::new();
2645 for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2646 if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2647 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2648 ignores_to_update.push(parent_abs_path.clone());
2649 }
2650
2651 let ignore_path = parent_path.join(&*GITIGNORE);
2652 if snapshot.entry_for_path(ignore_path).is_none() {
2653 ignores_to_delete.push(parent_abs_path.clone());
2654 }
2655 }
2656 }
2657
2658 for parent_abs_path in ignores_to_delete {
2659 snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2660 self.snapshot
2661 .lock()
2662 .ignores_by_parent_abs_path
2663 .remove(&parent_abs_path);
2664 }
2665
2666 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2667 ignores_to_update.sort_unstable();
2668 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2669 while let Some(parent_abs_path) = ignores_to_update.next() {
2670 while ignores_to_update
2671 .peek()
2672 .map_or(false, |p| p.starts_with(&parent_abs_path))
2673 {
2674 ignores_to_update.next().unwrap();
2675 }
2676
2677 let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2678 ignore_queue_tx
2679 .send(UpdateIgnoreStatusJob {
2680 abs_path: parent_abs_path,
2681 ignore_stack,
2682 ignore_queue: ignore_queue_tx.clone(),
2683 })
2684 .await
2685 .unwrap();
2686 }
2687 drop(ignore_queue_tx);
2688
2689 self.executor
2690 .scoped(|scope| {
2691 for _ in 0..self.executor.num_cpus() {
2692 scope.spawn(async {
2693 while let Ok(job) = ignore_queue_rx.recv().await {
2694 self.update_ignore_status(job, &snapshot).await;
2695 }
2696 });
2697 }
2698 })
2699 .await;
2700 }
2701
2702 fn update_git_repositories(&self) {
2703 let mut snapshot = self.snapshot.lock();
2704 let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2705 git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
2706 snapshot.git_repositories = git_repositories;
2707 }
2708
2709 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2710 let mut ignore_stack = job.ignore_stack;
2711 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
2712 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2713 }
2714
2715 let mut entries_by_id_edits = Vec::new();
2716 let mut entries_by_path_edits = Vec::new();
2717 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
2718 for mut entry in snapshot.child_entries(path).cloned() {
2719 let was_ignored = entry.is_ignored;
2720 let abs_path = self.abs_path().join(&entry.path);
2721 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
2722 if entry.is_dir() {
2723 let child_ignore_stack = if entry.is_ignored {
2724 IgnoreStack::all()
2725 } else {
2726 ignore_stack.clone()
2727 };
2728 job.ignore_queue
2729 .send(UpdateIgnoreStatusJob {
2730 abs_path: abs_path.into(),
2731 ignore_stack: child_ignore_stack,
2732 ignore_queue: job.ignore_queue.clone(),
2733 })
2734 .await
2735 .unwrap();
2736 }
2737
2738 if entry.is_ignored != was_ignored {
2739 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2740 path_entry.scan_id = snapshot.scan_id;
2741 path_entry.is_ignored = entry.is_ignored;
2742 entries_by_id_edits.push(Edit::Insert(path_entry));
2743 entries_by_path_edits.push(Edit::Insert(entry));
2744 }
2745 }
2746
2747 let mut snapshot = self.snapshot.lock();
2748 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2749 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2750 }
2751
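/// Compares the previous snapshot with the current one around each event path and
/// records the resulting `Added`, `Removed`, and `Updated` paths in the shared change set.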
2752 fn build_change_set(&self, old_snapshot: Snapshot, event_paths: Vec<Arc<Path>>) {
2753 let new_snapshot = self.snapshot.lock();
2754 let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
2755 let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
2756
2757 let mut change_set = self.changes.lock();
2758 for path in event_paths {
2759 let path = PathKey(path);
2760 old_paths.seek(&path, Bias::Left, &());
2761 new_paths.seek(&path, Bias::Left, &());
2762
2763 loop {
2764 match (old_paths.item(), new_paths.item()) {
2765 (Some(old_entry), Some(new_entry)) => {
2766 if old_entry.path > path.0
2767 && new_entry.path > path.0
2768 && !old_entry.path.starts_with(&path.0)
2769 && !new_entry.path.starts_with(&path.0)
2770 {
2771 break;
2772 }
2773
2774 match Ord::cmp(&old_entry.path, &new_entry.path) {
2775 Ordering::Less => {
2776 change_set.insert(old_entry.path.clone(), PathChange::Removed);
2777 old_paths.next(&());
2778 }
2779 Ordering::Equal => {
2780 if old_entry.mtime != new_entry.mtime {
2781 change_set.insert(old_entry.path.clone(), PathChange::Updated);
2782 }
2783 old_paths.next(&());
2784 new_paths.next(&());
2785 }
2786 Ordering::Greater => {
2787 change_set.insert(new_entry.path.clone(), PathChange::Added);
2788 new_paths.next(&());
2789 }
2790 }
2791 }
2792 (Some(old_entry), None) => {
2793 change_set.insert(old_entry.path.clone(), PathChange::Removed);
2794 old_paths.next(&());
2795 }
2796 (None, Some(new_entry)) => {
2797 change_set.insert(new_entry.path.clone(), PathChange::Added);
2798 new_paths.next(&());
2799 }
2800 (None, None) => break,
2801 }
2802 }
2803 }
2804 }
2805}
2806
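/// Combines the worktree root's character bag with the lowercased characters of the
/// given path, producing the bag used for fuzzy matching of this entry.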
2807fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2808 let mut result = root_char_bag;
2809 result.extend(
2810 path.to_string_lossy()
2811 .chars()
2812 .map(|c| c.to_ascii_lowercase()),
2813 );
2814 result
2815}
2816
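/// A unit of work for the background scanner: scan one directory, enqueueing its child
/// directories onto `scan_queue`. `ancestor_inodes` guards against symlink cycles.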
2817struct ScanJob {
2818 abs_path: PathBuf,
2819 path: Arc<Path>,
2820 ignore_stack: Arc<IgnoreStack>,
2821 scan_queue: Sender<ScanJob>,
2822 ancestor_inodes: TreeSet<u64>,
2823}
2824
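/// A unit of work for recomputing ignore statuses beneath a directory whose
/// `.gitignore` changed.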
2825struct UpdateIgnoreStatusJob {
2826 abs_path: Arc<Path>,
2827 ignore_stack: Arc<IgnoreStack>,
2828 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2829}
2830
2831pub trait WorktreeHandle {
2832 #[cfg(any(test, feature = "test-support"))]
2833 fn flush_fs_events<'a>(
2834 &self,
2835 cx: &'a gpui::TestAppContext,
2836 ) -> futures::future::LocalBoxFuture<'a, ()>;
2837}
2838
2839impl WorktreeHandle for ModelHandle<Worktree> {
2840 // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
2841 // occurred before the worktree was constructed. These events can cause the worktree to perform
2842 // extra directory scans and emit extra scan-state notifications.
2843 //
2844 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2845 // ensuring that all redundant FS events have already been processed.
2846 #[cfg(any(test, feature = "test-support"))]
2847 fn flush_fs_events<'a>(
2848 &self,
2849 cx: &'a gpui::TestAppContext,
2850 ) -> futures::future::LocalBoxFuture<'a, ()> {
2851 use smol::future::FutureExt;
2852
2853 let filename = "fs-event-sentinel";
2854 let tree = self.clone();
2855 let (fs, root_path) = self.read_with(cx, |tree, _| {
2856 let tree = tree.as_local().unwrap();
2857 (tree.fs.clone(), tree.abs_path().clone())
2858 });
2859
2860 async move {
2861 fs.create_file(&root_path.join(filename), Default::default())
2862 .await
2863 .unwrap();
2864 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
2865 .await;
2866
2867 fs.remove_file(&root_path.join(filename), Default::default())
2868 .await
2869 .unwrap();
2870 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
2871 .await;
2872
2873 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2874 .await;
2875 }
2876 .boxed_local()
2877 }
2878}
2879
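/// A cursor dimension over `EntrySummary` that tracks the most recently seen path along
/// with running counts of total, visible, file, and visible-file entries; `Traversal`
/// uses the counts to seek by offset.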
2880#[derive(Clone, Debug)]
2881struct TraversalProgress<'a> {
2882 max_path: &'a Path,
2883 count: usize,
2884 visible_count: usize,
2885 file_count: usize,
2886 visible_file_count: usize,
2887}
2888
2889impl<'a> TraversalProgress<'a> {
2890 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2891 match (include_ignored, include_dirs) {
2892 (true, true) => self.count,
2893 (true, false) => self.file_count,
2894 (false, true) => self.visible_count,
2895 (false, false) => self.visible_file_count,
2896 }
2897 }
2898}
2899
2900impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2901 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2902 self.max_path = summary.max_path.as_ref();
2903 self.count += summary.count;
2904 self.visible_count += summary.visible_count;
2905 self.file_count += summary.file_count;
2906 self.visible_file_count += summary.visible_file_count;
2907 }
2908}
2909
2910impl<'a> Default for TraversalProgress<'a> {
2911 fn default() -> Self {
2912 Self {
2913 max_path: Path::new(""),
2914 count: 0,
2915 visible_count: 0,
2916 file_count: 0,
2917 visible_file_count: 0,
2918 }
2919 }
2920}
2921
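/// An iterator over worktree entries in path order, optionally filtering out
/// directories and ignored entries.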
2922pub struct Traversal<'a> {
2923 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2924 include_ignored: bool,
2925 include_dirs: bool,
2926}
2927
2928impl<'a> Traversal<'a> {
2929 pub fn advance(&mut self) -> bool {
2930 self.advance_to_offset(self.offset() + 1)
2931 }
2932
2933 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2934 self.cursor.seek_forward(
2935 &TraversalTarget::Count {
2936 count: offset,
2937 include_dirs: self.include_dirs,
2938 include_ignored: self.include_ignored,
2939 },
2940 Bias::Right,
2941 &(),
2942 )
2943 }
2944
2945 pub fn advance_to_sibling(&mut self) -> bool {
2946 while let Some(entry) = self.cursor.item() {
2947 self.cursor.seek_forward(
2948 &TraversalTarget::PathSuccessor(&entry.path),
2949 Bias::Left,
2950 &(),
2951 );
2952 if let Some(entry) = self.cursor.item() {
2953 if (self.include_dirs || !entry.is_dir())
2954 && (self.include_ignored || !entry.is_ignored)
2955 {
2956 return true;
2957 }
2958 }
2959 }
2960 false
2961 }
2962
2963 pub fn entry(&self) -> Option<&'a Entry> {
2964 self.cursor.item()
2965 }
2966
2967 pub fn offset(&self) -> usize {
2968 self.cursor
2969 .start()
2970 .count(self.include_dirs, self.include_ignored)
2971 }
2972}
2973
2974impl<'a> Iterator for Traversal<'a> {
2975 type Item = &'a Entry;
2976
2977 fn next(&mut self) -> Option<Self::Item> {
2978 if let Some(item) = self.entry() {
2979 self.advance();
2980 Some(item)
2981 } else {
2982 None
2983 }
2984 }
2985}
2986
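/// A seek target for `Traversal`: an exact path, the successor of a path (the first
/// entry not contained within it), or an entry count.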
2987#[derive(Debug)]
2988enum TraversalTarget<'a> {
2989 Path(&'a Path),
2990 PathSuccessor(&'a Path),
2991 Count {
2992 count: usize,
2993 include_ignored: bool,
2994 include_dirs: bool,
2995 },
2996}
2997
2998impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2999 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3000 match self {
3001 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3002 TraversalTarget::PathSuccessor(path) => {
3003 if !cursor_location.max_path.starts_with(path) {
3004 Ordering::Equal
3005 } else {
3006 Ordering::Greater
3007 }
3008 }
3009 TraversalTarget::Count {
3010 count,
3011 include_dirs,
3012 include_ignored,
3013 } => Ord::cmp(
3014 count,
3015 &cursor_location.count(*include_dirs, *include_ignored),
3016 ),
3017 }
3018 }
3019}
3020
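/// Iterates over the immediate children of `parent_path`, skipping each child's
/// descendants by advancing the underlying traversal to the next sibling.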
3021struct ChildEntriesIter<'a> {
3022 parent_path: &'a Path,
3023 traversal: Traversal<'a>,
3024}
3025
3026impl<'a> Iterator for ChildEntriesIter<'a> {
3027 type Item = &'a Entry;
3028
3029 fn next(&mut self) -> Option<Self::Item> {
3030 if let Some(item) = self.traversal.entry() {
3031 if item.path.starts_with(&self.parent_path) {
3032 self.traversal.advance_to_sibling();
3033 return Some(item);
3034 }
3035 }
3036 None
3037 }
3038}
3039
3040impl<'a> From<&'a Entry> for proto::Entry {
3041 fn from(entry: &'a Entry) -> Self {
3042 Self {
3043 id: entry.id.to_proto(),
3044 is_dir: entry.is_dir(),
3045 path: entry.path.to_string_lossy().into(),
3046 inode: entry.inode,
3047 mtime: Some(entry.mtime.into()),
3048 is_symlink: entry.is_symlink,
3049 is_ignored: entry.is_ignored,
3050 }
3051 }
3052}
3053
3054impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3055 type Error = anyhow::Error;
3056
3057 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3058 if let Some(mtime) = entry.mtime {
3059 let kind = if entry.is_dir {
3060 EntryKind::Dir
3061 } else {
3062 let mut char_bag = *root_char_bag;
3063 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3064 EntryKind::File(char_bag)
3065 };
3066 let path: Arc<Path> = PathBuf::from(entry.path).into();
3067 Ok(Entry {
3068 id: ProjectEntryId::from_proto(entry.id),
3069 kind,
3070 path,
3071 inode: entry.inode,
3072 mtime: mtime.into(),
3073 is_symlink: entry.is_symlink,
3074 is_ignored: entry.is_ignored,
3075 })
3076 } else {
3077 Err(anyhow!(
3078 "missing mtime in remote worktree entry {:?}",
3079 entry.path
3080 ))
3081 }
3082 }
3083}
3084
3085#[cfg(test)]
3086mod tests {
3087 use super::*;
3088 use anyhow::Result;
3089 use client::test::FakeHttpClient;
3090 use fs::repository::FakeGitRepository;
3091 use fs::{FakeFs, RealFs};
3092 use gpui::{executor::Deterministic, TestAppContext};
3093 use rand::prelude::*;
3094 use serde_json::json;
3095 use std::{
3096 env,
3097 fmt::Write,
3098 time::{SystemTime, UNIX_EPOCH},
3099 };
3100
3101 use util::test::temp_tree;
3102
3103 #[gpui::test]
3104 async fn test_traversal(cx: &mut TestAppContext) {
3105 let fs = FakeFs::new(cx.background());
3106 fs.insert_tree(
3107 "/root",
3108 json!({
3109 ".gitignore": "a/b\n",
3110 "a": {
3111 "b": "",
3112 "c": "",
3113 }
3114 }),
3115 )
3116 .await;
3117
3118 let http_client = FakeHttpClient::with_404_response();
3119 let client = cx.read(|cx| Client::new(http_client, cx));
3120
3121 let tree = Worktree::local(
3122 client,
3123 Arc::from(Path::new("/root")),
3124 true,
3125 fs,
3126 Default::default(),
3127 &mut cx.to_async(),
3128 )
3129 .await
3130 .unwrap();
3131 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3132 .await;
3133
3134 tree.read_with(cx, |tree, _| {
3135 assert_eq!(
3136 tree.entries(false)
3137 .map(|entry| entry.path.as_ref())
3138 .collect::<Vec<_>>(),
3139 vec![
3140 Path::new(""),
3141 Path::new(".gitignore"),
3142 Path::new("a"),
3143 Path::new("a/c"),
3144 ]
3145 );
3146 assert_eq!(
3147 tree.entries(true)
3148 .map(|entry| entry.path.as_ref())
3149 .collect::<Vec<_>>(),
3150 vec![
3151 Path::new(""),
3152 Path::new(".gitignore"),
3153 Path::new("a"),
3154 Path::new("a/b"),
3155 Path::new("a/c"),
3156 ]
3157 );
3158 })
3159 }
3160
3161 #[gpui::test(iterations = 10)]
3162 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3163 let fs = FakeFs::new(cx.background());
3164 fs.insert_tree(
3165 "/root",
3166 json!({
3167 "lib": {
3168 "a": {
3169 "a.txt": ""
3170 },
3171 "b": {
3172 "b.txt": ""
3173 }
3174 }
3175 }),
3176 )
3177 .await;
3178 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3179 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3180
3181 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3182 let tree = Worktree::local(
3183 client,
3184 Arc::from(Path::new("/root")),
3185 true,
3186 fs.clone(),
3187 Default::default(),
3188 &mut cx.to_async(),
3189 )
3190 .await
3191 .unwrap();
3192
3193 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3194 .await;
3195
3196 tree.read_with(cx, |tree, _| {
3197 assert_eq!(
3198 tree.entries(false)
3199 .map(|entry| entry.path.as_ref())
3200 .collect::<Vec<_>>(),
3201 vec![
3202 Path::new(""),
3203 Path::new("lib"),
3204 Path::new("lib/a"),
3205 Path::new("lib/a/a.txt"),
3206 Path::new("lib/a/lib"),
3207 Path::new("lib/b"),
3208 Path::new("lib/b/b.txt"),
3209 Path::new("lib/b/lib"),
3210 ]
3211 );
3212 });
3213
3214 fs.rename(
3215 Path::new("/root/lib/a/lib"),
3216 Path::new("/root/lib/a/lib-2"),
3217 Default::default(),
3218 )
3219 .await
3220 .unwrap();
3221 executor.run_until_parked();
3222 tree.read_with(cx, |tree, _| {
3223 assert_eq!(
3224 tree.entries(false)
3225 .map(|entry| entry.path.as_ref())
3226 .collect::<Vec<_>>(),
3227 vec![
3228 Path::new(""),
3229 Path::new("lib"),
3230 Path::new("lib/a"),
3231 Path::new("lib/a/a.txt"),
3232 Path::new("lib/a/lib-2"),
3233 Path::new("lib/b"),
3234 Path::new("lib/b/b.txt"),
3235 Path::new("lib/b/lib"),
3236 ]
3237 );
3238 });
3239 }
3240
3241 #[gpui::test]
3242 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3243 let parent_dir = temp_tree(json!({
3244 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3245 "tree": {
3246 ".git": {},
3247 ".gitignore": "ignored-dir\n",
3248 "tracked-dir": {
3249 "tracked-file1": "",
3250 "ancestor-ignored-file1": "",
3251 },
3252 "ignored-dir": {
3253 "ignored-file1": ""
3254 }
3255 }
3256 }));
3257 let dir = parent_dir.path().join("tree");
3258
3259 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3260
3261 let tree = Worktree::local(
3262 client,
3263 dir.as_path(),
3264 true,
3265 Arc::new(RealFs),
3266 Default::default(),
3267 &mut cx.to_async(),
3268 )
3269 .await
3270 .unwrap();
3271 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3272 .await;
3273 tree.flush_fs_events(cx).await;
3274 cx.read(|cx| {
3275 let tree = tree.read(cx);
3276 assert!(
3277 !tree
3278 .entry_for_path("tracked-dir/tracked-file1")
3279 .unwrap()
3280 .is_ignored
3281 );
3282 assert!(
3283 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3284 .unwrap()
3285 .is_ignored
3286 );
3287 assert!(
3288 tree.entry_for_path("ignored-dir/ignored-file1")
3289 .unwrap()
3290 .is_ignored
3291 );
3292 });
3293
3294 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3295 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3296 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3297 tree.flush_fs_events(cx).await;
3298 cx.read(|cx| {
3299 let tree = tree.read(cx);
3300 assert!(
3301 !tree
3302 .entry_for_path("tracked-dir/tracked-file2")
3303 .unwrap()
3304 .is_ignored
3305 );
3306 assert!(
3307 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3308 .unwrap()
3309 .is_ignored
3310 );
3311 assert!(
3312 tree.entry_for_path("ignored-dir/ignored-file2")
3313 .unwrap()
3314 .is_ignored
3315 );
3316 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3317 });
3318 }
3319
3320 #[gpui::test]
3321 async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3322 let root = temp_tree(json!({
3323 "dir1": {
3324 ".git": {},
3325 "deps": {
3326 "dep1": {
3327 ".git": {},
3328 "src": {
3329 "a.txt": ""
3330 }
3331 }
3332 },
3333 "src": {
3334 "b.txt": ""
3335 }
3336 },
3337 "c.txt": "",
3338 }));
3339
3340 let http_client = FakeHttpClient::with_404_response();
3341 let client = cx.read(|cx| Client::new(http_client, cx));
3342 let tree = Worktree::local(
3343 client,
3344 root.path(),
3345 true,
3346 Arc::new(RealFs),
3347 Default::default(),
3348 &mut cx.to_async(),
3349 )
3350 .await
3351 .unwrap();
3352
3353 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3354 .await;
3355 tree.flush_fs_events(cx).await;
3356
3357 tree.read_with(cx, |tree, _cx| {
3358 let tree = tree.as_local().unwrap();
3359
3360 assert!(tree.repo_for("c.txt".as_ref()).is_none());
3361
3362 let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3363 assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
3364 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));
3365
3366 let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
3367 assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
3368 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
3369 });
3370
3371 let original_scan_id = tree.read_with(cx, |tree, _cx| {
3372 let tree = tree.as_local().unwrap();
3373 tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
3374 });
3375
3376 std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
3377 tree.flush_fs_events(cx).await;
3378
3379 tree.read_with(cx, |tree, _cx| {
3380 let tree = tree.as_local().unwrap();
3381 let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
3382 assert_ne!(
3383 original_scan_id, new_scan_id,
3384 "original {original_scan_id}, new {new_scan_id}"
3385 );
3386 });
3387
3388 std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
3389 tree.flush_fs_events(cx).await;
3390
3391 tree.read_with(cx, |tree, _cx| {
3392 let tree = tree.as_local().unwrap();
3393
3394 assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
3395 });
3396 }
3397
3398 #[test]
3399 fn test_changed_repos() {
3400 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3401 GitRepositoryEntry {
3402 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3403 scan_id,
3404 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3405 git_dir_path: git_dir_path.as_ref().into(),
3406 }
3407 }
3408
3409 let prev_repos: Vec<GitRepositoryEntry> = vec![
3410 fake_entry("/.git", 0),
3411 fake_entry("/a/.git", 0),
3412 fake_entry("/a/b/.git", 0),
3413 ];
3414
3415 let new_repos: Vec<GitRepositoryEntry> = vec![
3416 fake_entry("/a/.git", 1),
3417 fake_entry("/a/b/.git", 0),
3418 fake_entry("/a/c/.git", 0),
3419 ];
3420
3421 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3422
3423 // Deletion retained
3424 assert!(res
3425 .iter()
3426 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3427 .is_some());
3428
3429 // Update retained
3430 assert!(res
3431 .iter()
3432 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3433 .is_some());
3434
3435 // Addition retained
3436 assert!(res
3437 .iter()
3438 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3439 .is_some());
3440
3441 // No change, not retained
3442 assert!(res
3443 .iter()
3444 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3445 .is_none());
3446 }
3447
3448 #[gpui::test]
3449 async fn test_write_file(cx: &mut TestAppContext) {
3450 let dir = temp_tree(json!({
3451 ".git": {},
3452 ".gitignore": "ignored-dir\n",
3453 "tracked-dir": {},
3454 "ignored-dir": {}
3455 }));
3456
3457 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3458
3459 let tree = Worktree::local(
3460 client,
3461 dir.path(),
3462 true,
3463 Arc::new(RealFs),
3464 Default::default(),
3465 &mut cx.to_async(),
3466 )
3467 .await
3468 .unwrap();
3469 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3470 .await;
3471 tree.flush_fs_events(cx).await;
3472
3473 tree.update(cx, |tree, cx| {
3474 tree.as_local().unwrap().write_file(
3475 Path::new("tracked-dir/file.txt"),
3476 "hello".into(),
3477 Default::default(),
3478 cx,
3479 )
3480 })
3481 .await
3482 .unwrap();
3483 tree.update(cx, |tree, cx| {
3484 tree.as_local().unwrap().write_file(
3485 Path::new("ignored-dir/file.txt"),
3486 "world".into(),
3487 Default::default(),
3488 cx,
3489 )
3490 })
3491 .await
3492 .unwrap();
3493
3494 tree.read_with(cx, |tree, _| {
3495 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3496 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3497 assert!(!tracked.is_ignored);
3498 assert!(ignored.is_ignored);
3499 });
3500 }
3501
3502 #[gpui::test(iterations = 30)]
3503 async fn test_create_directory(cx: &mut TestAppContext) {
3504 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3505
3506 let fs = FakeFs::new(cx.background());
3507 fs.insert_tree(
3508 "/a",
3509 json!({
3510 "b": {},
3511 "c": {},
3512 "d": {},
3513 }),
3514 )
3515 .await;
3516
3517 let tree = Worktree::local(
3518 client,
3519 "/a".as_ref(),
3520 true,
3521 fs,
3522 Default::default(),
3523 &mut cx.to_async(),
3524 )
3525 .await
3526 .unwrap();
3527
3528 let entry = tree
3529 .update(cx, |tree, cx| {
3530 tree.as_local_mut()
3531 .unwrap()
3532 .create_entry("a/e".as_ref(), true, cx)
3533 })
3534 .await
3535 .unwrap();
3536 assert!(entry.is_dir());
3537
3538 cx.foreground().run_until_parked();
3539 tree.read_with(cx, |tree, _| {
3540 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3541 });
3542 }
3543
3544 #[gpui::test(iterations = 100)]
3545 fn test_random(mut rng: StdRng) {
3546 let operations = env::var("OPERATIONS")
3547 .map(|o| o.parse().unwrap())
3548 .unwrap_or(40);
3549 let initial_entries = env::var("INITIAL_ENTRIES")
3550 .map(|o| o.parse().unwrap())
3551 .unwrap_or(20);
3552
3553 let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
3554 for _ in 0..initial_entries {
3555 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
3556 }
3557 log::info!("Generated initial tree");
3558
3559 let (notify_tx, _notify_rx) = mpsc::unbounded();
3560 let fs = Arc::new(RealFs);
3561 let next_entry_id = Arc::new(AtomicUsize::new(0));
3562 let mut initial_snapshot = LocalSnapshot {
3563 removed_entry_ids: Default::default(),
3564 ignores_by_parent_abs_path: Default::default(),
3565 git_repositories: Default::default(),
3566 next_entry_id: next_entry_id.clone(),
3567 snapshot: Snapshot {
3568 id: WorktreeId::from_usize(0),
3569 entries_by_path: Default::default(),
3570 entries_by_id: Default::default(),
3571 abs_path: root_dir.path().into(),
3572 root_name: Default::default(),
3573 root_char_bag: Default::default(),
3574 scan_id: 0,
3575 completed_scan_id: 0,
3576 },
3577 };
3578 initial_snapshot.insert_entry(
3579 Entry::new(
3580 Path::new("").into(),
3581 &smol::block_on(fs.metadata(root_dir.path()))
3582 .unwrap()
3583 .unwrap(),
3584 &next_entry_id,
3585 Default::default(),
3586 ),
3587 fs.as_ref(),
3588 );
3589 let mut scanner = BackgroundScanner::new(
3590 Arc::new(Mutex::new(initial_snapshot.clone())),
3591 Arc::new(Mutex::new(HashMap::default())),
3592 notify_tx,
3593 fs.clone(),
3594 Arc::new(gpui::executor::Background::new()),
3595 );
3596 smol::block_on(scanner.scan_dirs()).unwrap();
3597 scanner.snapshot().check_invariants();
3598
3599 let mut events = Vec::new();
3600 let mut snapshots = Vec::new();
3601 let mut mutations_len = operations;
3602 while mutations_len > 1 {
3603 if !events.is_empty() && rng.gen_bool(0.4) {
3604 let len = rng.gen_range(0..=events.len());
3605 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
3606 log::info!("Delivering events: {:#?}", to_deliver);
3607 smol::block_on(scanner.process_events(to_deliver));
3608 scanner.snapshot().check_invariants();
3609 } else {
3610 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
3611 mutations_len -= 1;
3612 }
3613
3614 if rng.gen_bool(0.2) {
3615 snapshots.push(scanner.snapshot());
3616 }
3617 }
3618 log::info!("Quiescing: {:#?}", events);
3619 smol::block_on(scanner.process_events(events));
3620 scanner.snapshot().check_invariants();
3621
3622 let (notify_tx, _notify_rx) = mpsc::unbounded();
3623 let mut new_scanner = BackgroundScanner::new(
3624 Arc::new(Mutex::new(initial_snapshot)),
3625 Arc::new(Mutex::new(HashMap::default())),
3626 notify_tx,
3627 scanner.fs.clone(),
3628 scanner.executor.clone(),
3629 );
3630 smol::block_on(new_scanner.scan_dirs()).unwrap();
3631 assert_eq!(
3632 scanner.snapshot().to_vec(true),
3633 new_scanner.snapshot().to_vec(true)
3634 );
3635
3636 for mut prev_snapshot in snapshots {
3637 let include_ignored = rng.gen::<bool>();
3638 if !include_ignored {
3639 let mut entries_by_path_edits = Vec::new();
3640 let mut entries_by_id_edits = Vec::new();
3641 for entry in prev_snapshot
3642 .entries_by_id
3643 .cursor::<()>()
3644 .filter(|e| e.is_ignored)
3645 {
3646 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3647 entries_by_id_edits.push(Edit::Remove(entry.id));
3648 }
3649
3650 prev_snapshot
3651 .entries_by_path
3652 .edit(entries_by_path_edits, &());
3653 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3654 }
3655
3656 let update = scanner
3657 .snapshot()
3658 .build_update(&prev_snapshot, 0, 0, include_ignored);
3659 prev_snapshot.apply_remote_update(update).unwrap();
3660 assert_eq!(
3661 prev_snapshot.to_vec(true),
3662 scanner.snapshot().to_vec(include_ignored)
3663 );
3664 }
3665 }
3666
3667 fn randomly_mutate_tree(
3668 root_path: &Path,
3669 insertion_probability: f64,
3670 rng: &mut impl Rng,
3671 ) -> Result<Vec<fsevent::Event>> {
3672 let root_path = root_path.canonicalize().unwrap();
3673 let (dirs, files) = read_dir_recursive(root_path.clone());
3674
3675 let mut events = Vec::new();
3676 let mut record_event = |path: PathBuf| {
3677 events.push(fsevent::Event {
3678 event_id: SystemTime::now()
3679 .duration_since(UNIX_EPOCH)
3680 .unwrap()
3681 .as_secs(),
3682 flags: fsevent::StreamFlags::empty(),
3683 path,
3684 });
3685 };
3686
3687 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
3688 let path = dirs.choose(rng).unwrap();
3689 let new_path = path.join(gen_name(rng));
3690
3691 if rng.gen() {
3692 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
3693 std::fs::create_dir(&new_path)?;
3694 } else {
3695 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
3696 std::fs::write(&new_path, "")?;
3697 }
3698 record_event(new_path);
3699 } else if rng.gen_bool(0.05) {
3700 let ignore_dir_path = dirs.choose(rng).unwrap();
3701 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
3702
3703 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
3704 let files_to_ignore = {
3705 let len = rng.gen_range(0..=subfiles.len());
3706 subfiles.choose_multiple(rng, len)
3707 };
3708 let dirs_to_ignore = {
3709 let len = rng.gen_range(0..subdirs.len());
3710 subdirs.choose_multiple(rng, len)
3711 };
3712
3713 let mut ignore_contents = String::new();
3714 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
3715 writeln!(
3716 ignore_contents,
3717 "{}",
3718 path_to_ignore
3719 .strip_prefix(&ignore_dir_path)?
3720 .to_str()
3721 .unwrap()
3722 )
3723 .unwrap();
3724 }
3725 log::info!(
3726 "Creating {:?} with contents:\n{}",
3727 ignore_path.strip_prefix(&root_path)?,
3728 ignore_contents
3729 );
3730 std::fs::write(&ignore_path, ignore_contents).unwrap();
3731 record_event(ignore_path);
3732 } else {
3733 let old_path = {
3734 let file_path = files.choose(rng);
3735 let dir_path = dirs[1..].choose(rng);
3736 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
3737 };
3738
3739 let is_rename = rng.gen();
3740 if is_rename {
3741 let new_path_parent = dirs
3742 .iter()
3743 .filter(|d| !d.starts_with(old_path))
3744 .choose(rng)
3745 .unwrap();
3746
3747 let overwrite_existing_dir =
3748 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
3749 let new_path = if overwrite_existing_dir {
3750 std::fs::remove_dir_all(&new_path_parent).ok();
3751 new_path_parent.to_path_buf()
3752 } else {
3753 new_path_parent.join(gen_name(rng))
3754 };
3755
3756 log::info!(
3757 "Renaming {:?} to {}{:?}",
3758 old_path.strip_prefix(&root_path)?,
3759 if overwrite_existing_dir {
3760 "overwrite "
3761 } else {
3762 ""
3763 },
3764 new_path.strip_prefix(&root_path)?
3765 );
3766 std::fs::rename(&old_path, &new_path)?;
3767 record_event(old_path.clone());
3768 record_event(new_path);
3769 } else if old_path.is_dir() {
3770 let (dirs, files) = read_dir_recursive(old_path.clone());
3771
3772 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
3773 std::fs::remove_dir_all(&old_path).unwrap();
3774 for file in files {
3775 record_event(file);
3776 }
3777 for dir in dirs {
3778 record_event(dir);
3779 }
3780 } else {
3781 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
3782 std::fs::remove_file(old_path).unwrap();
3783 record_event(old_path.clone());
3784 }
3785 }
3786
3787 Ok(events)
3788 }
3789
3790 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3791 let child_entries = std::fs::read_dir(&path).unwrap();
3792 let mut dirs = vec![path];
3793 let mut files = Vec::new();
3794 for child_entry in child_entries {
3795 let child_path = child_entry.unwrap().path();
3796 if child_path.is_dir() {
3797 let (child_dirs, child_files) = read_dir_recursive(child_path);
3798 dirs.extend(child_dirs);
3799 files.extend(child_files);
3800 } else {
3801 files.push(child_path);
3802 }
3803 }
3804 (dirs, files)
3805 }
3806
3807 fn gen_name(rng: &mut impl Rng) -> String {
3808 (0..6)
3809 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3810 .map(char::from)
3811 .collect()
3812 }
3813
3814 impl LocalSnapshot {
3815 fn check_invariants(&self) {
3816 let mut files = self.files(true, 0);
3817 let mut visible_files = self.files(false, 0);
3818 for entry in self.entries_by_path.cursor::<()>() {
3819 if entry.is_file() {
3820 assert_eq!(files.next().unwrap().inode, entry.inode);
3821 if !entry.is_ignored {
3822 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3823 }
3824 }
3825 }
3826 assert!(files.next().is_none());
3827 assert!(visible_files.next().is_none());
3828
3829 let mut bfs_paths = Vec::new();
3830 let mut stack = vec![Path::new("")];
3831 while let Some(path) = stack.pop() {
3832 bfs_paths.push(path);
3833 let ix = stack.len();
3834 for child_entry in self.child_entries(path) {
3835 stack.insert(ix, &child_entry.path);
3836 }
3837 }
3838
3839 let dfs_paths_via_iter = self
3840 .entries_by_path
3841 .cursor::<()>()
3842 .map(|e| e.path.as_ref())
3843 .collect::<Vec<_>>();
3844 assert_eq!(bfs_paths, dfs_paths_via_iter);
3845
3846 let dfs_paths_via_traversal = self
3847 .entries(true)
3848 .map(|e| e.path.as_ref())
3849 .collect::<Vec<_>>();
3850 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3851
3852 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3853 let ignore_parent_path =
3854 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3855 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3856 assert!(self
3857 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3858 .is_some());
3859 }
3860 }
3861
3862 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3863 let mut paths = Vec::new();
3864 for entry in self.entries_by_path.cursor::<()>() {
3865 if include_ignored || !entry.is_ignored {
3866 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3867 }
3868 }
3869 paths.sort_by(|a, b| a.0.cmp(b.0));
3870 paths
3871 }
3872 }
3873}