1use super::{ignore::IgnoreStack, DiagnosticSummary};
2use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
3use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
4use anyhow::{anyhow, Context, Result};
5use client::{proto, Client};
6use clock::ReplicaId;
7use collections::{HashMap, VecDeque};
8use fs::LineEnding;
9use fs::{repository::GitRepository, Fs};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 Stream, StreamExt,
16};
17use fuzzy::CharBag;
18use git::{DOT_GIT, GITIGNORE};
19use gpui::{
20 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
21 Task,
22};
23use language::File as _;
24use language::{
25 proto::{
26 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
27 serialize_version,
28 },
29 Buffer, DiagnosticEntry, PointUtf16, Rope, RopeFingerprint, Unclipped,
30};
31use parking_lot::Mutex;
32use postage::{
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36
37use smol::channel::{self, Sender};
38use std::{
39 any::Any,
40 cmp::{self, Ordering},
41 convert::TryFrom,
42 ffi::OsStr,
43 fmt,
44 future::Future,
45 mem,
46 ops::{Deref, DerefMut},
47 path::{Path, PathBuf},
48 sync::{atomic::AtomicUsize, Arc},
49 task::Poll,
50 time::{Duration, SystemTime},
51};
52use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
53use util::paths::HOME;
54use util::{ResultExt, TryFutureExt};
55
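/// Identifies a worktree within a project. For local worktrees this is derived from
/// the gpui model id of the worktree's handle; for remote worktrees it is the id
/// assigned by the host.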
56#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
57pub struct WorktreeId(usize);
58
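/// A directory tree (or single file) tracked by a project, either backed by the
/// local filesystem or replicated from a remote collaborator.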
59#[allow(clippy::large_enum_variant)]
60pub enum Worktree {
61 Local(LocalWorktree),
62 Remote(RemoteWorktree),
63}
64
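/// A worktree backed by the local filesystem. A background scanner keeps
/// `background_snapshot` up to date, and `poll_snapshot` copies its state into the
/// foreground `snapshot`, emitting events when entries or git repositories change.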
65pub struct LocalWorktree {
66 snapshot: LocalSnapshot,
67 background_snapshot: Arc<Mutex<LocalSnapshot>>,
68 last_scan_state_rx: watch::Receiver<ScanState>,
69 _background_scanner_task: Option<Task<()>>,
70 poll_task: Option<Task<()>>,
71 share: Option<ShareState>,
72 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
73 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
74 client: Arc<Client>,
75 fs: Arc<dyn Fs>,
76 visible: bool,
77}
78
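/// A worktree replicated from a remote collaborator's machine. Updates arrive as
/// `proto::UpdateWorktree` messages, are applied to `background_snapshot` on a
/// background task, and are then polled into the foreground `snapshot`.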
79pub struct RemoteWorktree {
80 pub snapshot: Snapshot,
81 pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
82 project_id: u64,
83 client: Arc<Client>,
84 updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
85 snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
86 replica_id: ReplicaId,
87 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
88 visible: bool,
89 disconnected: bool,
90}
91
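/// The state of a worktree's entries at a point in time, indexed both by path and
/// by entry id.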
92#[derive(Clone)]
93pub struct Snapshot {
94 id: WorktreeId,
95 abs_path: Arc<Path>,
96 root_name: String,
97 root_char_bag: CharBag,
98 entries_by_path: SumTree<Entry>,
99 entries_by_id: SumTree<PathEntry>,
100 scan_id: usize,
101 completed_scan_id: usize,
102}
103
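/// A git repository discovered within the worktree, along with the scan in which it
/// was last updated.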
104#[derive(Clone)]
105pub struct GitRepositoryEntry {
106 pub(crate) repo: Arc<Mutex<dyn GitRepository>>,
107
108 pub(crate) scan_id: usize,
109 // Path to folder containing the .git file or directory
110 pub(crate) content_path: Arc<Path>,
111 // Path to the actual .git folder.
112 // Note: if .git is a file, this points to the folder indicated by the .git file
113 pub(crate) git_dir_path: Arc<Path>,
114}
115
116impl std::fmt::Debug for GitRepositoryEntry {
117 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
118 f.debug_struct("GitRepositoryEntry")
119 .field("content_path", &self.content_path)
120 .field("git_dir_path", &self.git_dir_path)
121 .field("libgit_repository", &"LibGitRepository")
122 .finish()
123 }
124}
125
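/// A `Snapshot` plus the local-only state needed to maintain it: parsed `.gitignore`
/// files, discovered git repositories, and the bookkeeping used to reuse entry ids
/// for paths that are removed and later re-created.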
126pub struct LocalSnapshot {
127 ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
128 git_repositories: Vec<GitRepositoryEntry>,
129 removed_entry_ids: HashMap<u64, ProjectEntryId>,
130 next_entry_id: Arc<AtomicUsize>,
131 snapshot: Snapshot,
132}
133
134impl Clone for LocalSnapshot {
135 fn clone(&self) -> Self {
136 Self {
137 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
138 git_repositories: self.git_repositories.iter().cloned().collect(),
139 removed_entry_ids: self.removed_entry_ids.clone(),
140 next_entry_id: self.next_entry_id.clone(),
141 snapshot: self.snapshot.clone(),
142 }
143 }
144}
145
146impl Deref for LocalSnapshot {
147 type Target = Snapshot;
148
149 fn deref(&self) -> &Self::Target {
150 &self.snapshot
151 }
152}
153
154impl DerefMut for LocalSnapshot {
155 fn deref_mut(&mut self) -> &mut Self::Target {
156 &mut self.snapshot
157 }
158}
159
160#[derive(Clone, Debug)]
161enum ScanState {
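    /// The worktree has finished scanning and is waiting for filesystem events.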
162 Idle,
163 /// The worktree is performing its initial scan of the filesystem.
164 Initializing,
165 /// The worktree is updating in response to filesystem events.
166 Updating,
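    /// Scanning failed with the given error.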
167 Err(Arc<anyhow::Error>),
168}
169
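/// State maintained while a local worktree is shared with remote collaborators.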
170struct ShareState {
171 project_id: u64,
172 snapshots_tx: watch::Sender<LocalSnapshot>,
173 resume_updates: watch::Sender<()>,
174 _maintain_remote_snapshot: Task<Option<()>>,
175}
176
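/// Events emitted by a `Worktree` model.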
177pub enum Event {
178 UpdatedEntries,
179 UpdatedGitRepositories(Vec<GitRepositoryEntry>),
180}
181
182impl Entity for Worktree {
183 type Event = Event;
184}
185
186impl Worktree {
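    /// Creates a worktree for the local directory or file at `path` and spawns a
    /// background task that scans the filesystem and keeps the worktree's snapshot
    /// up to date.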
187 pub async fn local(
188 client: Arc<Client>,
189 path: impl Into<Arc<Path>>,
190 visible: bool,
191 fs: Arc<dyn Fs>,
192 next_entry_id: Arc<AtomicUsize>,
193 cx: &mut AsyncAppContext,
194 ) -> Result<ModelHandle<Self>> {
195 let (tree, scan_states_tx) =
196 LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
197 tree.update(cx, |tree, cx| {
198 let tree = tree.as_local_mut().unwrap();
199 let abs_path = tree.abs_path().clone();
200 let background_snapshot = tree.background_snapshot.clone();
201 let background = cx.background().clone();
202 tree._background_scanner_task = Some(cx.background().spawn(async move {
203 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
204 let scanner =
205 BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
206 scanner.run(events).await;
207 }));
208 });
209 Ok(tree)
210 }
211
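    /// Creates a worktree that mirrors a worktree shared by a remote collaborator,
    /// applying `proto::UpdateWorktree` messages to its snapshot as they arrive.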
212 pub fn remote(
213 project_remote_id: u64,
214 replica_id: ReplicaId,
215 worktree: proto::WorktreeMetadata,
216 client: Arc<Client>,
217 cx: &mut MutableAppContext,
218 ) -> ModelHandle<Self> {
219 let remote_id = worktree.id;
220 let root_char_bag: CharBag = worktree
221 .root_name
222 .chars()
223 .map(|c| c.to_ascii_lowercase())
224 .collect();
225 let root_name = worktree.root_name.clone();
226 let visible = worktree.visible;
227
228 let abs_path = PathBuf::from(worktree.abs_path);
229 let snapshot = Snapshot {
230 id: WorktreeId(remote_id as usize),
231 abs_path: Arc::from(abs_path.deref()),
232 root_name,
233 root_char_bag,
234 entries_by_path: Default::default(),
235 entries_by_id: Default::default(),
236 scan_id: 0,
237 completed_scan_id: 0,
238 };
239
240 let (updates_tx, mut updates_rx) = mpsc::unbounded();
241 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
242 let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
243 let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
244 Worktree::Remote(RemoteWorktree {
245 project_id: project_remote_id,
246 replica_id,
247 snapshot: snapshot.clone(),
248 background_snapshot: background_snapshot.clone(),
249 updates_tx: Some(updates_tx),
250 snapshot_subscriptions: Default::default(),
251 client: client.clone(),
252 diagnostic_summaries: Default::default(),
253 visible,
254 disconnected: false,
255 })
256 });
257
258 cx.background()
259 .spawn(async move {
260 while let Some(update) = updates_rx.next().await {
261 if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
262 log::error!("error applying worktree update: {}", error);
263 }
264 snapshot_updated_tx.send(()).await.ok();
265 }
266 })
267 .detach();
268
269 cx.spawn(|mut cx| {
270 let this = worktree_handle.downgrade();
271 async move {
272 while (snapshot_updated_rx.recv().await).is_some() {
273 if let Some(this) = this.upgrade(&cx) {
274 this.update(&mut cx, |this, cx| {
275 this.poll_snapshot(cx);
276 let this = this.as_remote_mut().unwrap();
277 while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
278 if this.observed_snapshot(*scan_id) {
279 let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
280 let _ = tx.send(());
281 } else {
282 break;
283 }
284 }
285 });
286 } else {
287 break;
288 }
289 }
290 }
291 })
292 .detach();
293
294 worktree_handle
295 }
296
297 pub fn as_local(&self) -> Option<&LocalWorktree> {
298 if let Worktree::Local(worktree) = self {
299 Some(worktree)
300 } else {
301 None
302 }
303 }
304
305 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
306 if let Worktree::Remote(worktree) = self {
307 Some(worktree)
308 } else {
309 None
310 }
311 }
312
313 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
314 if let Worktree::Local(worktree) = self {
315 Some(worktree)
316 } else {
317 None
318 }
319 }
320
321 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
322 if let Worktree::Remote(worktree) = self {
323 Some(worktree)
324 } else {
325 None
326 }
327 }
328
329 pub fn is_local(&self) -> bool {
330 matches!(self, Worktree::Local(_))
331 }
332
333 pub fn is_remote(&self) -> bool {
334 !self.is_local()
335 }
336
337 pub fn snapshot(&self) -> Snapshot {
338 match self {
339 Worktree::Local(worktree) => worktree.snapshot().snapshot,
340 Worktree::Remote(worktree) => worktree.snapshot(),
341 }
342 }
343
344 pub fn scan_id(&self) -> usize {
345 match self {
346 Worktree::Local(worktree) => worktree.snapshot.scan_id,
347 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
348 }
349 }
350
351 pub fn completed_scan_id(&self) -> usize {
352 match self {
353 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
354 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
355 }
356 }
357
358 pub fn is_visible(&self) -> bool {
359 match self {
360 Worktree::Local(worktree) => worktree.visible,
361 Worktree::Remote(worktree) => worktree.visible,
362 }
363 }
364
365 pub fn replica_id(&self) -> ReplicaId {
366 match self {
367 Worktree::Local(_) => 0,
368 Worktree::Remote(worktree) => worktree.replica_id,
369 }
370 }
371
372 pub fn diagnostic_summaries(
373 &self,
374 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
375 match self {
376 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
377 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
378 }
379 .iter()
380 .map(|(path, summary)| (path.0.clone(), *summary))
381 }
382
383 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
384 match self {
385 Self::Local(worktree) => worktree.poll_snapshot(false, cx),
386 Self::Remote(worktree) => worktree.poll_snapshot(cx),
387 };
388 }
389
390 pub fn abs_path(&self) -> Arc<Path> {
391 match self {
392 Worktree::Local(worktree) => worktree.abs_path.clone(),
393 Worktree::Remote(worktree) => worktree.abs_path.clone(),
394 }
395 }
396}
397
398impl LocalWorktree {
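    /// Constructs the worktree model and its initial snapshot. The background
    /// scanner that keeps the snapshot current is spawned separately, in
    /// `Worktree::local`.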
399 async fn create(
400 client: Arc<Client>,
401 path: impl Into<Arc<Path>>,
402 visible: bool,
403 fs: Arc<dyn Fs>,
404 next_entry_id: Arc<AtomicUsize>,
405 cx: &mut AsyncAppContext,
406 ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
407 let abs_path = path.into();
408 let path: Arc<Path> = Arc::from(Path::new(""));
409
        // Populate the snapshot's "root name", which is used for fuzzy matching. The
        // root entry itself is inserted below, once we know whether the path is a
        // file or a directory.
412 let root_name = abs_path
413 .file_name()
414 .map_or(String::new(), |f| f.to_string_lossy().to_string());
415 let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
416 let metadata = fs
417 .metadata(&abs_path)
418 .await
419 .context("failed to stat worktree path")?;
420
421 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
422 let (mut last_scan_state_tx, last_scan_state_rx) =
423 watch::channel_with(ScanState::Initializing);
424 let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
425 let mut snapshot = LocalSnapshot {
426 ignores_by_parent_abs_path: Default::default(),
427 git_repositories: Default::default(),
428 removed_entry_ids: Default::default(),
429 next_entry_id,
430 snapshot: Snapshot {
431 id: WorktreeId::from_usize(cx.model_id()),
432 abs_path,
433 root_name: root_name.clone(),
434 root_char_bag,
435 entries_by_path: Default::default(),
436 entries_by_id: Default::default(),
437 scan_id: 0,
438 completed_scan_id: 0,
439 },
440 };
441 if let Some(metadata) = metadata {
442 let entry = Entry::new(
443 path,
444 &metadata,
445 &snapshot.next_entry_id,
446 snapshot.root_char_bag,
447 );
448 snapshot.insert_entry(entry, fs.as_ref());
449 }
450
451 let tree = Self {
452 snapshot: snapshot.clone(),
453 background_snapshot: Arc::new(Mutex::new(snapshot)),
454 last_scan_state_rx,
455 _background_scanner_task: None,
456 share: None,
457 poll_task: None,
458 diagnostics: Default::default(),
459 diagnostic_summaries: Default::default(),
460 client,
461 fs,
462 visible,
463 };
464
465 cx.spawn_weak(|this, mut cx| async move {
466 while let Some(scan_state) = scan_states_rx.next().await {
467 if let Some(this) = this.upgrade(&cx) {
468 last_scan_state_tx.blocking_send(scan_state).ok();
469 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
470 } else {
471 break;
472 }
473 }
474 })
475 .detach();
476
477 Worktree::Local(tree)
478 });
479
480 Ok((tree, scan_states_tx))
481 }
482
483 pub fn contains_abs_path(&self, path: &Path) -> bool {
484 path.starts_with(&self.abs_path)
485 }
486
487 fn absolutize(&self, path: &Path) -> PathBuf {
488 if path.file_name().is_some() {
489 self.abs_path.join(path)
490 } else {
491 self.abs_path.to_path_buf()
492 }
493 }
494
495 pub(crate) fn load_buffer(
496 &mut self,
497 path: &Path,
498 cx: &mut ModelContext<Worktree>,
499 ) -> Task<Result<ModelHandle<Buffer>>> {
500 let path = Arc::from(path);
501 cx.spawn(move |this, mut cx| async move {
502 let (file, contents, diff_base) = this
503 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
504 .await?;
505 Ok(cx.add_model(|cx| {
506 let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
507 buffer.git_diff_recalc(cx);
508 buffer
509 }))
510 })
511 }
512
513 pub fn diagnostics_for_path(
514 &self,
515 path: &Path,
516 ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
517 self.diagnostics.get(path).cloned()
518 }
519
520 pub fn update_diagnostics(
521 &mut self,
522 language_server_id: usize,
523 worktree_path: Arc<Path>,
524 diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
525 _: &mut ModelContext<Worktree>,
526 ) -> Result<bool> {
527 self.diagnostics.remove(&worktree_path);
528 let old_summary = self
529 .diagnostic_summaries
530 .remove(&PathKey(worktree_path.clone()))
531 .unwrap_or_default();
532 let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
533 if !new_summary.is_empty() {
534 self.diagnostic_summaries
535 .insert(PathKey(worktree_path.clone()), new_summary);
536 self.diagnostics.insert(worktree_path.clone(), diagnostics);
537 }
538
539 let updated = !old_summary.is_empty() || !new_summary.is_empty();
540 if updated {
541 if let Some(share) = self.share.as_ref() {
542 self.client
543 .send(proto::UpdateDiagnosticSummary {
544 project_id: share.project_id,
545 worktree_id: self.id().to_proto(),
546 summary: Some(proto::DiagnosticSummary {
547 path: worktree_path.to_string_lossy().to_string(),
548 language_server_id: language_server_id as u64,
549 error_count: new_summary.error_count as u32,
550 warning_count: new_summary.warning_count as u32,
551 }),
552 })
553 .log_err();
554 }
555 }
556
557 Ok(updated)
558 }
559
560 fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
561 self.poll_task.take();
562
563 match self.scan_state() {
564 ScanState::Idle => {
565 let new_snapshot = self.background_snapshot.lock().clone();
566 let updated_repos = Self::changed_repos(
567 &self.snapshot.git_repositories,
568 &new_snapshot.git_repositories,
569 );
570 self.snapshot = new_snapshot;
571
572 if let Some(share) = self.share.as_mut() {
573 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
574 }
575
576 cx.emit(Event::UpdatedEntries);
577
578 if !updated_repos.is_empty() {
579 cx.emit(Event::UpdatedGitRepositories(updated_repos));
580 }
581 }
582
583 ScanState::Initializing => {
584 let is_fake_fs = self.fs.is_fake();
585
586 let new_snapshot = self.background_snapshot.lock().clone();
587 let updated_repos = Self::changed_repos(
588 &self.snapshot.git_repositories,
589 &new_snapshot.git_repositories,
590 );
591 self.snapshot = new_snapshot;
592
593 self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
594 if is_fake_fs {
595 #[cfg(any(test, feature = "test-support"))]
596 cx.background().simulate_random_delay().await;
597 } else {
598 smol::Timer::after(Duration::from_millis(100)).await;
599 }
600 if let Some(this) = this.upgrade(&cx) {
601 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
602 }
603 }));
604
605 cx.emit(Event::UpdatedEntries);
606
607 if !updated_repos.is_empty() {
608 cx.emit(Event::UpdatedGitRepositories(updated_repos));
609 }
610 }
611
612 _ => {
613 if force {
614 self.snapshot = self.background_snapshot.lock().clone();
615 }
616 }
617 }
618
619 cx.notify();
620 }
621
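    /// Returns the repository entries that differ between the two slices, comparing
    /// entries by `git_dir_path` and `scan_id` in both directions.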
622 fn changed_repos(
623 old_repos: &[GitRepositoryEntry],
624 new_repos: &[GitRepositoryEntry],
625 ) -> Vec<GitRepositoryEntry> {
626 fn diff<'a>(
627 a: &'a [GitRepositoryEntry],
628 b: &'a [GitRepositoryEntry],
629 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
630 ) {
631 for a_repo in a {
632 let matched = b.iter().find(|b_repo| {
633 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
634 });
635
636 if matched.is_none() {
637 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
638 }
639 }
640 }
641
642 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
643
644 diff(old_repos, new_repos, &mut updated);
645 diff(new_repos, old_repos, &mut updated);
646
647 updated.into_values().collect()
648 }
649
650 pub fn scan_complete(&self) -> impl Future<Output = ()> {
651 let mut scan_state_rx = self.last_scan_state_rx.clone();
652 async move {
653 let mut scan_state = Some(scan_state_rx.borrow().clone());
654 while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
655 scan_state = scan_state_rx.recv().await;
656 }
657 }
658 }
659
660 fn scan_state(&self) -> ScanState {
661 self.last_scan_state_rx.borrow().clone()
662 }
663
664 pub fn snapshot(&self) -> LocalSnapshot {
665 self.snapshot.clone()
666 }
667
668 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
669 proto::WorktreeMetadata {
670 id: self.id().to_proto(),
671 root_name: self.root_name().to_string(),
672 visible: self.visible,
673 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
674 }
675 }
676
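    /// Loads the file at `path` from disk, returning a `File` for an up-to-date
    /// entry, the file's text, and, if the file is inside a git repository, the text
    /// of its index entry for use as a diff base.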
677 fn load(
678 &self,
679 path: &Path,
680 cx: &mut ModelContext<Worktree>,
681 ) -> Task<Result<(File, String, Option<String>)>> {
682 let handle = cx.handle();
683 let path = Arc::from(path);
684 let abs_path = self.absolutize(&path);
685 let fs = self.fs.clone();
686 let snapshot = self.snapshot();
687
688 cx.spawn(|this, mut cx| async move {
689 let text = fs.load(&abs_path).await?;
690
691 let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
692 if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
693 let repo_relative = repo_relative.to_owned();
694 cx.background()
695 .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
696 .await
697 } else {
698 None
699 }
700 } else {
701 None
702 };
703
704 // Eagerly populate the snapshot with an updated entry for the loaded file
705 let entry = this
706 .update(&mut cx, |this, cx| {
707 this.as_local()
708 .unwrap()
709 .refresh_entry(path, abs_path, None, cx)
710 })
711 .await?;
712
713 Ok((
714 File {
715 entry_id: entry.id,
716 worktree: handle,
717 path: entry.path,
718 mtime: entry.mtime,
719 is_local: true,
720 is_deleted: false,
721 },
722 text,
723 diff_base,
724 ))
725 })
726 }
727
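    /// Writes the buffer's contents to `path` within this worktree, then notifies
    /// the buffer and any collaborators that the save completed.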
728 pub fn save_buffer(
729 &self,
730 buffer_handle: ModelHandle<Buffer>,
731 path: Arc<Path>,
732 has_changed_file: bool,
733 cx: &mut ModelContext<Worktree>,
734 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
735 let handle = cx.handle();
736 let buffer = buffer_handle.read(cx);
737
738 let rpc = self.client.clone();
739 let buffer_id = buffer.remote_id();
740 let project_id = self.share.as_ref().map(|share| share.project_id);
741
742 let text = buffer.as_rope().clone();
743 let fingerprint = text.fingerprint();
744 let version = buffer.version();
745 let save = self.write_file(path, text, buffer.line_ending(), cx);
746
747 cx.as_mut().spawn(|mut cx| async move {
748 let entry = save.await?;
749
750 if has_changed_file {
751 let new_file = Arc::new(File {
752 entry_id: entry.id,
753 worktree: handle,
754 path: entry.path,
755 mtime: entry.mtime,
756 is_local: true,
757 is_deleted: false,
758 });
759
760 if let Some(project_id) = project_id {
761 rpc.send(proto::UpdateBufferFile {
762 project_id,
763 buffer_id,
764 file: Some(new_file.to_proto()),
765 })
766 .log_err();
767 }
768
769 buffer_handle.update(&mut cx, |buffer, cx| {
770 if has_changed_file {
771 buffer.file_updated(new_file, cx).detach();
772 }
773 });
774 }
775
776 if let Some(project_id) = project_id {
777 rpc.send(proto::BufferSaved {
778 project_id,
779 buffer_id,
780 version: serialize_version(&version),
781 mtime: Some(entry.mtime.into()),
782 fingerprint: serialize_fingerprint(fingerprint),
783 })?;
784 }
785
786 buffer_handle.update(&mut cx, |buffer, cx| {
787 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
788 });
789
790 Ok((version, fingerprint, entry.mtime))
791 })
792 }
793
794 pub fn create_entry(
795 &self,
796 path: impl Into<Arc<Path>>,
797 is_dir: bool,
798 cx: &mut ModelContext<Worktree>,
799 ) -> Task<Result<Entry>> {
800 self.write_entry_internal(
801 path,
802 if is_dir {
803 None
804 } else {
805 Some(Default::default())
806 },
807 cx,
808 )
809 }
810
811 pub fn write_file(
812 &self,
813 path: impl Into<Arc<Path>>,
814 text: Rope,
815 line_ending: LineEnding,
816 cx: &mut ModelContext<Worktree>,
817 ) -> Task<Result<Entry>> {
818 self.write_entry_internal(path, Some((text, line_ending)), cx)
819 }
820
821 pub fn delete_entry(
822 &self,
823 entry_id: ProjectEntryId,
824 cx: &mut ModelContext<Worktree>,
825 ) -> Option<Task<Result<()>>> {
826 let entry = self.entry_for_id(entry_id)?.clone();
827 let abs_path = self.absolutize(&entry.path);
828 let delete = cx.background().spawn({
829 let fs = self.fs.clone();
830 let abs_path = abs_path;
831 async move {
832 if entry.is_file() {
833 fs.remove_file(&abs_path, Default::default()).await
834 } else {
835 fs.remove_dir(
836 &abs_path,
837 RemoveOptions {
838 recursive: true,
839 ignore_if_not_exists: false,
840 },
841 )
842 .await
843 }
844 }
845 });
846
847 Some(cx.spawn(|this, mut cx| async move {
848 delete.await?;
849 this.update(&mut cx, |this, cx| {
850 let this = this.as_local_mut().unwrap();
851 {
852 let mut snapshot = this.background_snapshot.lock();
853 snapshot.delete_entry(entry_id);
854 }
855 this.poll_snapshot(true, cx);
856 });
857 Ok(())
858 }))
859 }
860
861 pub fn rename_entry(
862 &self,
863 entry_id: ProjectEntryId,
864 new_path: impl Into<Arc<Path>>,
865 cx: &mut ModelContext<Worktree>,
866 ) -> Option<Task<Result<Entry>>> {
867 let old_path = self.entry_for_id(entry_id)?.path.clone();
868 let new_path = new_path.into();
869 let abs_old_path = self.absolutize(&old_path);
870 let abs_new_path = self.absolutize(&new_path);
871 let rename = cx.background().spawn({
872 let fs = self.fs.clone();
873 let abs_new_path = abs_new_path.clone();
874 async move {
875 fs.rename(&abs_old_path, &abs_new_path, Default::default())
876 .await
877 }
878 });
879
880 Some(cx.spawn(|this, mut cx| async move {
881 rename.await?;
882 let entry = this
883 .update(&mut cx, |this, cx| {
884 this.as_local_mut().unwrap().refresh_entry(
885 new_path.clone(),
886 abs_new_path,
887 Some(old_path),
888 cx,
889 )
890 })
891 .await?;
892 Ok(entry)
893 }))
894 }
895
896 pub fn copy_entry(
897 &self,
898 entry_id: ProjectEntryId,
899 new_path: impl Into<Arc<Path>>,
900 cx: &mut ModelContext<Worktree>,
901 ) -> Option<Task<Result<Entry>>> {
902 let old_path = self.entry_for_id(entry_id)?.path.clone();
903 let new_path = new_path.into();
904 let abs_old_path = self.absolutize(&old_path);
905 let abs_new_path = self.absolutize(&new_path);
906 let copy = cx.background().spawn({
907 let fs = self.fs.clone();
908 let abs_new_path = abs_new_path.clone();
909 async move {
910 copy_recursive(
911 fs.as_ref(),
912 &abs_old_path,
913 &abs_new_path,
914 Default::default(),
915 )
916 .await
917 }
918 });
919
920 Some(cx.spawn(|this, mut cx| async move {
921 copy.await?;
922 let entry = this
923 .update(&mut cx, |this, cx| {
924 this.as_local_mut().unwrap().refresh_entry(
925 new_path.clone(),
926 abs_new_path,
927 None,
928 cx,
929 )
930 })
931 .await?;
932 Ok(entry)
933 }))
934 }
935
936 fn write_entry_internal(
937 &self,
938 path: impl Into<Arc<Path>>,
939 text_if_file: Option<(Rope, LineEnding)>,
940 cx: &mut ModelContext<Worktree>,
941 ) -> Task<Result<Entry>> {
942 let path = path.into();
943 let abs_path = self.absolutize(&path);
944 let write = cx.background().spawn({
945 let fs = self.fs.clone();
946 let abs_path = abs_path.clone();
947 async move {
948 if let Some((text, line_ending)) = text_if_file {
949 fs.save(&abs_path, &text, line_ending).await
950 } else {
951 fs.create_dir(&abs_path).await
952 }
953 }
954 });
955
956 cx.spawn(|this, mut cx| async move {
957 write.await?;
958 let entry = this
959 .update(&mut cx, |this, cx| {
960 this.as_local_mut()
961 .unwrap()
962 .refresh_entry(path, abs_path, None, cx)
963 })
964 .await?;
965 Ok(entry)
966 })
967 }
968
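    /// Re-stats `abs_path` and inserts an up-to-date entry for `path` into the
    /// background snapshot, removing `old_path` first if one is provided (as when
    /// renaming).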
969 fn refresh_entry(
970 &self,
971 path: Arc<Path>,
972 abs_path: PathBuf,
973 old_path: Option<Arc<Path>>,
974 cx: &mut ModelContext<Worktree>,
975 ) -> Task<Result<Entry>> {
976 let fs = self.fs.clone();
977 let root_char_bag;
978 let next_entry_id;
979 {
980 let snapshot = self.background_snapshot.lock();
981 root_char_bag = snapshot.root_char_bag;
982 next_entry_id = snapshot.next_entry_id.clone();
983 }
984 cx.spawn_weak(|this, mut cx| async move {
985 let metadata = fs
986 .metadata(&abs_path)
987 .await?
988 .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
989 let this = this
990 .upgrade(&cx)
991 .ok_or_else(|| anyhow!("worktree was dropped"))?;
992 this.update(&mut cx, |this, cx| {
993 let this = this.as_local_mut().unwrap();
994 let inserted_entry;
995 {
996 let mut snapshot = this.background_snapshot.lock();
997 let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
998 entry.is_ignored = snapshot
999 .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
1000 .is_abs_path_ignored(&abs_path, entry.is_dir());
1001 if let Some(old_path) = old_path {
1002 snapshot.remove_path(&old_path);
1003 }
1004 snapshot.scan_started();
1005 inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
1006 snapshot.scan_completed();
1007 }
1008 this.poll_snapshot(true, cx);
1009 Ok(inserted_entry)
1010 })
1011 })
1012 }
1013
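    /// Starts (or resumes) sharing this worktree with the collaborators of the given
    /// project. Snapshot updates are streamed to the server from a background task;
    /// the returned task resolves once the initial snapshot has been sent, or
    /// immediately if the worktree was already shared.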
1014 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1015 let (share_tx, share_rx) = oneshot::channel();
1016
1017 if let Some(share) = self.share.as_mut() {
1018 let _ = share_tx.send(());
1019 *share.resume_updates.borrow_mut() = ();
1020 } else {
1021 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
1022 let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
1023 let worktree_id = cx.model_id() as u64;
1024
1025 for (path, summary) in self.diagnostic_summaries.iter() {
1026 if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1027 project_id,
1028 worktree_id,
1029 summary: Some(summary.to_proto(&path.0)),
1030 }) {
1031 return Task::ready(Err(e));
1032 }
1033 }
1034
1035 let _maintain_remote_snapshot = cx.background().spawn({
1036 let client = self.client.clone();
1037 async move {
1038 let mut share_tx = Some(share_tx);
1039 let mut prev_snapshot = LocalSnapshot {
1040 ignores_by_parent_abs_path: Default::default(),
1041 git_repositories: Default::default(),
1042 removed_entry_ids: Default::default(),
1043 next_entry_id: Default::default(),
1044 snapshot: Snapshot {
1045 id: WorktreeId(worktree_id as usize),
1046 abs_path: Path::new("").into(),
1047 root_name: Default::default(),
1048 root_char_bag: Default::default(),
1049 entries_by_path: Default::default(),
1050 entries_by_id: Default::default(),
1051 scan_id: 0,
1052 completed_scan_id: 0,
1053 },
1054 };
1055 while let Some(snapshot) = snapshots_rx.recv().await {
1056 #[cfg(any(test, feature = "test-support"))]
1057 const MAX_CHUNK_SIZE: usize = 2;
1058 #[cfg(not(any(test, feature = "test-support")))]
1059 const MAX_CHUNK_SIZE: usize = 256;
1060
1061 let update =
1062 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1063 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1064 let _ = resume_updates_rx.try_recv();
1065 while let Err(error) = client.request(update.clone()).await {
1066 log::error!("failed to send worktree update: {}", error);
1067 log::info!("waiting to resume updates");
1068 if resume_updates_rx.next().await.is_none() {
1069 return Ok(());
1070 }
1071 }
1072 }
1073
1074 if let Some(share_tx) = share_tx.take() {
1075 let _ = share_tx.send(());
1076 }
1077
1078 prev_snapshot = snapshot;
1079 }
1080
1081 Ok::<_, anyhow::Error>(())
1082 }
1083 .log_err()
1084 });
1085
1086 self.share = Some(ShareState {
1087 project_id,
1088 snapshots_tx,
1089 resume_updates: resume_updates_tx,
1090 _maintain_remote_snapshot,
1091 });
1092 }
1093
1094 cx.foreground()
1095 .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1096 }
1097
1098 pub fn unshare(&mut self) {
1099 self.share.take();
1100 }
1101
1102 pub fn is_shared(&self) -> bool {
1103 self.share.is_some()
1104 }
1105}
1106
1107impl RemoteWorktree {
1108 fn snapshot(&self) -> Snapshot {
1109 self.snapshot.clone()
1110 }
1111
1112 fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
1113 self.snapshot = self.background_snapshot.lock().clone();
1114 cx.emit(Event::UpdatedEntries);
1115 cx.notify();
1116 }
1117
1118 pub fn disconnected_from_host(&mut self) {
1119 self.updates_tx.take();
1120 self.snapshot_subscriptions.clear();
1121 self.disconnected = true;
1122 }
1123
1124 pub fn save_buffer(
1125 &self,
1126 buffer_handle: ModelHandle<Buffer>,
1127 cx: &mut ModelContext<Worktree>,
1128 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1129 let buffer = buffer_handle.read(cx);
1130 let buffer_id = buffer.remote_id();
1131 let version = buffer.version();
1132 let rpc = self.client.clone();
1133 let project_id = self.project_id;
1134 cx.as_mut().spawn(|mut cx| async move {
1135 let response = rpc
1136 .request(proto::SaveBuffer {
1137 project_id,
1138 buffer_id,
1139 version: serialize_version(&version),
1140 })
1141 .await?;
1142 let version = deserialize_version(response.version);
1143 let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1144 let mtime = response
1145 .mtime
1146 .ok_or_else(|| anyhow!("missing mtime"))?
1147 .into();
1148
1149 buffer_handle.update(&mut cx, |buffer, cx| {
1150 buffer.did_save(version.clone(), fingerprint, mtime, cx);
1151 });
1152
1153 Ok((version, fingerprint, mtime))
1154 })
1155 }
1156
1157 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1158 if let Some(updates_tx) = &self.updates_tx {
1159 updates_tx
1160 .unbounded_send(update)
1161 .expect("consumer runs to completion");
1162 }
1163 }
1164
1165 fn observed_snapshot(&self, scan_id: usize) -> bool {
1166 self.completed_scan_id >= scan_id
1167 }
1168
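    /// Returns a future that resolves once this worktree has observed a snapshot
    /// with a scan id of at least `scan_id`, or fails if the worktree is
    /// disconnected from the host first.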
1169 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1170 let (tx, rx) = oneshot::channel();
1171 if self.observed_snapshot(scan_id) {
1172 let _ = tx.send(());
1173 } else if self.disconnected {
1174 drop(tx);
1175 } else {
1176 match self
1177 .snapshot_subscriptions
1178 .binary_search_by_key(&scan_id, |probe| probe.0)
1179 {
1180 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1181 }
1182 }
1183
1184 async move {
1185 rx.await?;
1186 Ok(())
1187 }
1188 }
1189
1190 pub fn update_diagnostic_summary(
1191 &mut self,
1192 path: Arc<Path>,
1193 summary: &proto::DiagnosticSummary,
1194 ) {
1195 let summary = DiagnosticSummary {
1196 language_server_id: summary.language_server_id as usize,
1197 error_count: summary.error_count as usize,
1198 warning_count: summary.warning_count as usize,
1199 };
1200 if summary.is_empty() {
1201 self.diagnostic_summaries.remove(&PathKey(path));
1202 } else {
1203 self.diagnostic_summaries.insert(PathKey(path), summary);
1204 }
1205 }
1206
1207 pub fn insert_entry(
1208 &mut self,
1209 entry: proto::Entry,
1210 scan_id: usize,
1211 cx: &mut ModelContext<Worktree>,
1212 ) -> Task<Result<Entry>> {
1213 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1214 cx.spawn(|this, mut cx| async move {
1215 wait_for_snapshot.await?;
1216 this.update(&mut cx, |worktree, _| {
1217 let worktree = worktree.as_remote_mut().unwrap();
1218 let mut snapshot = worktree.background_snapshot.lock();
1219 let entry = snapshot.insert_entry(entry);
1220 worktree.snapshot = snapshot.clone();
1221 entry
1222 })
1223 })
1224 }
1225
1226 pub(crate) fn delete_entry(
1227 &mut self,
1228 id: ProjectEntryId,
1229 scan_id: usize,
1230 cx: &mut ModelContext<Worktree>,
1231 ) -> Task<Result<()>> {
1232 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1233 cx.spawn(|this, mut cx| async move {
1234 wait_for_snapshot.await?;
1235 this.update(&mut cx, |worktree, _| {
1236 let worktree = worktree.as_remote_mut().unwrap();
1237 let mut snapshot = worktree.background_snapshot.lock();
1238 snapshot.delete_entry(id);
1239 worktree.snapshot = snapshot.clone();
1240 });
1241 Ok(())
1242 })
1243 }
1244}
1245
1246impl Snapshot {
1247 pub fn id(&self) -> WorktreeId {
1248 self.id
1249 }
1250
1251 pub fn abs_path(&self) -> &Arc<Path> {
1252 &self.abs_path
1253 }
1254
1255 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1256 self.entries_by_id.get(&entry_id, &()).is_some()
1257 }
1258
1259 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1260 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1261 let old_entry = self.entries_by_id.insert_or_replace(
1262 PathEntry {
1263 id: entry.id,
1264 path: entry.path.clone(),
1265 is_ignored: entry.is_ignored,
1266 scan_id: 0,
1267 },
1268 &(),
1269 );
1270 if let Some(old_entry) = old_entry {
1271 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1272 }
1273 self.entries_by_path.insert_or_replace(entry.clone(), &());
1274 Ok(entry)
1275 }
1276
1277 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
1278 if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
1279 self.entries_by_path = {
1280 let mut cursor = self.entries_by_path.cursor();
1281 let mut new_entries_by_path =
1282 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1283 while let Some(entry) = cursor.item() {
1284 if entry.path.starts_with(&removed_entry.path) {
1285 self.entries_by_id.remove(&entry.id, &());
1286 cursor.next(&());
1287 } else {
1288 break;
1289 }
1290 }
1291 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1292 new_entries_by_path
1293 };
1294
1295 true
1296 } else {
1297 false
1298 }
1299 }
1300
1301 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1302 let mut entries_by_path_edits = Vec::new();
1303 let mut entries_by_id_edits = Vec::new();
1304 for entry_id in update.removed_entries {
1305 let entry = self
1306 .entry_for_id(ProjectEntryId::from_proto(entry_id))
1307 .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
1308 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1309 entries_by_id_edits.push(Edit::Remove(entry.id));
1310 }
1311
1312 for entry in update.updated_entries {
1313 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1314 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1315 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1316 }
1317 entries_by_id_edits.push(Edit::Insert(PathEntry {
1318 id: entry.id,
1319 path: entry.path.clone(),
1320 is_ignored: entry.is_ignored,
1321 scan_id: 0,
1322 }));
1323 entries_by_path_edits.push(Edit::Insert(entry));
1324 }
1325
1326 self.entries_by_path.edit(entries_by_path_edits, &());
1327 self.entries_by_id.edit(entries_by_id_edits, &());
1328 self.scan_id = update.scan_id as usize;
1329 if update.is_last_update {
1330 self.completed_scan_id = update.scan_id as usize;
1331 }
1332
1333 Ok(())
1334 }
1335
1336 pub fn file_count(&self) -> usize {
1337 self.entries_by_path.summary().file_count
1338 }
1339
1340 pub fn visible_file_count(&self) -> usize {
1341 self.entries_by_path.summary().visible_file_count
1342 }
1343
1344 fn traverse_from_offset(
1345 &self,
1346 include_dirs: bool,
1347 include_ignored: bool,
1348 start_offset: usize,
1349 ) -> Traversal {
1350 let mut cursor = self.entries_by_path.cursor();
1351 cursor.seek(
1352 &TraversalTarget::Count {
1353 count: start_offset,
1354 include_dirs,
1355 include_ignored,
1356 },
1357 Bias::Right,
1358 &(),
1359 );
1360 Traversal {
1361 cursor,
1362 include_dirs,
1363 include_ignored,
1364 }
1365 }
1366
1367 fn traverse_from_path(
1368 &self,
1369 include_dirs: bool,
1370 include_ignored: bool,
1371 path: &Path,
1372 ) -> Traversal {
1373 let mut cursor = self.entries_by_path.cursor();
1374 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1375 Traversal {
1376 cursor,
1377 include_dirs,
1378 include_ignored,
1379 }
1380 }
1381
1382 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1383 self.traverse_from_offset(false, include_ignored, start)
1384 }
1385
1386 pub fn entries(&self, include_ignored: bool) -> Traversal {
1387 self.traverse_from_offset(true, include_ignored, 0)
1388 }
1389
1390 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1391 let empty_path = Path::new("");
1392 self.entries_by_path
1393 .cursor::<()>()
1394 .filter(move |entry| entry.path.as_ref() != empty_path)
1395 .map(|entry| &entry.path)
1396 }
1397
1398 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1399 let mut cursor = self.entries_by_path.cursor();
1400 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1401 let traversal = Traversal {
1402 cursor,
1403 include_dirs: true,
1404 include_ignored: true,
1405 };
1406 ChildEntriesIter {
1407 traversal,
1408 parent_path,
1409 }
1410 }
1411
1412 pub fn root_entry(&self) -> Option<&Entry> {
1413 self.entry_for_path("")
1414 }
1415
1416 pub fn root_name(&self) -> &str {
1417 &self.root_name
1418 }
1419
1420 pub fn scan_started(&mut self) {
1421 self.scan_id += 1;
1422 }
1423
1424 pub fn scan_completed(&mut self) {
1425 self.completed_scan_id = self.scan_id;
1426 }
1427
1428 pub fn scan_id(&self) -> usize {
1429 self.scan_id
1430 }
1431
1432 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1433 let path = path.as_ref();
1434 self.traverse_from_path(true, true, path)
1435 .entry()
1436 .and_then(|entry| {
1437 if entry.path.as_ref() == path {
1438 Some(entry)
1439 } else {
1440 None
1441 }
1442 })
1443 }
1444
1445 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1446 let entry = self.entries_by_id.get(&id, &())?;
1447 self.entry_for_path(&entry.path)
1448 }
1449
1450 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1451 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1452 }
1453}
1454
1455impl LocalSnapshot {
    /// Returns the most specific git repository containing the given path, if any.
1457 pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
1458 self.git_repositories
1459 .iter()
            .rev() // `git_repositories` is sorted lexicographically, so the last match is the most specific
1461 .find(|repo| repo.manages(path))
1462 .cloned()
1463 }
1464
1465 pub(crate) fn in_dot_git(&mut self, path: &Path) -> Option<&mut GitRepositoryEntry> {
1466 // Git repositories cannot be nested, so we don't need to reverse the order
1467 self.git_repositories
1468 .iter_mut()
1469 .find(|repo| repo.in_dot_git(path))
1470 }
1471
1472 #[cfg(test)]
1473 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1474 let root_name = self.root_name.clone();
1475 proto::UpdateWorktree {
1476 project_id,
1477 worktree_id: self.id().to_proto(),
1478 abs_path: self.abs_path().to_string_lossy().into(),
1479 root_name,
1480 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1481 removed_entries: Default::default(),
1482 scan_id: self.scan_id as u64,
1483 is_last_update: true,
1484 }
1485 }
1486
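    /// Builds a protobuf update describing how this snapshot's entries differ from
    /// `other`, the snapshot that was previously sent.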
1487 pub(crate) fn build_update(
1488 &self,
1489 other: &Self,
1490 project_id: u64,
1491 worktree_id: u64,
1492 include_ignored: bool,
1493 ) -> proto::UpdateWorktree {
1494 let mut updated_entries = Vec::new();
1495 let mut removed_entries = Vec::new();
1496 let mut self_entries = self
1497 .entries_by_id
1498 .cursor::<()>()
1499 .filter(|e| include_ignored || !e.is_ignored)
1500 .peekable();
1501 let mut other_entries = other
1502 .entries_by_id
1503 .cursor::<()>()
1504 .filter(|e| include_ignored || !e.is_ignored)
1505 .peekable();
1506 loop {
1507 match (self_entries.peek(), other_entries.peek()) {
1508 (Some(self_entry), Some(other_entry)) => {
1509 match Ord::cmp(&self_entry.id, &other_entry.id) {
1510 Ordering::Less => {
1511 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1512 updated_entries.push(entry);
1513 self_entries.next();
1514 }
1515 Ordering::Equal => {
1516 if self_entry.scan_id != other_entry.scan_id {
1517 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1518 updated_entries.push(entry);
1519 }
1520
1521 self_entries.next();
1522 other_entries.next();
1523 }
1524 Ordering::Greater => {
1525 removed_entries.push(other_entry.id.to_proto());
1526 other_entries.next();
1527 }
1528 }
1529 }
1530 (Some(self_entry), None) => {
1531 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1532 updated_entries.push(entry);
1533 self_entries.next();
1534 }
1535 (None, Some(other_entry)) => {
1536 removed_entries.push(other_entry.id.to_proto());
1537 other_entries.next();
1538 }
1539 (None, None) => break,
1540 }
1541 }
1542
1543 proto::UpdateWorktree {
1544 project_id,
1545 worktree_id,
1546 abs_path: self.abs_path().to_string_lossy().into(),
1547 root_name: self.root_name().to_string(),
1548 updated_entries,
1549 removed_entries,
1550 scan_id: self.scan_id as u64,
1551 is_last_update: self.completed_scan_id == self.scan_id,
1552 }
1553 }
1554
1555 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1556 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1557 let abs_path = self.abs_path.join(&entry.path);
1558 match smol::block_on(build_gitignore(&abs_path, fs)) {
1559 Ok(ignore) => {
1560 self.ignores_by_parent_abs_path.insert(
1561 abs_path.parent().unwrap().into(),
1562 (Arc::new(ignore), self.scan_id),
1563 );
1564 }
1565 Err(error) => {
1566 log::error!(
1567 "error loading .gitignore file {:?} - {:?}",
1568 &entry.path,
1569 error
1570 );
1571 }
1572 }
1573 }
1574
1575 self.reuse_entry_id(&mut entry);
1576
1577 if entry.kind == EntryKind::PendingDir {
1578 if let Some(existing_entry) =
1579 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1580 {
1581 entry.kind = existing_entry.kind;
1582 }
1583 }
1584
1585 let scan_id = self.scan_id;
1586 self.entries_by_path.insert_or_replace(entry.clone(), &());
1587 self.entries_by_id.insert_or_replace(
1588 PathEntry {
1589 id: entry.id,
1590 path: entry.path.clone(),
1591 is_ignored: entry.is_ignored,
1592 scan_id,
1593 },
1594 &(),
1595 );
1596
1597 entry
1598 }
1599
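    /// Inserts the scanned children of `parent_path`, marks the parent as a fully
    /// loaded directory, and registers a git repository if the parent is a `.git`
    /// directory.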
1600 fn populate_dir(
1601 &mut self,
1602 parent_path: Arc<Path>,
1603 entries: impl IntoIterator<Item = Entry>,
1604 ignore: Option<Arc<Gitignore>>,
1605 fs: &dyn Fs,
1606 ) {
1607 let mut parent_entry = if let Some(parent_entry) =
1608 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1609 {
1610 parent_entry.clone()
1611 } else {
1612 log::warn!(
1613 "populating a directory {:?} that has been removed",
1614 parent_path
1615 );
1616 return;
1617 };
1618
1619 if let Some(ignore) = ignore {
1620 self.ignores_by_parent_abs_path.insert(
1621 self.abs_path.join(&parent_path).into(),
1622 (ignore, self.scan_id),
1623 );
1624 }
1625 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1626 parent_entry.kind = EntryKind::Dir;
1627 } else {
1628 unreachable!();
1629 }
1630
1631 if parent_path.file_name() == Some(&DOT_GIT) {
1632 let abs_path = self.abs_path.join(&parent_path);
1633 let content_path: Arc<Path> = parent_path.parent().unwrap().into();
1634 if let Err(ix) = self
1635 .git_repositories
1636 .binary_search_by_key(&&content_path, |repo| &repo.content_path)
1637 {
1638 if let Some(repo) = fs.open_repo(abs_path.as_path()) {
1639 self.git_repositories.insert(
1640 ix,
1641 GitRepositoryEntry {
1642 repo,
1643 scan_id: 0,
1644 content_path,
1645 git_dir_path: parent_path,
1646 },
1647 );
1648 }
1649 }
1650 }
1651
1652 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1653 let mut entries_by_id_edits = Vec::new();
1654
1655 for mut entry in entries {
1656 self.reuse_entry_id(&mut entry);
1657 entries_by_id_edits.push(Edit::Insert(PathEntry {
1658 id: entry.id,
1659 path: entry.path.clone(),
1660 is_ignored: entry.is_ignored,
1661 scan_id: self.scan_id,
1662 }));
1663 entries_by_path_edits.push(Edit::Insert(entry));
1664 }
1665
1666 self.entries_by_path.edit(entries_by_path_edits, &());
1667 self.entries_by_id.edit(entries_by_id_edits, &());
1668 }
1669
1670 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1671 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1672 entry.id = removed_entry_id;
1673 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1674 entry.id = existing_entry.id;
1675 }
1676 }
1677
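    /// Removes the entry at `path` and all of its descendants, remembering removed
    /// entry ids by inode so they can be reused if the same files reappear, and
    /// stamping any affected `.gitignore` or git repository entry with the current
    /// scan id.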
1678 fn remove_path(&mut self, path: &Path) {
1679 let mut new_entries;
1680 let removed_entries;
1681 {
1682 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1683 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1684 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1685 new_entries.push_tree(cursor.suffix(&()), &());
1686 }
1687 self.entries_by_path = new_entries;
1688
1689 let mut entries_by_id_edits = Vec::new();
1690 for entry in removed_entries.cursor::<()>() {
1691 let removed_entry_id = self
1692 .removed_entry_ids
1693 .entry(entry.inode)
1694 .or_insert(entry.id);
1695 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1696 entries_by_id_edits.push(Edit::Remove(entry.id));
1697 }
1698 self.entries_by_id.edit(entries_by_id_edits, &());
1699
1700 if path.file_name() == Some(&GITIGNORE) {
1701 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1702 if let Some((_, scan_id)) = self
1703 .ignores_by_parent_abs_path
1704 .get_mut(abs_parent_path.as_path())
1705 {
1706 *scan_id = self.snapshot.scan_id;
1707 }
1708 } else if path.file_name() == Some(&DOT_GIT) {
1709 let parent_path = path.parent().unwrap();
1710 if let Ok(ix) = self
1711 .git_repositories
1712 .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
1713 {
1714 self.git_repositories[ix].scan_id = self.snapshot.scan_id;
1715 }
1716 }
1717 }
1718
1719 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1720 let mut inodes = TreeSet::default();
1721 for ancestor in path.ancestors().skip(1) {
1722 if let Some(entry) = self.entry_for_path(ancestor) {
1723 inodes.insert(entry.inode);
1724 }
1725 }
1726 inodes
1727 }
1728
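    /// Builds the stack of `.gitignore` files that apply to `abs_path`, walking its
    /// ancestors from the worktree root down to the path's parent.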
1729 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1730 let mut new_ignores = Vec::new();
1731 for ancestor in abs_path.ancestors().skip(1) {
1732 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1733 new_ignores.push((ancestor, Some(ignore.clone())));
1734 } else {
1735 new_ignores.push((ancestor, None));
1736 }
1737 }
1738
1739 let mut ignore_stack = IgnoreStack::none();
1740 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1741 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1742 ignore_stack = IgnoreStack::all();
1743 break;
1744 } else if let Some(ignore) = ignore {
1745 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1746 }
1747 }
1748
1749 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1750 ignore_stack = IgnoreStack::all();
1751 }
1752
1753 ignore_stack
1754 }
1755
1756 pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
1757 &self.git_repositories
1758 }
1759}
1760
1761impl GitRepositoryEntry {
    // Note that this path should be relative to the worktree root.
1763 pub(crate) fn manages(&self, path: &Path) -> bool {
1764 path.starts_with(self.content_path.as_ref())
1765 }
1766
    // Note that this path should be relative to the worktree root.
1768 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1769 path.starts_with(self.git_dir_path.as_ref())
1770 }
1771}
1772
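/// Loads and parses the `.gitignore` file at the given absolute path.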
1773async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1774 let contents = fs.load(abs_path).await?;
1775 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1776 let mut builder = GitignoreBuilder::new(parent);
1777 for line in contents.lines() {
1778 builder.add_line(Some(abs_path.into()), line)?;
1779 }
1780 Ok(builder.build()?)
1781}
1782
1783impl WorktreeId {
1784 pub fn from_usize(handle_id: usize) -> Self {
1785 Self(handle_id)
1786 }
1787
1788 pub(crate) fn from_proto(id: u64) -> Self {
1789 Self(id as usize)
1790 }
1791
1792 pub fn to_proto(&self) -> u64 {
1793 self.0 as u64
1794 }
1795
1796 pub fn to_usize(&self) -> usize {
1797 self.0
1798 }
1799}
1800
1801impl fmt::Display for WorktreeId {
1802 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1803 self.0.fmt(f)
1804 }
1805}
1806
1807impl Deref for Worktree {
1808 type Target = Snapshot;
1809
1810 fn deref(&self) -> &Self::Target {
1811 match self {
1812 Worktree::Local(worktree) => &worktree.snapshot,
1813 Worktree::Remote(worktree) => &worktree.snapshot,
1814 }
1815 }
1816}
1817
1818impl Deref for LocalWorktree {
1819 type Target = LocalSnapshot;
1820
1821 fn deref(&self) -> &Self::Target {
1822 &self.snapshot
1823 }
1824}
1825
1826impl Deref for RemoteWorktree {
1827 type Target = Snapshot;
1828
1829 fn deref(&self) -> &Self::Target {
1830 &self.snapshot
1831 }
1832}
1833
1834impl fmt::Debug for LocalWorktree {
1835 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1836 self.snapshot.fmt(f)
1837 }
1838}
1839
1840impl fmt::Debug for Snapshot {
1841 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1842 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1843 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1844
1845 impl<'a> fmt::Debug for EntriesByPath<'a> {
1846 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1847 f.debug_map()
1848 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1849 .finish()
1850 }
1851 }
1852
1853 impl<'a> fmt::Debug for EntriesById<'a> {
1854 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1855 f.debug_list().entries(self.0.iter()).finish()
1856 }
1857 }
1858
1859 f.debug_struct("Snapshot")
1860 .field("id", &self.id)
1861 .field("root_name", &self.root_name)
1862 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1863 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1864 .finish()
1865 }
1866}
1867
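/// A file within a worktree; this is the `language::File` implementation attached to
/// buffers in a project.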
1868#[derive(Clone, PartialEq)]
1869pub struct File {
1870 pub worktree: ModelHandle<Worktree>,
1871 pub path: Arc<Path>,
1872 pub mtime: SystemTime,
1873 pub(crate) entry_id: ProjectEntryId,
1874 pub(crate) is_local: bool,
1875 pub(crate) is_deleted: bool,
1876}
1877
1878impl language::File for File {
1879 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1880 if self.is_local {
1881 Some(self)
1882 } else {
1883 None
1884 }
1885 }
1886
1887 fn mtime(&self) -> SystemTime {
1888 self.mtime
1889 }
1890
1891 fn path(&self) -> &Arc<Path> {
1892 &self.path
1893 }
1894
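    /// Returns the path to display to the user: the worktree's root name followed by
    /// this file's path for visible worktrees, or the worktree's absolute path (with
    /// the home directory abbreviated to `~`) for non-visible ones.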
1895 fn full_path(&self, cx: &AppContext) -> PathBuf {
1896 let mut full_path = PathBuf::new();
1897 let worktree = self.worktree.read(cx);
1898
1899 if worktree.is_visible() {
1900 full_path.push(worktree.root_name());
1901 } else {
1902 let path = worktree.abs_path();
1903
1904 if worktree.is_local() && path.starts_with(HOME.as_path()) {
1905 full_path.push("~");
1906 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
1907 } else {
1908 full_path.push(path)
1909 }
1910 }
1911
1912 if self.path.components().next().is_some() {
1913 full_path.push(&self.path);
1914 }
1915
1916 full_path
1917 }
1918
1919 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1920 /// of its worktree, then this method will return the name of the worktree itself.
1921 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1922 self.path
1923 .file_name()
1924 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1925 }
1926
1927 fn is_deleted(&self) -> bool {
1928 self.is_deleted
1929 }
1930
1931 fn as_any(&self) -> &dyn Any {
1932 self
1933 }
1934
1935 fn to_proto(&self) -> rpc::proto::File {
1936 rpc::proto::File {
1937 worktree_id: self.worktree.id() as u64,
1938 entry_id: self.entry_id.to_proto(),
1939 path: self.path.to_string_lossy().into(),
1940 mtime: Some(self.mtime.into()),
1941 is_deleted: self.is_deleted,
1942 }
1943 }
1944}
1945
1946impl language::LocalFile for File {
1947 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1948 self.worktree
1949 .read(cx)
1950 .as_local()
1951 .unwrap()
1952 .abs_path
1953 .join(&self.path)
1954 }
1955
1956 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1957 let worktree = self.worktree.read(cx).as_local().unwrap();
1958 let abs_path = worktree.absolutize(&self.path);
1959 let fs = worktree.fs.clone();
1960 cx.background()
1961 .spawn(async move { fs.load(&abs_path).await })
1962 }
1963
1964 fn buffer_reloaded(
1965 &self,
1966 buffer_id: u64,
1967 version: &clock::Global,
1968 fingerprint: RopeFingerprint,
1969 line_ending: LineEnding,
1970 mtime: SystemTime,
1971 cx: &mut MutableAppContext,
1972 ) {
1973 let worktree = self.worktree.read(cx).as_local().unwrap();
1974 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1975 worktree
1976 .client
1977 .send(proto::BufferReloaded {
1978 project_id,
1979 buffer_id,
1980 version: serialize_version(version),
1981 mtime: Some(mtime.into()),
1982 fingerprint: serialize_fingerprint(fingerprint),
1983 line_ending: serialize_line_ending(line_ending) as i32,
1984 })
1985 .log_err();
1986 }
1987 }
1988}
1989
1990impl File {
1991 pub fn from_proto(
1992 proto: rpc::proto::File,
1993 worktree: ModelHandle<Worktree>,
1994 cx: &AppContext,
1995 ) -> Result<Self> {
1996 let worktree_id = worktree
1997 .read(cx)
1998 .as_remote()
1999 .ok_or_else(|| anyhow!("not remote"))?
2000 .id();
2001
2002 if worktree_id.to_proto() != proto.worktree_id {
2003 return Err(anyhow!("worktree id does not match file"));
2004 }
2005
2006 Ok(Self {
2007 worktree,
2008 path: Path::new(&proto.path).into(),
2009 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2010 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2011 is_local: false,
2012 is_deleted: proto.is_deleted,
2013 })
2014 }
2015
2016 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2017 file.and_then(|f| f.as_any().downcast_ref())
2018 }
2019
2020 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2021 self.worktree.read(cx).id()
2022 }
2023
2024 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2025 if self.is_deleted {
2026 None
2027 } else {
2028 Some(self.entry_id)
2029 }
2030 }
2031}
2032
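/// A single file or directory in a worktree's snapshot.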
2033#[derive(Clone, Debug, PartialEq, Eq)]
2034pub struct Entry {
2035 pub id: ProjectEntryId,
2036 pub kind: EntryKind,
2037 pub path: Arc<Path>,
2038 pub inode: u64,
2039 pub mtime: SystemTime,
2040 pub is_symlink: bool,
2041 pub is_ignored: bool,
2042}
2043
2044#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2045pub enum EntryKind {
2046 PendingDir,
2047 Dir,
2048 File(CharBag),
2049}
2050
2051impl Entry {
2052 fn new(
2053 path: Arc<Path>,
2054 metadata: &fs::Metadata,
2055 next_entry_id: &AtomicUsize,
2056 root_char_bag: CharBag,
2057 ) -> Self {
2058 Self {
2059 id: ProjectEntryId::new(next_entry_id),
2060 kind: if metadata.is_dir {
2061 EntryKind::PendingDir
2062 } else {
2063 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2064 },
2065 path,
2066 inode: metadata.inode,
2067 mtime: metadata.mtime,
2068 is_symlink: metadata.is_symlink,
2069 is_ignored: false,
2070 }
2071 }
2072
2073 pub fn is_dir(&self) -> bool {
2074 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2075 }
2076
2077 pub fn is_file(&self) -> bool {
2078 matches!(self.kind, EntryKind::File(_))
2079 }
2080}
2081
2082impl sum_tree::Item for Entry {
2083 type Summary = EntrySummary;
2084
2085 fn summary(&self) -> Self::Summary {
2086 let visible_count = if self.is_ignored { 0 } else { 1 };
2087 let file_count;
2088 let visible_file_count;
2089 if self.is_file() {
2090 file_count = 1;
2091 visible_file_count = visible_count;
2092 } else {
2093 file_count = 0;
2094 visible_file_count = 0;
2095 }
2096
2097 EntrySummary {
2098 max_path: self.path.clone(),
2099 count: 1,
2100 visible_count,
2101 file_count,
2102 visible_file_count,
2103 }
2104 }
2105}
2106
2107impl sum_tree::KeyedItem for Entry {
2108 type Key = PathKey;
2109
2110 fn key(&self) -> Self::Key {
2111 PathKey(self.path.clone())
2112 }
2113}
2114
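/// Aggregated data over a range of entries in the `entries_by_path` tree: the
/// right-most path in the range, plus counts of all entries, visible (non-ignored)
/// entries, files, and visible files.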
2115#[derive(Clone, Debug)]
2116pub struct EntrySummary {
2117 max_path: Arc<Path>,
2118 count: usize,
2119 visible_count: usize,
2120 file_count: usize,
2121 visible_file_count: usize,
2122}
2123
2124impl Default for EntrySummary {
2125 fn default() -> Self {
2126 Self {
2127 max_path: Arc::from(Path::new("")),
2128 count: 0,
2129 visible_count: 0,
2130 file_count: 0,
2131 visible_file_count: 0,
2132 }
2133 }
2134}
2135
2136impl sum_tree::Summary for EntrySummary {
2137 type Context = ();
2138
2139 fn add_summary(&mut self, rhs: &Self, _: &()) {
2140 self.max_path = rhs.max_path.clone();
2141 self.count += rhs.count;
2142 self.visible_count += rhs.visible_count;
2143 self.file_count += rhs.file_count;
2144 self.visible_file_count += rhs.visible_file_count;
2145 }
2146}
2147
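/// An entry in the `entries_by_id` tree, mapping a `ProjectEntryId` to the path it
/// was last seen at, along with its ignore status and the scan that last updated it.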
2148#[derive(Clone, Debug)]
2149struct PathEntry {
2150 id: ProjectEntryId,
2151 path: Arc<Path>,
2152 is_ignored: bool,
2153 scan_id: usize,
2154}
2155
2156impl sum_tree::Item for PathEntry {
2157 type Summary = PathEntrySummary;
2158
2159 fn summary(&self) -> Self::Summary {
2160 PathEntrySummary { max_id: self.id }
2161 }
2162}
2163
2164impl sum_tree::KeyedItem for PathEntry {
2165 type Key = ProjectEntryId;
2166
2167 fn key(&self) -> Self::Key {
2168 self.id
2169 }
2170}
2171
2172#[derive(Clone, Debug, Default)]
2173struct PathEntrySummary {
2174 max_id: ProjectEntryId,
2175}
2176
2177impl sum_tree::Summary for PathEntrySummary {
2178 type Context = ();
2179
2180 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2181 self.max_id = summary.max_id;
2182 }
2183}
2184
2185impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2186 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2187 *self = summary.max_id;
2188 }
2189}
2190
2191#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2192pub struct PathKey(Arc<Path>);
2193
2194impl Default for PathKey {
2195 fn default() -> Self {
2196 Self(Path::new("").into())
2197 }
2198}
2199
2200impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2201 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2202 self.0 = summary.max_path.clone();
2203 }
2204}
2205
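/// Scans a local worktree on the background executor, keeping the shared
/// `LocalSnapshot` up to date as file-system events arrive and reporting progress
/// to the foreground via `ScanState` messages on the `notify` channel.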
2206struct BackgroundScanner {
2207 fs: Arc<dyn Fs>,
2208 snapshot: Arc<Mutex<LocalSnapshot>>,
2209 notify: UnboundedSender<ScanState>,
2210 executor: Arc<executor::Background>,
2211}
2212
2213impl BackgroundScanner {
2214 fn new(
2215 snapshot: Arc<Mutex<LocalSnapshot>>,
2216 notify: UnboundedSender<ScanState>,
2217 fs: Arc<dyn Fs>,
2218 executor: Arc<executor::Background>,
2219 ) -> Self {
2220 Self {
2221 fs,
2222 snapshot,
2223 notify,
2224 executor,
2225 }
2226 }
2227
2228 fn abs_path(&self) -> Arc<Path> {
2229 self.snapshot.lock().abs_path.clone()
2230 }
2231
2232 fn snapshot(&self) -> LocalSnapshot {
2233 self.snapshot.lock().clone()
2234 }
2235
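    /// Performs the initial scan of the worktree, then applies batches of
    /// file-system events until either the event stream or the `notify` channel
    /// is closed.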
2236 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2237 if self.notify.unbounded_send(ScanState::Initializing).is_err() {
2238 return;
2239 }
2240
2241 if let Err(err) = self.scan_dirs().await {
2242 if self
2243 .notify
2244 .unbounded_send(ScanState::Err(Arc::new(err)))
2245 .is_err()
2246 {
2247 return;
2248 }
2249 }
2250
2251 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2252 return;
2253 }
2254
2255 futures::pin_mut!(events_rx);
2256
2257 while let Some(mut events) = events_rx.next().await {
2258 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
2259 events.extend(additional_events);
2260 }
2261
2262 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2263 break;
2264 }
2265
2266 if !self.process_events(events).await {
2267 break;
2268 }
2269
2270 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2271 break;
2272 }
2273 }
2274 }
2275
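    /// Scans the whole worktree in parallel: loads any `.gitignore` files in the
    /// root's ancestor directories, seeds the scan queue with the root directory,
    /// and fans the work out across one task per CPU.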
2276 async fn scan_dirs(&mut self) -> Result<()> {
2277 let root_char_bag;
2278 let root_abs_path;
2279 let root_inode;
2280 let is_dir;
2281 let next_entry_id;
2282 {
2283 let mut snapshot = self.snapshot.lock();
2284 snapshot.scan_started();
2285 root_char_bag = snapshot.root_char_bag;
2286 root_abs_path = snapshot.abs_path.clone();
2287 root_inode = snapshot.root_entry().map(|e| e.inode);
2288 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
2289 next_entry_id = snapshot.next_entry_id.clone();
2290 };
2291
2292 // Populate ignores above the root.
2293 for ancestor in root_abs_path.ancestors().skip(1) {
2294 if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2295 {
2296 self.snapshot
2297 .lock()
2298 .ignores_by_parent_abs_path
2299 .insert(ancestor.into(), (ignore.into(), 0));
2300 }
2301 }
2302
2303 let ignore_stack = {
2304 let mut snapshot = self.snapshot.lock();
2305 let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2306 if ignore_stack.is_all() {
2307 if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2308 root_entry.is_ignored = true;
2309 snapshot.insert_entry(root_entry, self.fs.as_ref());
2310 }
2311 }
2312 ignore_stack
2313 };
2314
2315 if is_dir {
2316 let path: Arc<Path> = Arc::from(Path::new(""));
2317 let mut ancestor_inodes = TreeSet::default();
2318 if let Some(root_inode) = root_inode {
2319 ancestor_inodes.insert(root_inode);
2320 }
2321
2322 let (tx, rx) = channel::unbounded();
2323 self.executor
2324 .block(tx.send(ScanJob {
2325 abs_path: root_abs_path.to_path_buf(),
2326 path,
2327 ignore_stack,
2328 ancestor_inodes,
2329 scan_queue: tx.clone(),
2330 }))
2331 .unwrap();
2332 drop(tx);
2333
2334 self.executor
2335 .scoped(|scope| {
2336 for _ in 0..self.executor.num_cpus() {
2337 scope.spawn(async {
2338 while let Ok(job) = rx.recv().await {
2339 if let Err(err) = self
2340 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2341 .await
2342 {
2343 log::error!("error scanning {:?}: {}", job.abs_path, err);
2344 }
2345 }
2346 });
2347 }
2348 })
2349 .await;
2350
2351 self.snapshot.lock().scan_completed();
2352 }
2353
2354 Ok(())
2355 }
2356
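    /// Scans a single directory: builds an entry for each child, loads a
    /// `.gitignore` if one is present, and queues jobs for child directories whose
    /// inodes have not already been visited along this path (guarding against
    /// symlink cycles).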
2357 async fn scan_dir(
2358 &self,
2359 root_char_bag: CharBag,
2360 next_entry_id: Arc<AtomicUsize>,
2361 job: &ScanJob,
2362 ) -> Result<()> {
2363 let mut new_entries: Vec<Entry> = Vec::new();
2364 let mut new_jobs: Vec<ScanJob> = Vec::new();
2365 let mut ignore_stack = job.ignore_stack.clone();
2366 let mut new_ignore = None;
2367
2368 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2369 while let Some(child_abs_path) = child_paths.next().await {
2370 let child_abs_path = match child_abs_path {
2371 Ok(child_abs_path) => child_abs_path,
2372 Err(error) => {
2373 log::error!("error processing entry {:?}", error);
2374 continue;
2375 }
2376 };
2377 let child_name = child_abs_path.file_name().unwrap();
2378 let child_path: Arc<Path> = job.path.join(child_name).into();
2379 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2380 Ok(Some(metadata)) => metadata,
2381 Ok(None) => continue,
2382 Err(err) => {
2383 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2384 continue;
2385 }
2386 };
2387
2388 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2389 if child_name == *GITIGNORE {
2390 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2391 Ok(ignore) => {
2392 let ignore = Arc::new(ignore);
2393 ignore_stack =
2394 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2395 new_ignore = Some(ignore);
2396 }
2397 Err(error) => {
2398 log::error!(
2399 "error loading .gitignore file {:?} - {:?}",
2400 child_name,
2401 error
2402 );
2403 }
2404 }
2405
                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore` starts
                // with a `.`, it tends to sort near the start of the directory listing, so there
                // should rarely be many entries to revisit. Update the ignore stack associated
                // with any new jobs as well.
2410 let mut new_jobs = new_jobs.iter_mut();
2411 for entry in &mut new_entries {
2412 let entry_abs_path = self.abs_path().join(&entry.path);
2413 entry.is_ignored =
2414 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2415 if entry.is_dir() {
2416 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2417 IgnoreStack::all()
2418 } else {
2419 ignore_stack.clone()
2420 };
2421 }
2422 }
2423 }
2424
2425 let mut child_entry = Entry::new(
2426 child_path.clone(),
2427 &child_metadata,
2428 &next_entry_id,
2429 root_char_bag,
2430 );
2431
2432 if child_entry.is_dir() {
2433 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2434 child_entry.is_ignored = is_ignored;
2435
2436 if !job.ancestor_inodes.contains(&child_entry.inode) {
2437 let mut ancestor_inodes = job.ancestor_inodes.clone();
2438 ancestor_inodes.insert(child_entry.inode);
2439 new_jobs.push(ScanJob {
2440 abs_path: child_abs_path,
2441 path: child_path,
2442 ignore_stack: if is_ignored {
2443 IgnoreStack::all()
2444 } else {
2445 ignore_stack.clone()
2446 },
2447 ancestor_inodes,
2448 scan_queue: job.scan_queue.clone(),
2449 });
2450 }
2451 } else {
2452 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2453 }
2454
2455 new_entries.push(child_entry);
2456 }
2457
2458 self.snapshot.lock().populate_dir(
2459 job.path.clone(),
2460 new_entries,
2461 new_ignore,
2462 self.fs.as_ref(),
2463 );
2464 for new_job in new_jobs {
2465 job.scan_queue.send(new_job).await.unwrap();
2466 }
2467
2468 Ok(())
2469 }
2470
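    /// Applies a batch of file-system events to the snapshot: removes the affected
    /// paths, re-inserts them from fresh metadata, rescans any directories that
    /// appeared, and refreshes ignore statuses and git repositories. Returns false
    /// if the root path can no longer be canonicalized (e.g. the root was deleted).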
2471 async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2472 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2473 events.dedup_by(|a, b| a.path.starts_with(&b.path));
2474
2475 let root_char_bag;
2476 let root_abs_path;
2477 let next_entry_id;
2478 {
2479 let mut snapshot = self.snapshot.lock();
2480 snapshot.scan_started();
2481 root_char_bag = snapshot.root_char_bag;
2482 root_abs_path = snapshot.abs_path.clone();
2483 next_entry_id = snapshot.next_entry_id.clone();
2484 }
2485
2486 let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
2487 path
2488 } else {
2489 return false;
2490 };
2491 let metadata = futures::future::join_all(
2492 events
2493 .iter()
2494 .map(|event| self.fs.metadata(&event.path))
2495 .collect::<Vec<_>>(),
2496 )
2497 .await;
2498
        // Hold the snapshot lock while removing and re-inserting the entries for each
        // event's path. This way, the snapshot is not observable to the foreground
        // thread while this operation is in progress.
2502 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2503 {
2504 let mut snapshot = self.snapshot.lock();
2505 for event in &events {
2506 if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
2507 snapshot.remove_path(path);
2508 }
2509 }
2510
2511 for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
2512 let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
2513 Ok(path) => Arc::from(path.to_path_buf()),
2514 Err(_) => {
2515 log::error!(
2516 "unexpected event {:?} for root path {:?}",
2517 event.path,
2518 root_canonical_path
2519 );
2520 continue;
2521 }
2522 };
2523 let abs_path = root_abs_path.join(&path);
2524
2525 match metadata {
2526 Ok(Some(metadata)) => {
2527 let ignore_stack =
2528 snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2529 let mut fs_entry = Entry::new(
2530 path.clone(),
2531 &metadata,
2532 snapshot.next_entry_id.as_ref(),
2533 snapshot.root_char_bag,
2534 );
2535 fs_entry.is_ignored = ignore_stack.is_all();
2536 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2537
2538 let scan_id = snapshot.scan_id;
2539 if let Some(repo) = snapshot.in_dot_git(&path) {
2540 repo.repo.lock().reload_index();
2541 repo.scan_id = scan_id;
2542 }
2543
2544 let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2545 if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2546 ancestor_inodes.insert(metadata.inode);
2547 self.executor
2548 .block(scan_queue_tx.send(ScanJob {
2549 abs_path,
2550 path,
2551 ignore_stack,
2552 ancestor_inodes,
2553 scan_queue: scan_queue_tx.clone(),
2554 }))
2555 .unwrap();
2556 }
2557 }
2558 Ok(None) => {}
2559 Err(err) => {
2560 // TODO - create a special 'error' entry in the entries tree to mark this
2561 log::error!("error reading file on event {:?}", err);
2562 }
2563 }
2564 }
2565 drop(scan_queue_tx);
2566 }
2567
2568 // Scan any directories that were created as part of this event batch.
2569 self.executor
2570 .scoped(|scope| {
2571 for _ in 0..self.executor.num_cpus() {
2572 scope.spawn(async {
2573 while let Ok(job) = scan_queue_rx.recv().await {
2574 if let Err(err) = self
2575 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2576 .await
2577 {
2578 log::error!("error scanning {:?}: {}", job.abs_path, err);
2579 }
2580 }
2581 });
2582 }
2583 })
2584 .await;
2585
2586 // Attempt to detect renames only over a single batch of file-system events.
2587 self.snapshot.lock().removed_entry_ids.clear();
2588
2589 self.update_ignore_statuses().await;
2590 self.update_git_repositories();
2591 self.snapshot.lock().scan_completed();
2592 true
2593 }
2594
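    /// Recomputes ignore statuses beneath any `.gitignore` file that changed during
    /// the current scan, and forgets cached ignores whose files have been deleted.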
2595 async fn update_ignore_statuses(&self) {
2596 let mut snapshot = self.snapshot();
2597
2598 let mut ignores_to_update = Vec::new();
2599 let mut ignores_to_delete = Vec::new();
2600 for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2601 if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2602 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2603 ignores_to_update.push(parent_abs_path.clone());
2604 }
2605
2606 let ignore_path = parent_path.join(&*GITIGNORE);
2607 if snapshot.entry_for_path(ignore_path).is_none() {
2608 ignores_to_delete.push(parent_abs_path.clone());
2609 }
2610 }
2611 }
2612
2613 for parent_abs_path in ignores_to_delete {
2614 snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2615 self.snapshot
2616 .lock()
2617 .ignores_by_parent_abs_path
2618 .remove(&parent_abs_path);
2619 }
2620
2621 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2622 ignores_to_update.sort_unstable();
2623 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2624 while let Some(parent_abs_path) = ignores_to_update.next() {
2625 while ignores_to_update
2626 .peek()
2627 .map_or(false, |p| p.starts_with(&parent_abs_path))
2628 {
2629 ignores_to_update.next().unwrap();
2630 }
2631
2632 let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2633 ignore_queue_tx
2634 .send(UpdateIgnoreStatusJob {
2635 abs_path: parent_abs_path,
2636 ignore_stack,
2637 ignore_queue: ignore_queue_tx.clone(),
2638 })
2639 .await
2640 .unwrap();
2641 }
2642 drop(ignore_queue_tx);
2643
2644 self.executor
2645 .scoped(|scope| {
2646 for _ in 0..self.executor.num_cpus() {
2647 scope.spawn(async {
2648 while let Ok(job) = ignore_queue_rx.recv().await {
2649 self.update_ignore_status(job, &snapshot).await;
2650 }
2651 });
2652 }
2653 })
2654 .await;
2655 }
2656
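    /// Drops any cached git repositories whose `.git` path no longer has an entry
    /// in the snapshot.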
2657 fn update_git_repositories(&self) {
2658 let mut snapshot = self.snapshot.lock();
2659 let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2660 git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
2661 snapshot.git_repositories = git_repositories;
2662 }
2663
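    /// Recomputes the ignore status of one directory's children, enqueueing further
    /// jobs for any child directories.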
2664 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2665 let mut ignore_stack = job.ignore_stack;
2666 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
2667 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2668 }
2669
2670 let mut entries_by_id_edits = Vec::new();
2671 let mut entries_by_path_edits = Vec::new();
2672 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
2673 for mut entry in snapshot.child_entries(path).cloned() {
2674 let was_ignored = entry.is_ignored;
2675 let abs_path = self.abs_path().join(&entry.path);
2676 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
2677 if entry.is_dir() {
2678 let child_ignore_stack = if entry.is_ignored {
2679 IgnoreStack::all()
2680 } else {
2681 ignore_stack.clone()
2682 };
2683 job.ignore_queue
2684 .send(UpdateIgnoreStatusJob {
2685 abs_path: abs_path.into(),
2686 ignore_stack: child_ignore_stack,
2687 ignore_queue: job.ignore_queue.clone(),
2688 })
2689 .await
2690 .unwrap();
2691 }
2692
2693 if entry.is_ignored != was_ignored {
2694 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2695 path_entry.scan_id = snapshot.scan_id;
2696 path_entry.is_ignored = entry.is_ignored;
2697 entries_by_id_edits.push(Edit::Insert(path_entry));
2698 entries_by_path_edits.push(Edit::Insert(entry));
2699 }
2700 }
2701
2702 let mut snapshot = self.snapshot.lock();
2703 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2704 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2705 }
2706}
2707
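/// Extends the root's `CharBag` with the lowercased characters of `path`, producing
/// the bag used for fuzzy-matching this path.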
2708fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2709 let mut result = root_char_bag;
2710 result.extend(
2711 path.to_string_lossy()
2712 .chars()
2713 .map(|c| c.to_ascii_lowercase()),
2714 );
2715 result
2716}
2717
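/// A unit of work for the parallel directory scan. `ancestor_inodes` holds the
/// inodes of the directories above `path` so that symlink cycles are not followed,
/// and `scan_queue` lets a job enqueue follow-up jobs for its subdirectories.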
2718struct ScanJob {
2719 abs_path: PathBuf,
2720 path: Arc<Path>,
2721 ignore_stack: Arc<IgnoreStack>,
2722 scan_queue: Sender<ScanJob>,
2723 ancestor_inodes: TreeSet<u64>,
2724}
2725
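/// A unit of work for recomputing ignore statuses beneath a directory whose
/// `.gitignore` state has changed.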
2726struct UpdateIgnoreStatusJob {
2727 abs_path: Arc<Path>,
2728 ignore_stack: Arc<IgnoreStack>,
2729 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2730}
2731
2732pub trait WorktreeHandle {
2733 #[cfg(any(test, feature = "test-support"))]
2734 fn flush_fs_events<'a>(
2735 &self,
2736 cx: &'a gpui::TestAppContext,
2737 ) -> futures::future::LocalBoxFuture<'a, ()>;
2738}
2739
2740impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and to emit extra scan-state notifications.
2744 //
2745 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2746 // to ensure that all redundant FS events have already been processed.
2747 #[cfg(any(test, feature = "test-support"))]
2748 fn flush_fs_events<'a>(
2749 &self,
2750 cx: &'a gpui::TestAppContext,
2751 ) -> futures::future::LocalBoxFuture<'a, ()> {
2752 use smol::future::FutureExt;
2753
2754 let filename = "fs-event-sentinel";
2755 let tree = self.clone();
2756 let (fs, root_path) = self.read_with(cx, |tree, _| {
2757 let tree = tree.as_local().unwrap();
2758 (tree.fs.clone(), tree.abs_path().clone())
2759 });
2760
2761 async move {
2762 fs.create_file(&root_path.join(filename), Default::default())
2763 .await
2764 .unwrap();
2765 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
2766 .await;
2767
2768 fs.remove_file(&root_path.join(filename), Default::default())
2769 .await
2770 .unwrap();
2771 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
2772 .await;
2773
2774 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2775 .await;
2776 }
2777 .boxed_local()
2778 }
2779}
2780
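/// A sum-tree dimension accumulating `EntrySummary` counts, which lets a
/// `Traversal` seek to an entry by its index among the kinds of entries it
/// includes (files and/or directories, ignored and/or visible).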
2781#[derive(Clone, Debug)]
2782struct TraversalProgress<'a> {
2783 max_path: &'a Path,
2784 count: usize,
2785 visible_count: usize,
2786 file_count: usize,
2787 visible_file_count: usize,
2788}
2789
2790impl<'a> TraversalProgress<'a> {
2791 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2792 match (include_ignored, include_dirs) {
2793 (true, true) => self.count,
2794 (true, false) => self.file_count,
2795 (false, true) => self.visible_count,
2796 (false, false) => self.visible_file_count,
2797 }
2798 }
2799}
2800
2801impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2802 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2803 self.max_path = summary.max_path.as_ref();
2804 self.count += summary.count;
2805 self.visible_count += summary.visible_count;
2806 self.file_count += summary.file_count;
2807 self.visible_file_count += summary.visible_file_count;
2808 }
2809}
2810
2811impl<'a> Default for TraversalProgress<'a> {
2812 fn default() -> Self {
2813 Self {
2814 max_path: Path::new(""),
2815 count: 0,
2816 visible_count: 0,
2817 file_count: 0,
2818 visible_file_count: 0,
2819 }
2820 }
2821}
2822
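/// A cursor-based iterator over a snapshot's entries, optionally skipping
/// directories and/or ignored entries.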
2823pub struct Traversal<'a> {
2824 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2825 include_ignored: bool,
2826 include_dirs: bool,
2827}
2828
2829impl<'a> Traversal<'a> {
2830 pub fn advance(&mut self) -> bool {
2831 self.advance_to_offset(self.offset() + 1)
2832 }
2833
2834 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2835 self.cursor.seek_forward(
2836 &TraversalTarget::Count {
2837 count: offset,
2838 include_dirs: self.include_dirs,
2839 include_ignored: self.include_ignored,
2840 },
2841 Bias::Right,
2842 &(),
2843 )
2844 }
2845
2846 pub fn advance_to_sibling(&mut self) -> bool {
2847 while let Some(entry) = self.cursor.item() {
2848 self.cursor.seek_forward(
2849 &TraversalTarget::PathSuccessor(&entry.path),
2850 Bias::Left,
2851 &(),
2852 );
2853 if let Some(entry) = self.cursor.item() {
2854 if (self.include_dirs || !entry.is_dir())
2855 && (self.include_ignored || !entry.is_ignored)
2856 {
2857 return true;
2858 }
2859 }
2860 }
2861 false
2862 }
2863
2864 pub fn entry(&self) -> Option<&'a Entry> {
2865 self.cursor.item()
2866 }
2867
2868 pub fn offset(&self) -> usize {
2869 self.cursor
2870 .start()
2871 .count(self.include_dirs, self.include_ignored)
2872 }
2873}
2874
2875impl<'a> Iterator for Traversal<'a> {
2876 type Item = &'a Entry;
2877
2878 fn next(&mut self) -> Option<Self::Item> {
2879 if let Some(item) = self.entry() {
2880 self.advance();
2881 Some(item)
2882 } else {
2883 None
2884 }
2885 }
2886}
2887
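/// A seek target for a `Traversal`: an exact path, the next path outside a given
/// path's subtree, or an absolute index among the included entries.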
2888#[derive(Debug)]
2889enum TraversalTarget<'a> {
2890 Path(&'a Path),
2891 PathSuccessor(&'a Path),
2892 Count {
2893 count: usize,
2894 include_ignored: bool,
2895 include_dirs: bool,
2896 },
2897}
2898
2899impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2900 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2901 match self {
2902 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2903 TraversalTarget::PathSuccessor(path) => {
2904 if !cursor_location.max_path.starts_with(path) {
2905 Ordering::Equal
2906 } else {
2907 Ordering::Greater
2908 }
2909 }
2910 TraversalTarget::Count {
2911 count,
2912 include_dirs,
2913 include_ignored,
2914 } => Ord::cmp(
2915 count,
2916 &cursor_location.count(*include_dirs, *include_ignored),
2917 ),
2918 }
2919 }
2920}
2921
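/// Iterates over the immediate children of `parent_path` by advancing the inner
/// traversal to each entry's next sibling.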
2922struct ChildEntriesIter<'a> {
2923 parent_path: &'a Path,
2924 traversal: Traversal<'a>,
2925}
2926
2927impl<'a> Iterator for ChildEntriesIter<'a> {
2928 type Item = &'a Entry;
2929
2930 fn next(&mut self) -> Option<Self::Item> {
2931 if let Some(item) = self.traversal.entry() {
2932 if item.path.starts_with(&self.parent_path) {
2933 self.traversal.advance_to_sibling();
2934 return Some(item);
2935 }
2936 }
2937 None
2938 }
2939}
2940
2941impl<'a> From<&'a Entry> for proto::Entry {
2942 fn from(entry: &'a Entry) -> Self {
2943 Self {
2944 id: entry.id.to_proto(),
2945 is_dir: entry.is_dir(),
2946 path: entry.path.to_string_lossy().into(),
2947 inode: entry.inode,
2948 mtime: Some(entry.mtime.into()),
2949 is_symlink: entry.is_symlink,
2950 is_ignored: entry.is_ignored,
2951 }
2952 }
2953}
2954
2955impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2956 type Error = anyhow::Error;
2957
2958 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2959 if let Some(mtime) = entry.mtime {
2960 let kind = if entry.is_dir {
2961 EntryKind::Dir
2962 } else {
2963 let mut char_bag = *root_char_bag;
2964 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2965 EntryKind::File(char_bag)
2966 };
2967 let path: Arc<Path> = PathBuf::from(entry.path).into();
2968 Ok(Entry {
2969 id: ProjectEntryId::from_proto(entry.id),
2970 kind,
2971 path,
2972 inode: entry.inode,
2973 mtime: mtime.into(),
2974 is_symlink: entry.is_symlink,
2975 is_ignored: entry.is_ignored,
2976 })
2977 } else {
2978 Err(anyhow!(
2979 "missing mtime in remote worktree entry {:?}",
2980 entry.path
2981 ))
2982 }
2983 }
2984}
2985
2986#[cfg(test)]
2987mod tests {
2988 use super::*;
2989 use anyhow::Result;
2990 use client::test::FakeHttpClient;
2991 use fs::repository::FakeGitRepository;
2992 use fs::{FakeFs, RealFs};
2993 use gpui::{executor::Deterministic, TestAppContext};
2994 use rand::prelude::*;
2995 use serde_json::json;
2996 use std::{
2997 env,
2998 fmt::Write,
2999 time::{SystemTime, UNIX_EPOCH},
3000 };
3001
3002 use util::test::temp_tree;
3003
3004 #[gpui::test]
3005 async fn test_traversal(cx: &mut TestAppContext) {
3006 let fs = FakeFs::new(cx.background());
3007 fs.insert_tree(
3008 "/root",
3009 json!({
3010 ".gitignore": "a/b\n",
3011 "a": {
3012 "b": "",
3013 "c": "",
3014 }
3015 }),
3016 )
3017 .await;
3018
3019 let http_client = FakeHttpClient::with_404_response();
3020 let client = cx.read(|cx| Client::new(http_client, cx));
3021
3022 let tree = Worktree::local(
3023 client,
3024 Arc::from(Path::new("/root")),
3025 true,
3026 fs,
3027 Default::default(),
3028 &mut cx.to_async(),
3029 )
3030 .await
3031 .unwrap();
3032 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3033 .await;
3034
3035 tree.read_with(cx, |tree, _| {
3036 assert_eq!(
3037 tree.entries(false)
3038 .map(|entry| entry.path.as_ref())
3039 .collect::<Vec<_>>(),
3040 vec![
3041 Path::new(""),
3042 Path::new(".gitignore"),
3043 Path::new("a"),
3044 Path::new("a/c"),
3045 ]
3046 );
3047 assert_eq!(
3048 tree.entries(true)
3049 .map(|entry| entry.path.as_ref())
3050 .collect::<Vec<_>>(),
3051 vec![
3052 Path::new(""),
3053 Path::new(".gitignore"),
3054 Path::new("a"),
3055 Path::new("a/b"),
3056 Path::new("a/c"),
3057 ]
3058 );
3059 })
3060 }
3061
3062 #[gpui::test(iterations = 10)]
3063 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3064 let fs = FakeFs::new(cx.background());
3065 fs.insert_tree(
3066 "/root",
3067 json!({
3068 "lib": {
3069 "a": {
3070 "a.txt": ""
3071 },
3072 "b": {
3073 "b.txt": ""
3074 }
3075 }
3076 }),
3077 )
3078 .await;
3079 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3080 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3081
3082 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3083 let tree = Worktree::local(
3084 client,
3085 Arc::from(Path::new("/root")),
3086 true,
3087 fs.clone(),
3088 Default::default(),
3089 &mut cx.to_async(),
3090 )
3091 .await
3092 .unwrap();
3093
3094 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3095 .await;
3096
3097 tree.read_with(cx, |tree, _| {
3098 assert_eq!(
3099 tree.entries(false)
3100 .map(|entry| entry.path.as_ref())
3101 .collect::<Vec<_>>(),
3102 vec![
3103 Path::new(""),
3104 Path::new("lib"),
3105 Path::new("lib/a"),
3106 Path::new("lib/a/a.txt"),
3107 Path::new("lib/a/lib"),
3108 Path::new("lib/b"),
3109 Path::new("lib/b/b.txt"),
3110 Path::new("lib/b/lib"),
3111 ]
3112 );
3113 });
3114
3115 fs.rename(
3116 Path::new("/root/lib/a/lib"),
3117 Path::new("/root/lib/a/lib-2"),
3118 Default::default(),
3119 )
3120 .await
3121 .unwrap();
3122 executor.run_until_parked();
3123 tree.read_with(cx, |tree, _| {
3124 assert_eq!(
3125 tree.entries(false)
3126 .map(|entry| entry.path.as_ref())
3127 .collect::<Vec<_>>(),
3128 vec![
3129 Path::new(""),
3130 Path::new("lib"),
3131 Path::new("lib/a"),
3132 Path::new("lib/a/a.txt"),
3133 Path::new("lib/a/lib-2"),
3134 Path::new("lib/b"),
3135 Path::new("lib/b/b.txt"),
3136 Path::new("lib/b/lib"),
3137 ]
3138 );
3139 });
3140 }
3141
3142 #[gpui::test]
3143 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3144 let parent_dir = temp_tree(json!({
3145 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3146 "tree": {
3147 ".git": {},
3148 ".gitignore": "ignored-dir\n",
3149 "tracked-dir": {
3150 "tracked-file1": "",
3151 "ancestor-ignored-file1": "",
3152 },
3153 "ignored-dir": {
3154 "ignored-file1": ""
3155 }
3156 }
3157 }));
3158 let dir = parent_dir.path().join("tree");
3159
3160 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3161
3162 let tree = Worktree::local(
3163 client,
3164 dir.as_path(),
3165 true,
3166 Arc::new(RealFs),
3167 Default::default(),
3168 &mut cx.to_async(),
3169 )
3170 .await
3171 .unwrap();
3172 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3173 .await;
3174 tree.flush_fs_events(cx).await;
3175 cx.read(|cx| {
3176 let tree = tree.read(cx);
3177 assert!(
3178 !tree
3179 .entry_for_path("tracked-dir/tracked-file1")
3180 .unwrap()
3181 .is_ignored
3182 );
3183 assert!(
3184 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3185 .unwrap()
3186 .is_ignored
3187 );
3188 assert!(
3189 tree.entry_for_path("ignored-dir/ignored-file1")
3190 .unwrap()
3191 .is_ignored
3192 );
3193 });
3194
3195 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3196 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3197 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3198 tree.flush_fs_events(cx).await;
3199 cx.read(|cx| {
3200 let tree = tree.read(cx);
3201 assert!(
3202 !tree
3203 .entry_for_path("tracked-dir/tracked-file2")
3204 .unwrap()
3205 .is_ignored
3206 );
3207 assert!(
3208 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3209 .unwrap()
3210 .is_ignored
3211 );
3212 assert!(
3213 tree.entry_for_path("ignored-dir/ignored-file2")
3214 .unwrap()
3215 .is_ignored
3216 );
3217 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3218 });
3219 }
3220
3221 #[gpui::test]
3222 async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3223 let root = temp_tree(json!({
3224 "dir1": {
3225 ".git": {},
3226 "deps": {
3227 "dep1": {
3228 ".git": {},
3229 "src": {
3230 "a.txt": ""
3231 }
3232 }
3233 },
3234 "src": {
3235 "b.txt": ""
3236 }
3237 },
3238 "c.txt": "",
3239 }));
3240
3241 let http_client = FakeHttpClient::with_404_response();
3242 let client = cx.read(|cx| Client::new(http_client, cx));
3243 let tree = Worktree::local(
3244 client,
3245 root.path(),
3246 true,
3247 Arc::new(RealFs),
3248 Default::default(),
3249 &mut cx.to_async(),
3250 )
3251 .await
3252 .unwrap();
3253
3254 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3255 .await;
3256 tree.flush_fs_events(cx).await;
3257
3258 tree.read_with(cx, |tree, _cx| {
3259 let tree = tree.as_local().unwrap();
3260
3261 assert!(tree.repo_for("c.txt".as_ref()).is_none());
3262
3263 let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3264 assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
3265 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));
3266
3267 let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
3268 assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
3269 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
3270 });
3271
3272 let original_scan_id = tree.read_with(cx, |tree, _cx| {
3273 let tree = tree.as_local().unwrap();
3274 tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
3275 });
3276
3277 std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
3278 tree.flush_fs_events(cx).await;
3279
3280 tree.read_with(cx, |tree, _cx| {
3281 let tree = tree.as_local().unwrap();
3282 let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
3283 assert_ne!(
3284 original_scan_id, new_scan_id,
3285 "original {original_scan_id}, new {new_scan_id}"
3286 );
3287 });
3288
3289 std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
3290 tree.flush_fs_events(cx).await;
3291
3292 tree.read_with(cx, |tree, _cx| {
3293 let tree = tree.as_local().unwrap();
3294
3295 assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
3296 });
3297 }
3298
3299 #[test]
3300 fn test_changed_repos() {
3301 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3302 GitRepositoryEntry {
3303 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3304 scan_id,
3305 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3306 git_dir_path: git_dir_path.as_ref().into(),
3307 }
3308 }
3309
3310 let prev_repos: Vec<GitRepositoryEntry> = vec![
3311 fake_entry("/.git", 0),
3312 fake_entry("/a/.git", 0),
3313 fake_entry("/a/b/.git", 0),
3314 ];
3315
3316 let new_repos: Vec<GitRepositoryEntry> = vec![
3317 fake_entry("/a/.git", 1),
3318 fake_entry("/a/b/.git", 0),
3319 fake_entry("/a/c/.git", 0),
3320 ];
3321
3322 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3323
3324 // Deletion retained
3325 assert!(res
3326 .iter()
3327 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3328 .is_some());
3329
3330 // Update retained
3331 assert!(res
3332 .iter()
3333 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3334 .is_some());
3335
3336 // Addition retained
3337 assert!(res
3338 .iter()
3339 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3340 .is_some());
3341
        // No change, not retained
3343 assert!(res
3344 .iter()
3345 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3346 .is_none());
3347 }
3348
3349 #[gpui::test]
3350 async fn test_write_file(cx: &mut TestAppContext) {
3351 let dir = temp_tree(json!({
3352 ".git": {},
3353 ".gitignore": "ignored-dir\n",
3354 "tracked-dir": {},
3355 "ignored-dir": {}
3356 }));
3357
3358 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3359
3360 let tree = Worktree::local(
3361 client,
3362 dir.path(),
3363 true,
3364 Arc::new(RealFs),
3365 Default::default(),
3366 &mut cx.to_async(),
3367 )
3368 .await
3369 .unwrap();
3370 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3371 .await;
3372 tree.flush_fs_events(cx).await;
3373
3374 tree.update(cx, |tree, cx| {
3375 tree.as_local().unwrap().write_file(
3376 Path::new("tracked-dir/file.txt"),
3377 "hello".into(),
3378 Default::default(),
3379 cx,
3380 )
3381 })
3382 .await
3383 .unwrap();
3384 tree.update(cx, |tree, cx| {
3385 tree.as_local().unwrap().write_file(
3386 Path::new("ignored-dir/file.txt"),
3387 "world".into(),
3388 Default::default(),
3389 cx,
3390 )
3391 })
3392 .await
3393 .unwrap();
3394
3395 tree.read_with(cx, |tree, _| {
3396 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3397 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3398 assert!(!tracked.is_ignored);
3399 assert!(ignored.is_ignored);
3400 });
3401 }
3402
3403 #[gpui::test(iterations = 30)]
3404 async fn test_create_directory(cx: &mut TestAppContext) {
3405 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3406
3407 let fs = FakeFs::new(cx.background());
3408 fs.insert_tree(
3409 "/a",
3410 json!({
3411 "b": {},
3412 "c": {},
3413 "d": {},
3414 }),
3415 )
3416 .await;
3417
3418 let tree = Worktree::local(
3419 client,
3420 "/a".as_ref(),
3421 true,
3422 fs,
3423 Default::default(),
3424 &mut cx.to_async(),
3425 )
3426 .await
3427 .unwrap();
3428
3429 let entry = tree
3430 .update(cx, |tree, cx| {
3431 tree.as_local_mut()
3432 .unwrap()
3433 .create_entry("a/e".as_ref(), true, cx)
3434 })
3435 .await
3436 .unwrap();
3437 assert!(entry.is_dir());
3438
3439 cx.foreground().run_until_parked();
3440 tree.read_with(cx, |tree, _| {
3441 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3442 });
3443 }
3444
3445 #[gpui::test(iterations = 100)]
3446 fn test_random(mut rng: StdRng) {
3447 let operations = env::var("OPERATIONS")
3448 .map(|o| o.parse().unwrap())
3449 .unwrap_or(40);
3450 let initial_entries = env::var("INITIAL_ENTRIES")
3451 .map(|o| o.parse().unwrap())
3452 .unwrap_or(20);
3453
3454 let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
3455 for _ in 0..initial_entries {
3456 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
3457 }
3458 log::info!("Generated initial tree");
3459
3460 let (notify_tx, _notify_rx) = mpsc::unbounded();
3461 let fs = Arc::new(RealFs);
3462 let next_entry_id = Arc::new(AtomicUsize::new(0));
3463 let mut initial_snapshot = LocalSnapshot {
3464 removed_entry_ids: Default::default(),
3465 ignores_by_parent_abs_path: Default::default(),
3466 git_repositories: Default::default(),
3467 next_entry_id: next_entry_id.clone(),
3468 snapshot: Snapshot {
3469 id: WorktreeId::from_usize(0),
3470 entries_by_path: Default::default(),
3471 entries_by_id: Default::default(),
3472 abs_path: root_dir.path().into(),
3473 root_name: Default::default(),
3474 root_char_bag: Default::default(),
3475 scan_id: 0,
3476 completed_scan_id: 0,
3477 },
3478 };
3479 initial_snapshot.insert_entry(
3480 Entry::new(
3481 Path::new("").into(),
3482 &smol::block_on(fs.metadata(root_dir.path()))
3483 .unwrap()
3484 .unwrap(),
3485 &next_entry_id,
3486 Default::default(),
3487 ),
3488 fs.as_ref(),
3489 );
3490 let mut scanner = BackgroundScanner::new(
3491 Arc::new(Mutex::new(initial_snapshot.clone())),
3492 notify_tx,
3493 fs.clone(),
3494 Arc::new(gpui::executor::Background::new()),
3495 );
3496 smol::block_on(scanner.scan_dirs()).unwrap();
3497 scanner.snapshot().check_invariants();
3498
3499 let mut events = Vec::new();
3500 let mut snapshots = Vec::new();
3501 let mut mutations_len = operations;
3502 while mutations_len > 1 {
3503 if !events.is_empty() && rng.gen_bool(0.4) {
3504 let len = rng.gen_range(0..=events.len());
3505 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
3506 log::info!("Delivering events: {:#?}", to_deliver);
3507 smol::block_on(scanner.process_events(to_deliver));
3508 scanner.snapshot().check_invariants();
3509 } else {
3510 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
3511 mutations_len -= 1;
3512 }
3513
3514 if rng.gen_bool(0.2) {
3515 snapshots.push(scanner.snapshot());
3516 }
3517 }
3518 log::info!("Quiescing: {:#?}", events);
3519 smol::block_on(scanner.process_events(events));
3520 scanner.snapshot().check_invariants();
3521
3522 let (notify_tx, _notify_rx) = mpsc::unbounded();
3523 let mut new_scanner = BackgroundScanner::new(
3524 Arc::new(Mutex::new(initial_snapshot)),
3525 notify_tx,
3526 scanner.fs.clone(),
3527 scanner.executor.clone(),
3528 );
3529 smol::block_on(new_scanner.scan_dirs()).unwrap();
3530 assert_eq!(
3531 scanner.snapshot().to_vec(true),
3532 new_scanner.snapshot().to_vec(true)
3533 );
3534
3535 for mut prev_snapshot in snapshots {
3536 let include_ignored = rng.gen::<bool>();
3537 if !include_ignored {
3538 let mut entries_by_path_edits = Vec::new();
3539 let mut entries_by_id_edits = Vec::new();
3540 for entry in prev_snapshot
3541 .entries_by_id
3542 .cursor::<()>()
3543 .filter(|e| e.is_ignored)
3544 {
3545 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3546 entries_by_id_edits.push(Edit::Remove(entry.id));
3547 }
3548
3549 prev_snapshot
3550 .entries_by_path
3551 .edit(entries_by_path_edits, &());
3552 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3553 }
3554
3555 let update = scanner
3556 .snapshot()
3557 .build_update(&prev_snapshot, 0, 0, include_ignored);
3558 prev_snapshot.apply_remote_update(update).unwrap();
3559 assert_eq!(
3560 prev_snapshot.to_vec(true),
3561 scanner.snapshot().to_vec(include_ignored)
3562 );
3563 }
3564 }
3565
3566 fn randomly_mutate_tree(
3567 root_path: &Path,
3568 insertion_probability: f64,
3569 rng: &mut impl Rng,
3570 ) -> Result<Vec<fsevent::Event>> {
3571 let root_path = root_path.canonicalize().unwrap();
3572 let (dirs, files) = read_dir_recursive(root_path.clone());
3573
3574 let mut events = Vec::new();
3575 let mut record_event = |path: PathBuf| {
3576 events.push(fsevent::Event {
3577 event_id: SystemTime::now()
3578 .duration_since(UNIX_EPOCH)
3579 .unwrap()
3580 .as_secs(),
3581 flags: fsevent::StreamFlags::empty(),
3582 path,
3583 });
3584 };
3585
3586 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
3587 let path = dirs.choose(rng).unwrap();
3588 let new_path = path.join(gen_name(rng));
3589
3590 if rng.gen() {
3591 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
3592 std::fs::create_dir(&new_path)?;
3593 } else {
3594 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
3595 std::fs::write(&new_path, "")?;
3596 }
3597 record_event(new_path);
3598 } else if rng.gen_bool(0.05) {
3599 let ignore_dir_path = dirs.choose(rng).unwrap();
3600 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
3601
3602 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
3603 let files_to_ignore = {
3604 let len = rng.gen_range(0..=subfiles.len());
3605 subfiles.choose_multiple(rng, len)
3606 };
3607 let dirs_to_ignore = {
3608 let len = rng.gen_range(0..subdirs.len());
3609 subdirs.choose_multiple(rng, len)
3610 };
3611
3612 let mut ignore_contents = String::new();
3613 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
3614 writeln!(
3615 ignore_contents,
3616 "{}",
3617 path_to_ignore
3618 .strip_prefix(&ignore_dir_path)?
3619 .to_str()
3620 .unwrap()
3621 )
3622 .unwrap();
3623 }
3624 log::info!(
3625 "Creating {:?} with contents:\n{}",
3626 ignore_path.strip_prefix(&root_path)?,
3627 ignore_contents
3628 );
3629 std::fs::write(&ignore_path, ignore_contents).unwrap();
3630 record_event(ignore_path);
3631 } else {
3632 let old_path = {
3633 let file_path = files.choose(rng);
3634 let dir_path = dirs[1..].choose(rng);
3635 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
3636 };
3637
3638 let is_rename = rng.gen();
3639 if is_rename {
3640 let new_path_parent = dirs
3641 .iter()
3642 .filter(|d| !d.starts_with(old_path))
3643 .choose(rng)
3644 .unwrap();
3645
3646 let overwrite_existing_dir =
3647 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
3648 let new_path = if overwrite_existing_dir {
3649 std::fs::remove_dir_all(&new_path_parent).ok();
3650 new_path_parent.to_path_buf()
3651 } else {
3652 new_path_parent.join(gen_name(rng))
3653 };
3654
3655 log::info!(
3656 "Renaming {:?} to {}{:?}",
3657 old_path.strip_prefix(&root_path)?,
3658 if overwrite_existing_dir {
3659 "overwrite "
3660 } else {
3661 ""
3662 },
3663 new_path.strip_prefix(&root_path)?
3664 );
3665 std::fs::rename(&old_path, &new_path)?;
3666 record_event(old_path.clone());
3667 record_event(new_path);
3668 } else if old_path.is_dir() {
3669 let (dirs, files) = read_dir_recursive(old_path.clone());
3670
3671 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
3672 std::fs::remove_dir_all(&old_path).unwrap();
3673 for file in files {
3674 record_event(file);
3675 }
3676 for dir in dirs {
3677 record_event(dir);
3678 }
3679 } else {
3680 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
3681 std::fs::remove_file(old_path).unwrap();
3682 record_event(old_path.clone());
3683 }
3684 }
3685
3686 Ok(events)
3687 }
3688
3689 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3690 let child_entries = std::fs::read_dir(&path).unwrap();
3691 let mut dirs = vec![path];
3692 let mut files = Vec::new();
3693 for child_entry in child_entries {
3694 let child_path = child_entry.unwrap().path();
3695 if child_path.is_dir() {
3696 let (child_dirs, child_files) = read_dir_recursive(child_path);
3697 dirs.extend(child_dirs);
3698 files.extend(child_files);
3699 } else {
3700 files.push(child_path);
3701 }
3702 }
3703 (dirs, files)
3704 }
3705
3706 fn gen_name(rng: &mut impl Rng) -> String {
3707 (0..6)
3708 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3709 .map(char::from)
3710 .collect()
3711 }
3712
3713 impl LocalSnapshot {
3714 fn check_invariants(&self) {
3715 let mut files = self.files(true, 0);
3716 let mut visible_files = self.files(false, 0);
3717 for entry in self.entries_by_path.cursor::<()>() {
3718 if entry.is_file() {
3719 assert_eq!(files.next().unwrap().inode, entry.inode);
3720 if !entry.is_ignored {
3721 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3722 }
3723 }
3724 }
3725 assert!(files.next().is_none());
3726 assert!(visible_files.next().is_none());
3727
3728 let mut bfs_paths = Vec::new();
3729 let mut stack = vec![Path::new("")];
3730 while let Some(path) = stack.pop() {
3731 bfs_paths.push(path);
3732 let ix = stack.len();
3733 for child_entry in self.child_entries(path) {
3734 stack.insert(ix, &child_entry.path);
3735 }
3736 }
3737
3738 let dfs_paths_via_iter = self
3739 .entries_by_path
3740 .cursor::<()>()
3741 .map(|e| e.path.as_ref())
3742 .collect::<Vec<_>>();
3743 assert_eq!(bfs_paths, dfs_paths_via_iter);
3744
3745 let dfs_paths_via_traversal = self
3746 .entries(true)
3747 .map(|e| e.path.as_ref())
3748 .collect::<Vec<_>>();
3749 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3750
3751 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3752 let ignore_parent_path =
3753 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3754 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3755 assert!(self
3756 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3757 .is_some());
3758 }
3759 }
3760
3761 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3762 let mut paths = Vec::new();
3763 for entry in self.entries_by_path.cursor::<()>() {
3764 if include_ignored || !entry.is_ignored {
3765 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3766 }
3767 }
3768 paths.sort_by(|a, b| a.0.cmp(b.0));
3769 paths
3770 }
3771 }
3772}