1use super::{ignore::IgnoreStack, DiagnosticSummary};
2use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
3use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
4use anyhow::{anyhow, Context, Result};
5use client::{proto, Client};
6use clock::ReplicaId;
7use collections::{HashMap, VecDeque};
8use fs::LineEnding;
9use fs::{repository::GitRepository, Fs};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 Stream, StreamExt,
16};
17use fuzzy::CharBag;
18use git::{DOT_GIT, GITIGNORE};
19use gpui::{
20 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
21 Task,
22};
23use language::File as _;
24use language::{
25 proto::{
26 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
27 serialize_version,
28 },
29 Buffer, DiagnosticEntry, PointUtf16, Rope, RopeFingerprint, Unclipped,
30};
31use parking_lot::Mutex;
32use postage::{
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36
37use smol::channel::{self, Sender};
38use std::{
39 any::Any,
40 cmp::{self, Ordering},
41 convert::TryFrom,
42 ffi::OsStr,
43 fmt,
44 future::Future,
45 mem,
46 ops::{Deref, DerefMut},
47 path::{Path, PathBuf},
48 sync::{atomic::AtomicUsize, Arc},
49 task::Poll,
50 time::{Duration, SystemTime},
51};
52use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
53use util::paths::HOME;
54use util::{ResultExt, TryFutureExt};
55
/// Identifies a worktree within the application. For local worktrees this is
/// derived from the model id; for remote worktrees it mirrors the host's id.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
58
/// A directory tree tracked by the project: either one backed by the local
/// filesystem, or a replica of a worktree hosted by a remote collaborator.
#[allow(clippy::large_enum_variant)]
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// A worktree backed by the local filesystem, kept up to date by a background
/// scanner task that watches for filesystem events.
pub struct LocalWorktree {
    // Foreground copy of the snapshot, refreshed from `background_snapshot`
    // by `poll_snapshot`.
    snapshot: LocalSnapshot,
    // Snapshot mutated by the background scanner; shared via a mutex.
    background_snapshot: Arc<Mutex<LocalSnapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    _background_scanner_task: Option<Task<()>>,
    // Debounce task that re-polls the snapshot while the initial scan runs.
    poll_task: Option<Task<()>>,
    // Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
78
/// A replica of a worktree hosted by a remote collaborator, updated by
/// applying `proto::UpdateWorktree` messages received over RPC.
pub struct RemoteWorktree {
    pub snapshot: Snapshot,
    // Snapshot updated on a background task as remote updates arrive.
    pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once the host has disconnected; incoming updates are then dropped.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters for a given scan id, kept sorted by scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    visible: bool,
    disconnected: bool,
}
91
/// An immutable view of a worktree's entries at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    // Name of the root entry, used for fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    // Entries indexed two ways: by path, and by stable entry id.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // `scan_id` advances when a scan begins; `completed_scan_id` trails it and
    // catches up when the scan finishes.
    scan_id: usize,
    completed_scan_id: usize,
}
103
/// A git repository discovered inside a worktree.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    // Scan id of the worktree scan that last observed this repository.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
115
116impl std::fmt::Debug for GitRepositoryEntry {
117 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
118 f.debug_struct("GitRepositoryEntry")
119 .field("content_path", &self.content_path)
120 .field("git_dir_path", &self.git_dir_path)
121 .field("libgit_repository", &"LibGitRepository")
122 .finish()
123 }
124}
125
/// A `Snapshot` plus the local-only bookkeeping needed by the scanner:
/// gitignore state, discovered git repositories, and entry-id allocation.
pub struct LocalSnapshot {
    // Parsed .gitignore per parent directory, with the scan id at which it
    // was last observed.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    git_repositories: Vec<GitRepositoryEntry>,
    // Maps filesystem inode-like ids to the ProjectEntryId of a removed
    // entry, so re-created files can keep a stable id.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
133
134impl Clone for LocalSnapshot {
135 fn clone(&self) -> Self {
136 Self {
137 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
138 git_repositories: self.git_repositories.iter().cloned().collect(),
139 removed_entry_ids: self.removed_entry_ids.clone(),
140 next_entry_id: self.next_entry_id.clone(),
141 snapshot: self.snapshot.clone(),
142 }
143 }
144}
145
// Allow a `LocalSnapshot` to be used anywhere a `&Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
153
// Mutable access to the inner `Snapshot` through the same deref sugar.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
159
/// State of the background filesystem scanner, broadcast over a watch channel.
#[derive(Clone, Debug)]
enum ScanState {
    /// The scanner is caught up with the filesystem.
    Idle,
    /// The worktree is performing its initial scan of the filesystem.
    Initializing,
    /// The worktree is updating in response to filesystem events.
    Updating,
    /// The scanner failed; the error is shared so the state stays `Clone`.
    Err(Arc<anyhow::Error>),
}
169
/// State held while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    // Publishes new snapshots to the task that diffs and uploads them.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signaled to resume sending updates after a transient RPC failure.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
176
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// The set of entries changed (files added, removed, or modified).
    UpdatedEntries,
    /// The listed git repositories changed since the previous snapshot.
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
181
// Registers `Worktree` as a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
185
186impl Worktree {
    /// Creates a local worktree rooted at `path` and starts the background
    /// scanner task that watches the filesystem and keeps the shared
    /// snapshot up to date.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.abs_path().clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                // Coalesce filesystem events over a 100ms window before rescanning.
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
211
    /// Creates a remote worktree replica from the host's metadata, plus two
    /// background tasks: one applies incoming updates to the shared snapshot,
    /// the other propagates each applied update to the foreground model and
    /// wakes any `wait_for_snapshot` subscribers whose scan id was reached.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut MutableAppContext,
    ) -> ModelHandle<Self> {
        let remote_id = worktree.id;
        // Lowercased for case-insensitive fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let visible = worktree.visible;

        let abs_path = PathBuf::from(worktree.abs_path);
        // Start with an empty snapshot; entries arrive via worktree updates.
        let snapshot = Snapshot {
            id: WorktreeId(remote_id as usize),
            abs_path: Arc::from(abs_path.deref()),
            root_name,
            root_char_bag,
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            scan_id: 0,
            completed_scan_id: 0,
        };

        let (updates_tx, mut updates_rx) = mpsc::unbounded();
        let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
        let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
        let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot: background_snapshot.clone(),
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible,
                disconnected: false,
            })
        });

        // Apply remote updates to the background snapshot off the main thread.
        cx.background()
            .spawn(async move {
                while let Some(update) = updates_rx.next().await {
                    if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
                        log::error!("error applying worktree update: {}", error);
                    }
                    snapshot_updated_tx.send(()).await.ok();
                }
            })
            .detach();

        // On each applied update, refresh the foreground snapshot and resolve
        // subscribers waiting on scan ids that have now been observed.
        cx.spawn(|mut cx| {
            let this = worktree_handle.downgrade();
            async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            this.poll_snapshot(cx);
                            let this = this.as_remote_mut().unwrap();
                            // Subscriptions are kept sorted by scan id, so pop
                            // from the front while they're satisfied.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        // Model was dropped; stop the task.
                        break;
                    }
                }
            }
        })
        .detach();

        worktree_handle
    }
296
297 pub fn as_local(&self) -> Option<&LocalWorktree> {
298 if let Worktree::Local(worktree) = self {
299 Some(worktree)
300 } else {
301 None
302 }
303 }
304
305 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
306 if let Worktree::Remote(worktree) = self {
307 Some(worktree)
308 } else {
309 None
310 }
311 }
312
313 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
314 if let Worktree::Local(worktree) = self {
315 Some(worktree)
316 } else {
317 None
318 }
319 }
320
321 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
322 if let Worktree::Remote(worktree) = self {
323 Some(worktree)
324 } else {
325 None
326 }
327 }
328
329 pub fn is_local(&self) -> bool {
330 matches!(self, Worktree::Local(_))
331 }
332
333 pub fn is_remote(&self) -> bool {
334 !self.is_local()
335 }
336
    /// Returns a clone of the current foreground snapshot; the local variant's
    /// extra bookkeeping is stripped down to the plain `Snapshot`.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
343
    /// Id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
350
    /// Id of the most recently completed scan; trails `scan_id` while a scan
    /// is in progress.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
357
    /// Whether this worktree is shown in the project panel.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
364
    /// The collaboration replica id: 0 for the local host, otherwise the id
    /// assigned to this guest.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
371
    /// Iterates over per-path diagnostic summaries for this worktree.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.0.clone(), *summary))
    }
382
    /// Refreshes the foreground snapshot from the background one and emits
    /// change events. The local variant never forces a refresh here.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => worktree.poll_snapshot(false, cx),
            Self::Remote(worktree) => worktree.poll_snapshot(cx),
        };
    }
389
    /// Absolute path of the worktree root on the host machine.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
396}
397
398impl LocalWorktree {
    /// Builds the worktree model with an initial snapshot (containing just the
    /// root entry, when it exists) and returns it together with the sender the
    /// background scanner will use to report scan-state changes.
    async fn create(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
        let abs_path = path.into();
        // Worktree-relative path of the root entry is always empty.
        let path: Arc<Path> = Arc::from(Path::new(""));

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) =
            watch::channel_with(ScanState::Initializing);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path,
                    root_name: root_name.clone(),
                    root_char_bag,
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    scan_id: 0,
                    completed_scan_id: 0,
                },
            };
            // `metadata` is `None` when the path doesn't exist yet; the
            // snapshot then starts out with no entries at all.
            if let Some(metadata) = metadata {
                let entry = Entry::new(
                    path,
                    &metadata,
                    &snapshot.next_entry_id,
                    snapshot.root_char_bag,
                );
                snapshot.insert_entry(entry, fs.as_ref());
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            };

            // Forward scan-state changes into the watch channel and re-poll
            // the snapshot on each one; ends when the model is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some(scan_state) = scan_states_rx.next().await {
                    if let Some(this) = this.upgrade(&cx) {
                        last_scan_state_tx.blocking_send(scan_state).ok();
                        this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
482
483 pub fn contains_abs_path(&self, path: &Path) -> bool {
484 path.starts_with(&self.abs_path)
485 }
486
487 fn absolutize(&self, path: &Path) -> PathBuf {
488 if path.file_name().is_some() {
489 self.abs_path.join(path)
490 } else {
491 self.abs_path.to_path_buf()
492 }
493 }
494
    /// Loads the file at `path` into a new `Buffer` model, including its git
    /// index text (if any) as the diff base, and kicks off an initial git
    /// diff computation.
    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                // Buffers loaded locally always use replica id 0.
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
512
    /// Returns the stored diagnostics for a worktree-relative path, if any.
    pub fn diagnostics_for_path(
        &self,
        path: &Path,
    ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
        self.diagnostics.get(path).cloned()
    }
519
    /// Replaces the diagnostics for `worktree_path`, updates the per-path
    /// summary, and — when shared — pushes the new summary to collaborators.
    /// Returns whether anything actually changed.
    pub fn update_diagnostics(
        &mut self,
        language_server_id: usize,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        self.diagnostics.remove(&worktree_path);
        let old_summary = self
            .diagnostic_summaries
            .remove(&PathKey(worktree_path.clone()))
            .unwrap_or_default();
        let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
        // Only store non-empty summaries; an empty one means "no diagnostics".
        if !new_summary.is_empty() {
            self.diagnostic_summaries
                .insert(PathKey(worktree_path.clone()), new_summary);
            self.diagnostics.insert(worktree_path.clone(), diagnostics);
        }

        // Changed if there were diagnostics before, after, or both.
        let updated = !old_summary.is_empty() || !new_summary.is_empty();
        if updated {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: language_server_id as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(updated)
    }
559
    /// Copies the background snapshot to the foreground, emits change events,
    /// and — while the initial scan is still running — schedules another poll.
    /// When the scanner is neither idle nor initializing, the snapshot is only
    /// refreshed if `force` is set.
    fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
        // Cancel any pending re-poll; we're handling it now.
        self.poll_task.take();

        match self.scan_state() {
            ScanState::Idle => {
                let new_snapshot = self.background_snapshot.lock().clone();
                let updated_repos = Self::changed_repos(
                    &self.snapshot.git_repositories,
                    &new_snapshot.git_repositories,
                );
                self.snapshot = new_snapshot;

                // Publish the fresh snapshot to collaborators, if shared.
                if let Some(share) = self.share.as_mut() {
                    *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
                }

                cx.emit(Event::UpdatedEntries);

                if !updated_repos.is_empty() {
                    cx.emit(Event::UpdatedGitRepositories(updated_repos));
                }
            }

            ScanState::Initializing => {
                let is_fake_fs = self.fs.is_fake();

                let new_snapshot = self.background_snapshot.lock().clone();
                let updated_repos = Self::changed_repos(
                    &self.snapshot.git_repositories,
                    &new_snapshot.git_repositories,
                );
                self.snapshot = new_snapshot;

                // Keep polling until the initial scan settles; in tests the
                // delay is randomized instead of a fixed 100ms.
                self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
                    if is_fake_fs {
                        #[cfg(any(test, feature = "test-support"))]
                        cx.background().simulate_random_delay().await;
                    } else {
                        smol::Timer::after(Duration::from_millis(100)).await;
                    }
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                    }
                }));

                cx.emit(Event::UpdatedEntries);

                if !updated_repos.is_empty() {
                    cx.emit(Event::UpdatedGitRepositories(updated_repos));
                }
            }

            _ => {
                if force {
                    self.snapshot = self.background_snapshot.lock().clone();
                }
            }
        }

        cx.notify();
    }
621
    /// Computes the symmetric difference between two repository lists, where a
    /// repo "matches" only if both its `.git` dir path and its scan id are
    /// equal. Entries differing in either direction are returned, keyed (and
    /// deduplicated) by `.git` dir path, with `new_repos` entries winning.
    fn changed_repos(
        old_repos: &[GitRepositoryEntry],
        new_repos: &[GitRepositoryEntry],
    ) -> Vec<GitRepositoryEntry> {
        // Inserts every repo in `a` that has no exact counterpart in `b`.
        fn diff<'a>(
            a: &'a [GitRepositoryEntry],
            b: &'a [GitRepositoryEntry],
            updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
        ) {
            for a_repo in a {
                let matched = b.iter().find(|b_repo| {
                    a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
                }
            }
        }

        let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();

        // Second call overwrites first on shared keys, preferring new entries.
        diff(old_repos, new_repos, &mut updated);
        diff(new_repos, old_repos, &mut updated);

        updated.into_values().collect()
    }
649
    /// Returns a future that resolves once the scanner is no longer
    /// initializing or updating (i.e. it is idle, errored, or the channel
    /// closed).
    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut scan_state_rx = self.last_scan_state_rx.clone();
        async move {
            let mut scan_state = Some(scan_state_rx.borrow().clone());
            while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
                scan_state = scan_state_rx.recv().await;
            }
        }
    }
659
    /// The scanner's most recently reported state.
    fn scan_state(&self) -> ScanState {
        self.last_scan_state_rx.borrow().clone()
    }
663
    /// A clone of the current foreground snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
667
    /// Serializes this worktree's identity for sharing with collaborators.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
676
    /// Reads the file at `path` from disk, fetches its git index text (the
    /// diff base) when it lives inside a known repository, and refreshes the
    /// snapshot entry for the file. Returns the `File`, its contents, and the
    /// optional diff base.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            // Look up the index text relative to the repository's content root.
            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    // Reading the git index can block, so do it off the
                    // foreground thread.
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local()
                        .unwrap()
                        .refresh_entry(path, abs_path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
727
728 pub fn save_buffer(
729 &self,
730 buffer_handle: ModelHandle<Buffer>,
731 path: Arc<Path>,
732 has_changed_file: bool,
733 cx: &mut ModelContext<Worktree>,
734 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
735 let handle = cx.handle();
736 let buffer = buffer_handle.read(cx);
737
738 let rpc = self.client.clone();
739 let buffer_id = buffer.remote_id();
740 let project_id = self.share.as_ref().map(|share| share.project_id);
741
742 let text = buffer.as_rope().clone();
743 let fingerprint = text.fingerprint();
744 let version = buffer.version();
745 let save = self.write_file(path, text, buffer.line_ending(), cx);
746
747 cx.as_mut().spawn(|mut cx| async move {
748 let entry = save.await?;
749
750 if has_changed_file {
751 let new_file = Arc::new(File {
752 entry_id: entry.id,
753 worktree: handle,
754 path: entry.path,
755 mtime: entry.mtime,
756 is_local: true,
757 is_deleted: false,
758 });
759
760 if let Some(project_id) = project_id {
761 rpc.send(proto::UpdateBufferFile {
762 project_id,
763 buffer_id,
764 file: Some(new_file.to_proto()),
765 })
766 .log_err();
767 }
768
769 buffer_handle.update(&mut cx, |buffer, cx| {
770 if has_changed_file {
771 buffer.file_updated(new_file, cx).detach();
772 }
773 });
774 }
775
776 if let Some(project_id) = project_id {
777 rpc.send(proto::BufferSaved {
778 project_id,
779 buffer_id,
780 version: serialize_version(&version),
781 mtime: Some(entry.mtime.into()),
782 fingerprint: serialize_fingerprint(fingerprint),
783 })?;
784 }
785
786 buffer_handle.update(&mut cx, |buffer, cx| {
787 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
788 });
789
790 Ok((version, fingerprint, entry.mtime))
791 })
792 }
793
794 pub fn create_entry(
795 &self,
796 path: impl Into<Arc<Path>>,
797 is_dir: bool,
798 cx: &mut ModelContext<Worktree>,
799 ) -> Task<Result<Entry>> {
800 self.write_entry_internal(
801 path,
802 if is_dir {
803 None
804 } else {
805 Some(Default::default())
806 },
807 cx,
808 )
809 }
810
811 pub fn write_file(
812 &self,
813 path: impl Into<Arc<Path>>,
814 text: Rope,
815 line_ending: LineEnding,
816 cx: &mut ModelContext<Worktree>,
817 ) -> Task<Result<Entry>> {
818 self.write_entry_internal(path, Some((text, line_ending)), cx)
819 }
820
    /// Deletes the entry (file or directory tree) with the given id from disk
    /// and from the snapshot. Returns `None` if the id is unknown.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let delete = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_path = abs_path;
            async move {
                if entry.is_file() {
                    fs.remove_file(&abs_path, Default::default()).await
                } else {
                    // Directories are removed recursively.
                    fs.remove_dir(
                        &abs_path,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: false,
                        },
                    )
                    .await
                }
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            delete.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                {
                    // Scope the lock so it's released before polling.
                    let mut snapshot = this.background_snapshot.lock();
                    snapshot.delete_entry(entry_id);
                }
                this.poll_snapshot(true, cx);
            });
            Ok(())
        }))
    }
860
    /// Renames/moves the entry with the given id to `new_path` on disk, then
    /// refreshes the snapshot (removing the old path). Returns `None` if the
    /// id is unknown.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(new_path.as_ref());
        let rename = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_new_path = abs_new_path.clone();
            async move {
                fs.rename(&abs_old_path, &abs_new_path, Default::default())
                    .await
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    // Passing `old_path` makes refresh_entry drop the stale entry.
                    this.as_local_mut().unwrap().refresh_entry(
                        new_path.clone(),
                        abs_new_path,
                        Some(old_path),
                        cx,
                    )
                })
                .await?;
            Ok(entry)
        }))
    }
895
    /// Recursively copies the entry with the given id to `new_path` on disk,
    /// then refreshes the snapshot for the destination. Returns `None` if the
    /// id is unknown.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let copy = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_new_path = abs_new_path.clone();
            async move {
                copy_recursive(
                    fs.as_ref(),
                    &abs_old_path,
                    &abs_new_path,
                    Default::default(),
                )
                .await
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut().unwrap().refresh_entry(
                        new_path.clone(),
                        abs_new_path,
                        None,
                        cx,
                    )
                })
                .await?;
            Ok(entry)
        }))
    }
935
    /// Shared implementation for `create_entry` and `write_file`: writes a
    /// file (when `text_if_file` is `Some`) or creates a directory, then
    /// refreshes the snapshot entry for the path.
    fn write_entry_internal(
        &self,
        path: impl Into<Arc<Path>>,
        text_if_file: Option<(Rope, LineEnding)>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let write = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_path = abs_path.clone();
            async move {
                if let Some((text, line_ending)) = text_if_file {
                    fs.save(&abs_path, &text, line_ending).await
                } else {
                    fs.create_dir(&abs_path).await
                }
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut()
                        .unwrap()
                        .refresh_entry(path, abs_path, None, cx)
                })
                .await?;
            Ok(entry)
        })
    }
968
    /// Re-stats `abs_path` and inserts an up-to-date entry for `path` into the
    /// background snapshot, removing `old_path` first when provided (used by
    /// rename). The foreground snapshot is then force-refreshed.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        abs_path: PathBuf,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let root_char_bag;
        let next_entry_id;
        {
            // Grab what we need up front so the lock isn't held across await.
            let snapshot = self.background_snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
        }
        cx.spawn_weak(|this, mut cx| async move {
            let metadata = fs
                .metadata(&abs_path)
                .await?
                .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let inserted_entry;
                {
                    let mut snapshot = this.background_snapshot.lock();
                    let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
                    // Recompute ignored status from the gitignore stack.
                    entry.is_ignored = snapshot
                        .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
                        .is_abs_path_ignored(&abs_path, entry.is_dir());
                    if let Some(old_path) = old_path {
                        snapshot.remove_path(&old_path);
                    }
                    // Bracket the insert in scan start/complete so scan ids advance.
                    snapshot.scan_started();
                    inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
                    snapshot.scan_completed();
                }
                this.poll_snapshot(true, cx);
                Ok(inserted_entry)
            })
        })
    }
1013
    /// Starts (or resumes) sharing this worktree with collaborators. Sends the
    /// current diagnostic summaries, then spawns a task that diffs each new
    /// snapshot against the previous one and streams the updates in chunks.
    /// The returned task resolves once the first full snapshot has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already shared: resolve immediately and nudge the update task to
            // resume after any transient failure.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            for (path, summary) in self.diagnostic_summaries.iter() {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id,
                    summary: Some(summary.to_proto(&path.0)),
                }) {
                    return Task::ready(Err(e));
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first diff sends
                    // everything.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Tiny chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal before retrying.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                // Block until `share` is called again; give up
                                // if the sender is dropped (unshared).
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // First snapshot fully sent: resolve the share future.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1097
1098 pub fn unshare(&mut self) {
1099 self.share.take();
1100 }
1101
1102 pub fn is_shared(&self) -> bool {
1103 self.share.is_some()
1104 }
1105}
1106
1107impl RemoteWorktree {
    /// A clone of the current foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1111
    /// Copies the background snapshot (updated by the RPC task) to the
    /// foreground and notifies observers.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
        self.snapshot = self.background_snapshot.lock().clone();
        cx.emit(Event::UpdatedEntries);
        cx.notify();
    }
1117
1118 pub fn disconnected_from_host(&mut self) {
1119 self.updates_tx.take();
1120 self.snapshot_subscriptions.clear();
1121 self.disconnected = true;
1122 }
1123
    /// Asks the host to save the buffer over RPC, then records the resulting
    /// version, fingerprint, and mtime on the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            // The host reports the version it actually saved, which may differ
            // from the one we requested.
            let version = deserialize_version(response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1156
    /// Queues a worktree update from the host for application on the
    /// background task. Updates are silently dropped after disconnection
    /// (when `updates_tx` has been taken).
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }
1164
1165 fn observed_snapshot(&self, scan_id: usize) -> bool {
1166 self.completed_scan_id >= scan_id
1167 }
1168
1169 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1170 let (tx, rx) = oneshot::channel();
1171 if self.observed_snapshot(scan_id) {
1172 let _ = tx.send(());
1173 } else if self.disconnected {
1174 drop(tx);
1175 } else {
1176 match self
1177 .snapshot_subscriptions
1178 .binary_search_by_key(&scan_id, |probe| probe.0)
1179 {
1180 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1181 }
1182 }
1183
1184 async move {
1185 rx.await?;
1186 Ok(())
1187 }
1188 }
1189
1190 pub fn update_diagnostic_summary(
1191 &mut self,
1192 path: Arc<Path>,
1193 summary: &proto::DiagnosticSummary,
1194 ) {
1195 let summary = DiagnosticSummary {
1196 language_server_id: summary.language_server_id as usize,
1197 error_count: summary.error_count as usize,
1198 warning_count: summary.warning_count as usize,
1199 };
1200 if summary.is_empty() {
1201 self.diagnostic_summaries.remove(&PathKey(path));
1202 } else {
1203 self.diagnostic_summaries.insert(PathKey(path), summary);
1204 }
1205 }
1206
1207 pub fn insert_entry(
1208 &mut self,
1209 entry: proto::Entry,
1210 scan_id: usize,
1211 cx: &mut ModelContext<Worktree>,
1212 ) -> Task<Result<Entry>> {
1213 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1214 cx.spawn(|this, mut cx| async move {
1215 wait_for_snapshot.await?;
1216 this.update(&mut cx, |worktree, _| {
1217 let worktree = worktree.as_remote_mut().unwrap();
1218 let mut snapshot = worktree.background_snapshot.lock();
1219 let entry = snapshot.insert_entry(entry);
1220 worktree.snapshot = snapshot.clone();
1221 entry
1222 })
1223 })
1224 }
1225
1226 pub(crate) fn delete_entry(
1227 &mut self,
1228 id: ProjectEntryId,
1229 scan_id: usize,
1230 cx: &mut ModelContext<Worktree>,
1231 ) -> Task<Result<()>> {
1232 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1233 cx.spawn(|this, mut cx| async move {
1234 wait_for_snapshot.await?;
1235 this.update(&mut cx, |worktree, _| {
1236 let worktree = worktree.as_remote_mut().unwrap();
1237 let mut snapshot = worktree.background_snapshot.lock();
1238 snapshot.delete_entry(id);
1239 worktree.snapshot = snapshot.clone();
1240 });
1241 Ok(())
1242 })
1243 }
1244}
1245
impl Snapshot {
    /// The id of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts (or replaces) an entry received over the wire, keeping the
    /// path-keyed and id-keyed trees consistent with each other.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        if let Some(old_entry) = old_entry {
            // The entry may have moved; drop its old path before inserting
            // the new one.
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes the entry with the given id along with every entry beneath its
    /// path. Returns `false` when the id is unknown.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
        if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
            self.entries_by_path = {
                let mut cursor = self.entries_by_path.cursor();
                // Keep everything strictly before the removed path...
                let mut new_entries_by_path =
                    cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
                // ...skip the removed entry and its descendants, also
                // unregistering their ids...
                while let Some(entry) = cursor.item() {
                    if entry.path.starts_with(&removed_entry.path) {
                        self.entries_by_id.remove(&entry.id, &());
                        cursor.next(&());
                    } else {
                        break;
                    }
                }
                // ...and keep everything after.
                new_entries_by_path.push_tree(cursor.suffix(&()), &());
                new_entries_by_path
            };

            true
        } else {
            false
        }
    }

    /// Applies a batch of removals and updates sent by the host, recording
    /// the scan id (and, for the final batch of a scan, the completed scan id).
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry = self
                .entry_for_id(ProjectEntryId::from_proto(entry_id))
                .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry moved, remove its record at the old path.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of file entries that are not ignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal at the nth entry, counting only the kinds of
    /// entries selected by `include_dirs`/`include_ignored`.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal at the entry with the given path.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates file entries, starting at the given file offset.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates all entries, including directories.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates the paths of all entries, skipping the root entry's empty path.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates the direct children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root, if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Begins a new scan by bumping the scan id.
    pub fn scan_started(&mut self) {
        self.scan_id += 1;
    }

    /// Marks the current scan id as fully applied.
    pub fn scan_completed(&mut self) {
        self.completed_scan_id = self.scan_id;
    }

    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Looks up the entry at exactly the given worktree-relative path.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal lands at or after `path`; only an exact match counts.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id, resolving through its recorded path.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at the given path, if any.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1454
impl LocalSnapshot {
    /// Gives the most specific git repository containing the given path, if any.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() //git_repository is ordered lexicographically
            .find(|repo| repo.manages(path))
            .cloned()
    }

    /// Returns the repository whose `.git` directory contains the given path.
    pub(crate) fn in_dot_git(&mut self, path: &Path) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Builds an `UpdateWorktree` message describing this entire snapshot,
    /// as sent when a guest first joins.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Builds an `UpdateWorktree` message containing the difference between
    /// this snapshot and `other` (an older snapshot), by merging the two
    /// id-ordered entry sequences.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        // Present only in `self`: a newly created entry.
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        // Present in both: re-send only if it was rescanned.
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        // Present only in `other`: the entry was removed.
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Inserts a scanned entry, reusing an existing id for the same path or
    /// inode where possible, and (re)loading any `.gitignore` file it names.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote a fully scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the children of a just-scanned directory, promoting the parent
    /// from pending to scanned and registering any ignore file or git
    /// repository discovered inside it.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            // A directory is only populated once, straight after being
            // inserted as pending.
            unreachable!();
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            // Only register the repository if one isn't already recorded for
            // this content path; keep `git_repositories` sorted.
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Reassigns the entry's id so moves (same inode) and rescans (same path)
    /// keep a stable id.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` and all of its descendants, remembering
    /// removed ids by inode so they can be reused if the files reappear, and
    /// dirtying any ignore file or git repository located at `path`.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            // Split the tree into [before `path`] + [subtree at `path`] + [after].
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the highest id seen for each inode so re-created files
            // get their most recent id back.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the ignore file as changed in this scan rather than
            // removing it outright.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    /// Collects the inodes of all known ancestors of `path` (used to detect
    /// symlink cycles during scanning).
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignores that applies to `abs_path`, walking
    /// from the outermost ancestor inward and short-circuiting to "ignore
    /// everything" as soon as an ancestor directory is itself ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    /// All git repositories known to this snapshot, ordered lexicographically
    /// by content path.
    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}
1760
impl GitRepositoryEntry {
    /// Whether this repository's working directory contains `path`.
    /// Note that these paths should be relative to the worktree root.
    pub(crate) fn manages(&self, path: &Path) -> bool {
        path.starts_with(self.content_path.as_ref())
    }

    /// Whether `path` lies within this repository's `.git` directory.
    /// Note that this path should be relative to the worktree root.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}
1772
1773async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1774 let contents = fs.load(abs_path).await?;
1775 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1776 let mut builder = GitignoreBuilder::new(parent);
1777 for line in contents.lines() {
1778 builder.add_line(Some(abs_path.into()), line)?;
1779 }
1780 Ok(builder.build()?)
1781}
1782
1783impl WorktreeId {
1784 pub fn from_usize(handle_id: usize) -> Self {
1785 Self(handle_id)
1786 }
1787
1788 pub(crate) fn from_proto(id: u64) -> Self {
1789 Self(id as usize)
1790 }
1791
1792 pub fn to_proto(&self) -> u64 {
1793 self.0 as u64
1794 }
1795
1796 pub fn to_usize(&self) -> usize {
1797 self.0
1798 }
1799}
1800
1801impl fmt::Display for WorktreeId {
1802 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1803 self.0.fmt(f)
1804 }
1805}
1806
1807impl Deref for Worktree {
1808 type Target = Snapshot;
1809
1810 fn deref(&self) -> &Self::Target {
1811 match self {
1812 Worktree::Local(worktree) => &worktree.snapshot,
1813 Worktree::Remote(worktree) => &worktree.snapshot,
1814 }
1815 }
1816}
1817
1818impl Deref for LocalWorktree {
1819 type Target = LocalSnapshot;
1820
1821 fn deref(&self) -> &Self::Target {
1822 &self.snapshot
1823 }
1824}
1825
1826impl Deref for RemoteWorktree {
1827 type Target = Snapshot;
1828
1829 fn deref(&self) -> &Self::Target {
1830 &self.snapshot
1831 }
1832}
1833
1834impl fmt::Debug for LocalWorktree {
1835 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1836 self.snapshot.fmt(f)
1837 }
1838}
1839
1840impl fmt::Debug for Snapshot {
1841 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1842 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1843 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1844
1845 impl<'a> fmt::Debug for EntriesByPath<'a> {
1846 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1847 f.debug_map()
1848 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1849 .finish()
1850 }
1851 }
1852
1853 impl<'a> fmt::Debug for EntriesById<'a> {
1854 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1855 f.debug_list().entries(self.0.iter()).finish()
1856 }
1857 }
1858
1859 f.debug_struct("Snapshot")
1860 .field("id", &self.id)
1861 .field("root_name", &self.root_name)
1862 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1863 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1864 .finish()
1865 }
1866}
1867
/// A handle to a file within a worktree, implementing `language::File`.
#[derive(Clone, PartialEq)]
pub struct File {
    // The worktree this file belongs to.
    pub worktree: ModelHandle<Worktree>,
    // The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    // Last-known modification time.
    pub mtime: SystemTime,
    // Id of the project entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    // Whether the file belongs to a local (as opposed to remote) worktree.
    pub(crate) is_local: bool,
    // Whether the underlying entry has been deleted from the worktree.
    pub(crate) is_deleted: bool,
}
1877
impl language::File for File {
    /// Returns `self` as a `LocalFile` when this file belongs to a local worktree.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    /// Last-known modification time of the file.
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a user-facing path: for visible worktrees, rooted at the
    /// worktree name; otherwise the worktree's absolute path, with the home
    /// directory abbreviated to `~` for local worktrees.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // The root entry's path is empty; pushing it would be a no-op anyway,
        // but skip it explicitly.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    /// Whether the underlying entry has been deleted from the worktree.
    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission to collaborators.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            // NOTE(review): this is the model handle's id, while `from_proto`
            // compares against the worktree's own id — presumably these
            // coincide for the worktrees sent over the wire; confirm.
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1945
impl language::LocalFile for File {
    /// The file's absolute path on disk (worktree root joined with the
    /// relative path). Panics if the worktree is not local.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that the buffer for this file was reloaded from
    /// disk. A no-op when the worktree is not currently shared; send errors
    /// are logged and ignored.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut MutableAppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
1989
1990impl File {
1991 pub fn from_proto(
1992 proto: rpc::proto::File,
1993 worktree: ModelHandle<Worktree>,
1994 cx: &AppContext,
1995 ) -> Result<Self> {
1996 let worktree_id = worktree
1997 .read(cx)
1998 .as_remote()
1999 .ok_or_else(|| anyhow!("not remote"))?
2000 .id();
2001
2002 if worktree_id.to_proto() != proto.worktree_id {
2003 return Err(anyhow!("worktree id does not match file"));
2004 }
2005
2006 Ok(Self {
2007 worktree,
2008 path: Path::new(&proto.path).into(),
2009 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2010 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2011 is_local: false,
2012 is_deleted: proto.is_deleted,
2013 })
2014 }
2015
2016 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2017 file.and_then(|f| f.as_any().downcast_ref())
2018 }
2019
2020 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2021 self.worktree.read(cx).id()
2022 }
2023
2024 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2025 if self.is_deleted {
2026 None
2027 } else {
2028 Some(self.entry_id)
2029 }
2030 }
2031}
2032
/// A single filesystem entry (file or directory) within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    // Stable id, preserved across renames and rescans where possible.
    pub id: ProjectEntryId,
    // File, scanned directory, or not-yet-scanned directory.
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    // Filesystem inode, used to re-associate ids after moves.
    pub inode: u64,
    // Last-known modification time.
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // Whether the entry is excluded by a gitignore rule.
    pub is_ignored: bool,
}
2043
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    // A directory whose children have not been scanned yet.
    PendingDir,
    // A fully scanned directory.
    Dir,
    // A file, carrying the character bag used for fuzzy matching.
    File(CharBag),
}
2050
2051impl Entry {
2052 fn new(
2053 path: Arc<Path>,
2054 metadata: &fs::Metadata,
2055 next_entry_id: &AtomicUsize,
2056 root_char_bag: CharBag,
2057 ) -> Self {
2058 Self {
2059 id: ProjectEntryId::new(next_entry_id),
2060 kind: if metadata.is_dir {
2061 EntryKind::PendingDir
2062 } else {
2063 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2064 },
2065 path,
2066 inode: metadata.inode,
2067 mtime: metadata.mtime,
2068 is_symlink: metadata.is_symlink,
2069 is_ignored: false,
2070 }
2071 }
2072
2073 pub fn is_dir(&self) -> bool {
2074 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2075 }
2076
2077 pub fn is_file(&self) -> bool {
2078 matches!(self.kind, EntryKind::File(_))
2079 }
2080}
2081
2082impl sum_tree::Item for Entry {
2083 type Summary = EntrySummary;
2084
2085 fn summary(&self) -> Self::Summary {
2086 let visible_count = if self.is_ignored { 0 } else { 1 };
2087 let file_count;
2088 let visible_file_count;
2089 if self.is_file() {
2090 file_count = 1;
2091 visible_file_count = visible_count;
2092 } else {
2093 file_count = 0;
2094 visible_file_count = 0;
2095 }
2096
2097 EntrySummary {
2098 max_path: self.path.clone(),
2099 count: 1,
2100 visible_count,
2101 file_count,
2102 visible_file_count,
2103 }
2104 }
2105}
2106
2107impl sum_tree::KeyedItem for Entry {
2108 type Key = PathKey;
2109
2110 fn key(&self) -> Self::Key {
2111 PathKey(self.path.clone())
2112 }
2113}
2114
/// Aggregated statistics over a span of `Entry` items in the path-ordered tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest path in the summarized span.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that are not gitignored.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are files and not gitignored.
    visible_file_count: usize,
}
2123
2124impl Default for EntrySummary {
2125 fn default() -> Self {
2126 Self {
2127 max_path: Arc::from(Path::new("")),
2128 count: 0,
2129 visible_count: 0,
2130 file_count: 0,
2131 visible_file_count: 0,
2132 }
2133 }
2134}
2135
2136impl sum_tree::Summary for EntrySummary {
2137 type Context = ();
2138
2139 fn add_summary(&mut self, rhs: &Self, _: &()) {
2140 self.max_path = rhs.max_path.clone();
2141 self.count += rhs.count;
2142 self.visible_count += rhs.visible_count;
2143 self.file_count += rhs.file_count;
2144 self.visible_file_count += rhs.visible_file_count;
2145 }
2146}
2147
/// An id-keyed record mirroring an `Entry`, stored in the id-ordered tree.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    // Path of the corresponding entry, used to resolve back to it.
    path: Arc<Path>,
    is_ignored: bool,
    // Scan id in which this record was last written.
    scan_id: usize,
}
2155
2156impl sum_tree::Item for PathEntry {
2157 type Summary = PathEntrySummary;
2158
2159 fn summary(&self) -> Self::Summary {
2160 PathEntrySummary { max_id: self.id }
2161 }
2162}
2163
2164impl sum_tree::KeyedItem for PathEntry {
2165 type Key = ProjectEntryId;
2166
2167 fn key(&self) -> Self::Key {
2168 self.id
2169 }
2170}
2171
/// Summary over a span of `PathEntry` items: the greatest id in the span.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2176
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // Items are id-ordered, so the right-hand summary holds the maximum.
        self.max_id = summary.max_id;
    }
}
2184
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        // The dimension tracks the maximum entry id seen so far.
        *self = summary.max_id;
    }
}
2190
/// Ordered key wrapping a worktree-relative path, used to index the path tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2193
2194impl Default for PathKey {
2195 fn default() -> Self {
2196 Self(Path::new("").into())
2197 }
2198}
2199
2200impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2201 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2202 self.0 = summary.max_path.clone();
2203 }
2204}
2205
/// Scans a local worktree on background threads, writing results into a
/// shared snapshot and reporting progress over a channel.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // The snapshot this scanner keeps up to date.
    snapshot: Arc<Mutex<LocalSnapshot>>,
    // Channel on which scan-state transitions are reported.
    notify: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
}
2212
2213impl BackgroundScanner {
2214 fn new(
2215 snapshot: Arc<Mutex<LocalSnapshot>>,
2216 notify: UnboundedSender<ScanState>,
2217 fs: Arc<dyn Fs>,
2218 executor: Arc<executor::Background>,
2219 ) -> Self {
2220 Self {
2221 fs,
2222 snapshot,
2223 notify,
2224 executor,
2225 }
2226 }
2227
2228 fn abs_path(&self) -> Arc<Path> {
2229 self.snapshot.lock().abs_path.clone()
2230 }
2231
2232 fn snapshot(&self) -> LocalSnapshot {
2233 self.snapshot.lock().clone()
2234 }
2235
    /// Drives the scanner: performs the initial scan, then processes batches
    /// of filesystem events until the receiver of `notify` is dropped (each
    /// failed send means nobody is listening anymore, so we stop).
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.unbounded_send(ScanState::Initializing).is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .unbounded_send(ScanState::Err(Arc::new(err)))
                .is_err()
            {
                return;
            }
        }

        if self.notify.unbounded_send(ScanState::Idle).is_err() {
            return;
        }

        futures::pin_mut!(events_rx);

        while let Some(mut events) = events_rx.next().await {
            // Coalesce any additional event batches that are already ready,
            // so a single rescan pass covers them all.
            while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
                events.extend(additional_events);
            }

            if self.notify.unbounded_send(ScanState::Updating).is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.unbounded_send(ScanState::Idle).is_err() {
                break;
            }
        }
    }
2275
    /// Performs the initial recursive scan of the worktree: seeds ignore
    /// rules from `.gitignore` files above the root, then walks the directory
    /// tree in parallel across one worker per CPU.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let root_abs_path;
        let root_inode;
        let is_dir;
        let next_entry_id;
        {
            // Take everything we need under a single short-lived lock.
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_started();
            root_char_bag = snapshot.root_char_bag;
            root_abs_path = snapshot.abs_path.clone();
            root_inode = snapshot.root_entry().map(|e| e.inode);
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
            next_entry_id = snapshot.next_entry_id.clone();
        };

        // Populate ignores above the root.
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }

        let ignore_stack = {
            let mut snapshot = self.snapshot.lock();
            let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If the root itself is ignored by an ancestor's rules, mark its
            // entry accordingly before scanning children.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
            ignore_stack
        };

        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let mut ancestor_inodes = TreeSet::default();
            if let Some(root_inode) = root_inode {
                ancestor_inodes.insert(root_inode);
            }

            // Seed the work queue with the root job, then drop our sender:
            // each job carries a clone of the sender, so the queue closes
            // only once all recursively spawned jobs have completed.
            let (tx, rx) = channel::unbounded();
            self.executor
                .block(tx.send(ScanJob {
                    abs_path: root_abs_path.to_path_buf(),
                    path,
                    ignore_stack,
                    ancestor_inodes,
                    scan_queue: tx.clone(),
                }))
                .unwrap();
            drop(tx);

            // One worker per CPU drains the queue; per-directory errors are
            // logged and do not abort the scan.
            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;

            self.snapshot.lock().scan_completed();
        }

        Ok(())
    }
2356
2357 async fn scan_dir(
2358 &self,
2359 root_char_bag: CharBag,
2360 next_entry_id: Arc<AtomicUsize>,
2361 job: &ScanJob,
2362 ) -> Result<()> {
2363 let mut new_entries: Vec<Entry> = Vec::new();
2364 let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2365 let mut ignore_stack = job.ignore_stack.clone();
2366 let mut new_ignore = None;
2367
2368 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2369 while let Some(child_abs_path) = child_paths.next().await {
2370 let child_abs_path = match child_abs_path {
2371 Ok(child_abs_path) => child_abs_path,
2372 Err(error) => {
2373 log::error!("error processing entry {:?}", error);
2374 continue;
2375 }
2376 };
2377
2378 let child_name = child_abs_path.file_name().unwrap();
2379 let child_path: Arc<Path> = job.path.join(child_name).into();
2380 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2381 Ok(Some(metadata)) => metadata,
2382 Ok(None) => continue,
2383 Err(err) => {
2384 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2385 continue;
2386 }
2387 };
2388
2389 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2390 if child_name == *GITIGNORE {
2391 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2392 Ok(ignore) => {
2393 let ignore = Arc::new(ignore);
2394 ignore_stack =
2395 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2396 new_ignore = Some(ignore);
2397 }
2398 Err(error) => {
2399 log::error!(
2400 "error loading .gitignore file {:?} - {:?}",
2401 child_name,
2402 error
2403 );
2404 }
2405 }
2406
2407 // Update ignore status of any child entries we've already processed to reflect the
2408 // ignore file in the current directory. Because `.gitignore` starts with a `.`,
2409 // there should rarely be too numerous. Update the ignore stack associated with any
2410 // new jobs as well.
2411 let mut new_jobs = new_jobs.iter_mut();
2412 for entry in &mut new_entries {
2413 let entry_abs_path = self.abs_path().join(&entry.path);
2414 entry.is_ignored =
2415 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2416
2417 if entry.is_dir() {
2418 if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2419 job.ignore_stack = if entry.is_ignored {
2420 IgnoreStack::all()
2421 } else {
2422 ignore_stack.clone()
2423 };
2424 }
2425 }
2426 }
2427 }
2428
2429 let mut child_entry = Entry::new(
2430 child_path.clone(),
2431 &child_metadata,
2432 &next_entry_id,
2433 root_char_bag,
2434 );
2435
2436 if child_entry.is_dir() {
2437 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2438 child_entry.is_ignored = is_ignored;
2439
2440 // Avoid recursing until crash in the case of a recursive symlink
2441 if !job.ancestor_inodes.contains(&child_entry.inode) {
2442 let mut ancestor_inodes = job.ancestor_inodes.clone();
2443 ancestor_inodes.insert(child_entry.inode);
2444
2445 new_jobs.push(Some(ScanJob {
2446 abs_path: child_abs_path,
2447 path: child_path,
2448 ignore_stack: if is_ignored {
2449 IgnoreStack::all()
2450 } else {
2451 ignore_stack.clone()
2452 },
2453 ancestor_inodes,
2454 scan_queue: job.scan_queue.clone(),
2455 }));
2456 } else {
2457 new_jobs.push(None);
2458 }
2459 } else {
2460 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2461 }
2462
2463 new_entries.push(child_entry);
2464 }
2465
2466 self.snapshot.lock().populate_dir(
2467 job.path.clone(),
2468 new_entries,
2469 new_ignore,
2470 self.fs.as_ref(),
2471 );
2472
2473 for new_job in new_jobs {
2474 if let Some(new_job) = new_job {
2475 job.scan_queue.send(new_job).await.unwrap();
2476 }
2477 }
2478
2479 Ok(())
2480 }
2481
    /// Applies a batch of file-system events to the snapshot: removed paths
    /// are pruned, changed paths are re-inserted, and newly-created
    /// directories are rescanned. Afterwards, ignore statuses and git
    /// repositories are refreshed. Returns `false` if the worktree root can
    /// no longer be canonicalized (e.g. the root was deleted).
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        // Drop events whose paths are contained within an earlier event's
        // path, since those subtrees are rescanned as a whole.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        let root_char_bag;
        let root_abs_path;
        let next_entry_id;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_started();
            root_char_bag = snapshot.root_char_bag;
            root_abs_path = snapshot.abs_path.clone();
            next_entry_id = snapshot.next_entry_id.clone();
        }

        let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
            path
        } else {
            return false;
        };
        // Stat all event paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            events
                .iter()
                .map(|event| self.fs.metadata(&event.path))
                .collect::<Vec<_>>(),
        )
        .await;

        // Hold the snapshot lock while clearing and re-inserting the root entries
        // for each event. This way, the snapshot is not observable to the foreground
        // thread while this operation is in-progress.
        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        {
            let mut snapshot = self.snapshot.lock();
            for event in &events {
                if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
                    snapshot.remove_path(path);
                }
            }

            for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
                let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
                    Ok(path) => Arc::from(path.to_path_buf()),
                    Err(_) => {
                        log::error!(
                            "unexpected event {:?} for root path {:?}",
                            event.path,
                            root_canonical_path
                        );
                        continue;
                    }
                };
                let abs_path = root_abs_path.join(&path);

                match metadata {
                    Ok(Some(metadata)) => {
                        let ignore_stack =
                            snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                        let mut fs_entry = Entry::new(
                            path.clone(),
                            &metadata,
                            snapshot.next_entry_id.as_ref(),
                            snapshot.root_char_bag,
                        );
                        fs_entry.is_ignored = ignore_stack.is_all();
                        snapshot.insert_entry(fs_entry, self.fs.as_ref());

                        // If the change landed inside a `.git` directory,
                        // reload that repository's index and stamp it with
                        // the current scan.
                        let scan_id = snapshot.scan_id;
                        if let Some(repo) = snapshot.in_dot_git(&path) {
                            repo.repo.lock().reload_index();
                            repo.scan_id = scan_id;
                        }

                        // Enqueue a scan job for new directories, guarding
                        // against recursive symlinks via ancestor inodes.
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            self.executor
                                .block(scan_queue_tx.send(ScanJob {
                                    abs_path,
                                    path,
                                    ignore_stack,
                                    ancestor_inodes,
                                    scan_queue: scan_queue_tx.clone(),
                                }))
                                .unwrap();
                        }
                    }
                    // The path no longer exists; it was already removed above.
                    Ok(None) => {}
                    Err(err) => {
                        // TODO - create a special 'error' entry in the entries tree to mark this
                        log::error!("error reading file on event {:?}", err);
                    }
                }
            }
            // Close the channel so the worker tasks below terminate once the
            // queue drains.
            drop(scan_queue_tx);
        }

        // Scan any directories that were created as part of this event batch.
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        self.update_git_repositories();
        self.snapshot.lock().scan_completed();
        true
    }
2605
    /// Recomputes `is_ignored` for entries affected by `.gitignore` files
    /// whose contents changed during the current scan, and drops ignore data
    /// for `.gitignore` files that no longer exist on disk.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A scan id equal to the current one means this ignore file
                // was (re)loaded during this scan, so its subtree needs a
                // status refresh.
                if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                // If the `.gitignore` entry itself is gone, forget its rules.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        // Remove stale ignores from both the local copy and the shared snapshot.
        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip paths nested under the current one: updating the ancestor
            // directory re-checks its descendants as well.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    abs_path: parent_abs_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        // Close the channel so the worker tasks below terminate once the
        // queue drains.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }
2667
2668 fn update_git_repositories(&self) {
2669 let mut snapshot = self.snapshot.lock();
2670 let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2671 git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
2672 snapshot.git_repositories = git_repositories;
2673 }
2674
    /// Re-evaluates `is_ignored` for the direct children of `job.abs_path`
    /// against the given ignore stack, enqueueing follow-up jobs for child
    /// directories and writing any status changes back into the live snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // If this directory has its own `.gitignore`, apply it to its children.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = self.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                // Ignored directories short-circuit to the "everything
                // ignored" stack for their children.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Batch-apply all changes to the shared snapshot under one lock.
        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2717}
2718
2719fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2720 let mut result = root_char_bag;
2721 result.extend(
2722 path.to_string_lossy()
2723 .chars()
2724 .map(|c| c.to_ascii_lowercase()),
2725 );
2726 result
2727}
2728
/// A unit of work for the background scanner: scan the directory at
/// `abs_path` and enqueue further jobs for its subdirectories.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: PathBuf,
    // Path of the directory relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules in effect for this directory's contents.
    ignore_stack: Arc<IgnoreStack>,
    // Queue onto which jobs for subdirectories are sent.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
2736
/// A unit of work for re-evaluating ignore status beneath `abs_path` after a
/// `.gitignore` change.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose children should be re-checked.
    abs_path: Arc<Path>,
    // Gitignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Queue onto which jobs for subdirectories are sent.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2742
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all pending file-system events for the worktree have been
    /// processed, so tests observe a settled snapshot.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2750
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for it to appear in the tree...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then remove it and wait for the removal to be observed.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any in-flight scan to complete.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2791
/// Accumulated entry counts up to (and including) `max_path`, used as a
/// `sum_tree` dimension when traversing worktree entries.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest entry path summarized so far.
    max_path: &'a Path,
    // Total number of entries.
    count: usize,
    // Entries that are not gitignored.
    visible_count: usize,
    // File (non-directory) entries, including ignored ones.
    file_count: usize,
    // File entries that are not gitignored.
    visible_file_count: usize,
}
2800
2801impl<'a> TraversalProgress<'a> {
2802 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2803 match (include_ignored, include_dirs) {
2804 (true, true) => self.count,
2805 (true, false) => self.file_count,
2806 (false, true) => self.visible_count,
2807 (false, false) => self.visible_file_count,
2808 }
2809 }
2810}
2811
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Folds an `EntrySummary` into the running traversal totals.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2821
impl<'a> Default for TraversalProgress<'a> {
    /// Returns zero progress: empty max path, no entries counted.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2833
/// A filtered, cursor-based iterator over worktree entries in path order.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether gitignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
2839
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching the traversal's filters.
    /// Returns `false` once the end has been reached.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until the filtered entry count reaches `offset`.
    /// Returns `false` once the end has been reached.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past all descendants of the current entry, stopping at the next
    /// entry that matches the traversal's filters. Returns `false` if no such
    /// entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek just past the subtree rooted at the current entry's path.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Stop only on entries the filters allow; otherwise keep going.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// Returns the entry at the cursor's current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Returns the number of filtered entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2885
2886impl<'a> Iterator for Traversal<'a> {
2887 type Item = &'a Entry;
2888
2889 fn next(&mut self) -> Option<Self::Item> {
2890 if let Some(item) = self.entry() {
2891 self.advance();
2892 Some(item)
2893 } else {
2894 None
2895 }
2896 }
2897}
2898
/// A seek target for a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path.
    Path(&'a Path),
    // Seek to the first entry that is not a descendant of the given path.
    PathSuccessor(&'a Path),
    // Seek until the filtered entry count reaches `count`.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2909
2910impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2911 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2912 match self {
2913 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2914 TraversalTarget::PathSuccessor(path) => {
2915 if !cursor_location.max_path.starts_with(path) {
2916 Ordering::Equal
2917 } else {
2918 Ordering::Greater
2919 }
2920 }
2921 TraversalTarget::Count {
2922 count,
2923 include_dirs,
2924 include_ignored,
2925 } => Ord::cmp(
2926 count,
2927 &cursor_location.count(*include_dirs, *include_ignored),
2928 ),
2929 }
2930 }
2931}
2932
/// Iterator over the direct children of a single directory entry.
struct ChildEntriesIter<'a> {
    // Path of the directory whose children are yielded.
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
2937
2938impl<'a> Iterator for ChildEntriesIter<'a> {
2939 type Item = &'a Entry;
2940
2941 fn next(&mut self) -> Option<Self::Item> {
2942 if let Some(item) = self.traversal.entry() {
2943 if item.path.starts_with(&self.parent_path) {
2944 self.traversal.advance_to_sibling();
2945 return Some(item);
2946 }
2947 }
2948 None
2949 }
2950}
2951
2952impl<'a> From<&'a Entry> for proto::Entry {
2953 fn from(entry: &'a Entry) -> Self {
2954 Self {
2955 id: entry.id.to_proto(),
2956 is_dir: entry.is_dir(),
2957 path: entry.path.to_string_lossy().into(),
2958 inode: entry.inode,
2959 mtime: Some(entry.mtime.into()),
2960 is_symlink: entry.is_symlink,
2961 is_ignored: entry.is_ignored,
2962 }
2963 }
2964}
2965
2966impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2967 type Error = anyhow::Error;
2968
2969 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2970 if let Some(mtime) = entry.mtime {
2971 let kind = if entry.is_dir {
2972 EntryKind::Dir
2973 } else {
2974 let mut char_bag = *root_char_bag;
2975 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2976 EntryKind::File(char_bag)
2977 };
2978 let path: Arc<Path> = PathBuf::from(entry.path).into();
2979 Ok(Entry {
2980 id: ProjectEntryId::from_proto(entry.id),
2981 kind,
2982 path,
2983 inode: entry.inode,
2984 mtime: mtime.into(),
2985 is_symlink: entry.is_symlink,
2986 is_ignored: entry.is_ignored,
2987 })
2988 } else {
2989 Err(anyhow!(
2990 "missing mtime in remote worktree entry {:?}",
2991 entry.path
2992 ))
2993 }
2994 }
2995}
2996
2997#[cfg(test)]
2998mod tests {
2999 use super::*;
3000 use anyhow::Result;
3001 use client::test::FakeHttpClient;
3002 use fs::repository::FakeGitRepository;
3003 use fs::{FakeFs, RealFs};
3004 use gpui::{executor::Deterministic, TestAppContext};
3005 use rand::prelude::*;
3006 use serde_json::json;
3007 use std::{
3008 env,
3009 fmt::Write,
3010 time::{SystemTime, UNIX_EPOCH},
3011 };
3012
3013 use util::test::temp_tree;
3014
    // Verifies that `entries(false)` skips gitignored entries ("a/b") while
    // `entries(true)` includes them.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Arc::from(Path::new("/root")),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan to finish before inspecting entries.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3072
    // Verifies that symlink cycles ("lib/a/lib" -> "..") are listed but not
    // descended into, and that renaming a cycle-forming link is picked up.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back up at its parent's parent, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Arc::from(Path::new("/root")),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlinks appear as entries, but their targets are not rescanned.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming the link should be reflected after events are processed.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3152
    // Verifies ignore statuses from both an ancestor `.gitignore` (outside
    // the worktree root) and the worktree's own `.gitignore`, including for
    // files created after the initial scan.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        // Drain any redundant FS events before asserting.
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the initial scan should get the same statuses.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3231
    // Verifies that paths resolve to their nearest enclosing git repository,
    // that changes inside `.git` bump the repo's scan id, and that deleting
    // `.git` removes the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // "c.txt" is outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // Nested repositories take precedence for their own subtrees.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        // A change inside `.git` should update the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Removing `.git` should remove the repository entirely.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3309
3310 #[test]
3311 fn test_changed_repos() {
3312 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3313 GitRepositoryEntry {
3314 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3315 scan_id,
3316 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3317 git_dir_path: git_dir_path.as_ref().into(),
3318 }
3319 }
3320
3321 let prev_repos: Vec<GitRepositoryEntry> = vec![
3322 fake_entry("/.git", 0),
3323 fake_entry("/a/.git", 0),
3324 fake_entry("/a/b/.git", 0),
3325 ];
3326
3327 let new_repos: Vec<GitRepositoryEntry> = vec![
3328 fake_entry("/a/.git", 1),
3329 fake_entry("/a/b/.git", 0),
3330 fake_entry("/a/c/.git", 0),
3331 ];
3332
3333 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3334
3335 // Deletion retained
3336 assert!(res
3337 .iter()
3338 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3339 .is_some());
3340
3341 // Update retained
3342 assert!(res
3343 .iter()
3344 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3345 .is_some());
3346
3347 // Addition retained
3348 assert!(res
3349 .iter()
3350 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3351 .is_some());
3352
3353 // Nochange, not retained
3354 assert!(res
3355 .iter()
3356 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3357 .is_none());
3358 }
3359
    // Verifies that `write_file` creates entries with the correct ignore
    // status for both tracked and gitignored directories.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        // Drain any redundant FS events before mutating the tree.
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
3413
    // Verifies that `create_entry` with `is_dir = true` produces a directory
    // entry that becomes visible in the snapshot.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/a",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/a".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        // Let background work settle before checking the snapshot.
        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });
    }
3455
    // Fuzz test: applies random filesystem mutations to a real temp directory
    // while delivering the resulting events to a `BackgroundScanner` in
    // random-sized batches, verifying snapshot invariants after every delivery
    // and comparing the final incremental state against a clean rescan.
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Both knobs are overridable via env vars for longer local fuzz runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed the temp directory; probability 1.0 forces pure insertions.
        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        // Build an empty snapshot rooted at the temp dir; the scanner's
        // initial scan will populate it.
        let (notify_tx, _notify_rx) = mpsc::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        let mut initial_snapshot = LocalSnapshot {
            removed_entry_ids: Default::default(),
            ignores_by_parent_abs_path: Default::default(),
            git_repositories: Default::default(),
            next_entry_id: next_entry_id.clone(),
            snapshot: Snapshot {
                id: WorktreeId::from_usize(0),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                abs_path: root_dir.path().into(),
                root_name: Default::default(),
                root_char_bag: Default::default(),
                scan_id: 0,
                completed_scan_id: 0,
            },
        };
        // Insert the root entry manually so the scan has a starting point.
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave mutations with event deliveries. Undelivered events pile
        // up in `events` and a random-length prefix is handed to the scanner
        // at a time, exercising partial/batched event processing.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            // Occasionally capture an intermediate snapshot so incremental
            // updates can be validated against it below.
            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Flush all remaining events so the scanner reaches a settled state.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A fresh scanner scanning the final on-disk state from scratch must
        // agree with the incrementally-maintained scanner.
        let (notify_tx, _notify_rx) = mpsc::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Each captured intermediate snapshot, patched with a `build_update`
        // diff from the final snapshot, must converge to the final state.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the old snapshot, mirroring a
                // remote peer that was never sent them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
3576
    /// Applies one random mutation to the directory tree rooted at
    /// `root_path` — creating a file/dir, writing a `.gitignore`, renaming,
    /// or deleting — and returns synthetic `fsevent::Event`s describing the
    /// paths that changed.
    ///
    /// `insertion_probability` biases the choice toward creation; `1.0`
    /// makes the call purely generative (used to seed the initial tree).
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        // `dirs` always contains at least the root itself (see
        // `read_dir_recursive`), so `choose` on it cannot fail.
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                // Event ids only need to be monotonic-ish for the test;
                // seconds-since-epoch is good enough.
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        // Force a creation when the tree is essentially empty (only the root
        // dir and no files), otherwise create with the given probability.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                std::fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                std::fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Rarely, (re)write a .gitignore in a random directory, listing a
            // random subset of that directory's descendants.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // NOTE(review): exclusive upper bound here (vs. inclusive for
                // files above) — presumably to avoid ignoring every subdir
                // including the ignore file's own directory; confirm intent.
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            std::fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Otherwise rename or delete an existing entry. The root dir
            // itself (dirs[0]) is excluded from the candidates.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Destination parent must not live inside the entry being
                // moved (that would be a rename into itself).
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally replace an existing directory wholesale, but
                // only when deleting it cannot take `old_path` with it.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    std::fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                std::fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                // Deleting a directory: emit an event for every descendant,
                // since the watcher would report each removed path.
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_dir_all(&old_path).unwrap();
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
3699
3700 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3701 let child_entries = std::fs::read_dir(&path).unwrap();
3702 let mut dirs = vec![path];
3703 let mut files = Vec::new();
3704 for child_entry in child_entries {
3705 let child_path = child_entry.unwrap().path();
3706 if child_path.is_dir() {
3707 let (child_dirs, child_files) = read_dir_recursive(child_path);
3708 dirs.extend(child_dirs);
3709 files.extend(child_files);
3710 } else {
3711 files.push(child_path);
3712 }
3713 }
3714 (dirs, files)
3715 }
3716
3717 fn gen_name(rng: &mut impl Rng) -> String {
3718 (0..6)
3719 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3720 .map(char::from)
3721 .collect()
3722 }
3723
    impl LocalSnapshot {
        /// Test-only consistency checks: the `files` traversals, the flat
        /// `entries_by_path` cursor, a `child_entries`-based walk, and the
        /// `entries` traversal must all agree on the set and order of
        /// entries, and every tracked gitignore must still exist in the
        /// snapshot.
        fn check_invariants(&self) {
            // The file traversals (first arg presumably toggles ignored
            // entries — `true` includes them) must yield exactly the file
            // entries of `entries_by_path`, in the same order.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries`. NOTE(review): despite the
            // name, inserting children at the pre-pop stack length and
            // popping from the end makes this a preorder depth-first
            // traversal, which is why it can match the DFS orders below.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            // The flat cursor order is the canonical DFS order.
            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The public `entries` traversal must match it as well.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every directory we loaded a gitignore for must still have both
            // the directory entry and its `.gitignore` entry in the snapshot.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples
        /// sorted by path, optionally filtering out ignored entries. Used by
        /// tests to compare snapshots structurally.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
3783}