use super::{ignore::IgnoreStack, DiagnosticSummary};
use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
use anyhow::{anyhow, Context, Result};
use client::{proto, Client};
use clock::ReplicaId;
use collections::{HashMap, VecDeque};
use fs::LineEnding;
use fs::{repository::GitRepository, Fs};
use futures::{
    channel::{
        mpsc::{self, UnboundedSender},
        oneshot,
    },
    Stream, StreamExt,
};
use fuzzy::CharBag;
use git::{DOT_GIT, GITIGNORE};
use gpui::{
    executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
    Task,
};
use language::{
    proto::{
        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
        serialize_version,
    },
    Buffer, DiagnosticEntry, PointUtf16, Rope, RopeFingerprint, Unclipped,
};
use parking_lot::Mutex;
use postage::{
    prelude::{Sink as _, Stream as _},
    watch,
};

use smol::channel::{self, Sender};
use std::{
    any::Any,
    cmp::{self, Ordering},
    convert::TryFrom,
    ffi::OsStr,
    fmt,
    future::Future,
    mem,
    ops::{Deref, DerefMut},
    path::{Path, PathBuf},
    sync::{atomic::AtomicUsize, Arc},
    task::Poll,
    time::{Duration, SystemTime},
};
use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
use util::paths::HOME;
use util::{ResultExt, TryFutureExt};

#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);

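/// A directory tree that a project operates on. A worktree is either backed by
/// the local filesystem or mirrored from a remote collaborator via RPC updates.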
#[allow(clippy::large_enum_variant)]
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}

pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    background_snapshot: Arc<Mutex<LocalSnapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    _background_scanner_task: Option<Task<()>>,
    poll_task: Option<Task<()>>,
    share: Option<ShareState>,
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}

pub struct RemoteWorktree {
    pub snapshot: Snapshot,
    pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    visible: bool,
    disconnected: bool,
}

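/// A view of a worktree's entries, indexed both by path and by entry id, along
/// with the scan ids describing how up to date that view is.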
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    root_name: String,
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    scan_id: usize,
    completed_scan_id: usize,
}

#[derive(Clone)]
pub struct GitRepositoryEntry {
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}

impl std::fmt::Debug for GitRepositoryEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GitRepositoryEntry")
            .field("content_path", &self.content_path)
            .field("git_dir_path", &self.git_dir_path)
            .field("libgit_repository", &"LibGitRepository")
            .finish()
    }
}

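/// A `Snapshot` augmented with local-only state: parsed `.gitignore` files,
/// discovered git repositories, and bookkeeping for reusing the ids of removed
/// entries.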
pub struct LocalSnapshot {
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    git_repositories: Vec<GitRepositoryEntry>,
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}

impl Clone for LocalSnapshot {
    fn clone(&self) -> Self {
        Self {
            ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
            git_repositories: self.git_repositories.iter().cloned().collect(),
            removed_entry_ids: self.removed_entry_ids.clone(),
            next_entry_id: self.next_entry_id.clone(),
            snapshot: self.snapshot.clone(),
        }
    }
}

impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}

#[derive(Clone, Debug)]
enum ScanState {
    /// The worktree is not currently scanning.
    Idle,
    /// The worktree is performing its initial scan of the filesystem.
    Initializing,
    /// The worktree is updating in response to filesystem events.
    Updating,
    /// The most recent scan failed with an error.
    Err(Arc<anyhow::Error>),
}

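/// State kept while a local worktree is shared with remote collaborators: the
/// remote project id, a channel for publishing new snapshots, and the task
/// that streams updates to the server.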
struct ShareState {
    project_id: u64,
    snapshots_tx: watch::Sender<LocalSnapshot>,
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}

pub enum Event {
    UpdatedEntries,
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}

impl Entity for Worktree {
    type Event = Event;
}

impl Worktree {
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.abs_path().clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }

    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut MutableAppContext,
    ) -> ModelHandle<Self> {
        let remote_id = worktree.id;
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let visible = worktree.visible;

        let abs_path = PathBuf::from(worktree.abs_path);
        let snapshot = Snapshot {
            id: WorktreeId(remote_id as usize),
            abs_path: Arc::from(abs_path.deref()),
            root_name,
            root_char_bag,
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            scan_id: 0,
            completed_scan_id: 0,
        };

        let (updates_tx, mut updates_rx) = mpsc::unbounded();
        let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
        let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
        let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot: background_snapshot.clone(),
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible,
                disconnected: false,
            })
        });

        cx.background()
            .spawn(async move {
                while let Some(update) = updates_rx.next().await {
                    if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
                        log::error!("error applying worktree update: {}", error);
                    }
                    snapshot_updated_tx.send(()).await.ok();
                }
            })
            .detach();

        cx.spawn(|mut cx| {
            let this = worktree_handle.downgrade();
            async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            this.poll_snapshot(cx);
                            let this = this.as_remote_mut().unwrap();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            }
        })
        .detach();

        worktree_handle
    }

    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.0.clone(), *summary))
    }

    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => worktree.poll_snapshot(false, cx),
            Self::Remote(worktree) => worktree.poll_snapshot(cx),
        };
    }

    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}

impl LocalWorktree {
    async fn create(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
        let abs_path = path.into();
        let path: Arc<Path> = Arc::from(Path::new(""));

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) =
            watch::channel_with(ScanState::Initializing);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path,
                    root_name: root_name.clone(),
                    root_char_bag,
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    scan_id: 0,
                    completed_scan_id: 0,
                },
            };
            if let Some(metadata) = metadata {
                let entry = Entry::new(
                    path,
                    &metadata,
                    &snapshot.next_entry_id,
                    snapshot.root_char_bag,
                );
                snapshot.insert_entry(entry, fs.as_ref());
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            };

            cx.spawn_weak(|this, mut cx| async move {
                while let Some(scan_state) = scan_states_rx.next().await {
                    if let Some(this) = this.upgrade(&cx) {
                        last_scan_state_tx.blocking_send(scan_state).ok();
                        this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }

    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }

    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.abs_path.join(path)
        } else {
            self.abs_path.to_path_buf()
        }
    }

    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }

    pub fn diagnostics_for_path(
        &self,
        path: &Path,
    ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
        self.diagnostics.get(path).cloned()
    }

    pub fn update_diagnostics(
        &mut self,
        language_server_id: usize,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        self.diagnostics.remove(&worktree_path);
        let old_summary = self
            .diagnostic_summaries
            .remove(&PathKey(worktree_path.clone()))
            .unwrap_or_default();
        let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
        if !new_summary.is_empty() {
            self.diagnostic_summaries
                .insert(PathKey(worktree_path.clone()), new_summary);
            self.diagnostics.insert(worktree_path.clone(), diagnostics);
        }

        let updated = !old_summary.is_empty() || !new_summary.is_empty();
        if updated {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: language_server_id as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(updated)
    }

    fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
        self.poll_task.take();

        match self.scan_state() {
            ScanState::Idle => {
                let new_snapshot = self.background_snapshot.lock().clone();
                let updated_repos = Self::changed_repos(
                    &self.snapshot.git_repositories,
                    &new_snapshot.git_repositories,
                );
                self.snapshot = new_snapshot;

                if let Some(share) = self.share.as_mut() {
                    *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
                }

                cx.emit(Event::UpdatedEntries);

                if !updated_repos.is_empty() {
                    cx.emit(Event::UpdatedGitRepositories(updated_repos));
                }
            }

            ScanState::Initializing => {
                let is_fake_fs = self.fs.is_fake();

                let new_snapshot = self.background_snapshot.lock().clone();
                let updated_repos = Self::changed_repos(
                    &self.snapshot.git_repositories,
                    &new_snapshot.git_repositories,
                );
                self.snapshot = new_snapshot;

                self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
                    if is_fake_fs {
                        #[cfg(any(test, feature = "test-support"))]
                        cx.background().simulate_random_delay().await;
                    } else {
                        smol::Timer::after(Duration::from_millis(100)).await;
                    }
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                    }
                }));

                cx.emit(Event::UpdatedEntries);

                if !updated_repos.is_empty() {
                    cx.emit(Event::UpdatedGitRepositories(updated_repos));
                }
            }

            _ => {
                if force {
                    self.snapshot = self.background_snapshot.lock().clone();
                }
            }
        }

        cx.notify();
    }

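    /// Returns the repository entries that differ between `old_repos` and
    /// `new_repos`, comparing them by `git_dir_path` and `scan_id` in both
    /// directions.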
    fn changed_repos(
        old_repos: &[GitRepositoryEntry],
        new_repos: &[GitRepositoryEntry],
    ) -> Vec<GitRepositoryEntry> {
        fn diff<'a>(
            a: &'a [GitRepositoryEntry],
            b: &'a [GitRepositoryEntry],
            updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
        ) {
            for a_repo in a {
                let matched = b.iter().find(|b_repo| {
                    a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
                }
            }
        }

        let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();

        diff(old_repos, new_repos, &mut updated);
        diff(new_repos, old_repos, &mut updated);

        updated.into_values().collect()
    }

    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut scan_state_rx = self.last_scan_state_rx.clone();
        async move {
            let mut scan_state = Some(scan_state_rx.borrow().clone());
            while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
                scan_state = scan_state_rx.recv().await;
            }
        }
    }

    fn scan_state(&self) -> ScanState {
        self.last_scan_state_rx.borrow().clone()
    }

    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }

    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }

    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local()
                        .unwrap()
                        .refresh_entry(path, abs_path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }

    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        path: Arc<Path>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);

        let rpc = self.client.clone();
        let buffer_id = buffer.remote_id();
        let project_id = self.share.as_ref().map(|share| share.project_id);

        let text = buffer.as_rope().clone();
        let fingerprint = text.fingerprint();
        let version = buffer.version();
        let save = self.write_file(path, text, buffer.line_ending(), cx);

        cx.as_mut().spawn(|mut cx| async move {
            let mtime = save.await?.mtime;
            if let Some(project_id) = project_id {
                rpc.send(proto::BufferSaved {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                })?;
            }
            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, None, cx);
            });
            anyhow::Ok((version, fingerprint, mtime))
        })
    }

    pub fn save_buffer_as(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let handle = cx.handle();
        let buffer = buffer_handle.read(cx);

        let text = buffer.as_rope().clone();
        let fingerprint = text.fingerprint();
        let version = buffer.version();
        let save = self.write_file(path, text, buffer.line_ending(), cx);

        cx.as_mut().spawn(|mut cx| async move {
            let entry = save.await?;
            let file = File {
                entry_id: entry.id,
                worktree: handle,
                path: entry.path,
                mtime: entry.mtime,
                is_local: true,
                is_deleted: false,
            };

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version, fingerprint, file.mtime, Some(Arc::new(file)), cx);
            });

            Ok(())
        })
    }

    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        self.write_entry_internal(
            path,
            if is_dir {
                None
            } else {
                Some(Default::default())
            },
            cx,
        )
    }

    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        self.write_entry_internal(path, Some((text, line_ending)), cx)
    }

    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let delete = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_path = abs_path;
            async move {
                if entry.is_file() {
                    fs.remove_file(&abs_path, Default::default()).await
                } else {
                    fs.remove_dir(
                        &abs_path,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: false,
                        },
                    )
                    .await
                }
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            delete.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                {
                    let mut snapshot = this.background_snapshot.lock();
                    snapshot.delete_entry(entry_id);
                }
                this.poll_snapshot(true, cx);
            });
            Ok(())
        }))
    }

    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let rename = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_new_path = abs_new_path.clone();
            async move {
                fs.rename(&abs_old_path, &abs_new_path, Default::default())
                    .await
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut().unwrap().refresh_entry(
                        new_path.clone(),
                        abs_new_path,
                        Some(old_path),
                        cx,
                    )
                })
                .await?;
            Ok(entry)
        }))
    }

    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let copy = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_new_path = abs_new_path.clone();
            async move {
                copy_recursive(
                    fs.as_ref(),
                    &abs_old_path,
                    &abs_new_path,
                    Default::default(),
                )
                .await
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut().unwrap().refresh_entry(
                        new_path.clone(),
                        abs_new_path,
                        None,
                        cx,
                    )
                })
                .await?;
            Ok(entry)
        }))
    }

    fn write_entry_internal(
        &self,
        path: impl Into<Arc<Path>>,
        text_if_file: Option<(Rope, LineEnding)>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let write = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_path = abs_path.clone();
            async move {
                if let Some((text, line_ending)) = text_if_file {
                    fs.save(&abs_path, &text, line_ending).await
                } else {
                    fs.create_dir(&abs_path).await
                }
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut()
                        .unwrap()
                        .refresh_entry(path, abs_path, None, cx)
                })
                .await?;
            Ok(entry)
        })
    }

    fn refresh_entry(
        &self,
        path: Arc<Path>,
        abs_path: PathBuf,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let root_char_bag;
        let next_entry_id;
        {
            let snapshot = self.background_snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
        }
        cx.spawn_weak(|this, mut cx| async move {
            let metadata = fs
                .metadata(&abs_path)
                .await?
                .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let inserted_entry;
                {
                    let mut snapshot = this.background_snapshot.lock();
                    let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
                    entry.is_ignored = snapshot
                        .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
                        .is_abs_path_ignored(&abs_path, entry.is_dir());
                    if let Some(old_path) = old_path {
                        snapshot.remove_path(&old_path);
                    }
                    snapshot.scan_started();
                    inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
                    snapshot.scan_completed();
                }
                this.poll_snapshot(true, cx);
                Ok(inserted_entry)
            })
        })
    }

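    /// Starts sharing this worktree under the given remote project id, sending
    /// the current diagnostic summaries and then streaming snapshot updates to
    /// the server in chunks, waiting for updates to be resumed if a send fails.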
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            for (path, summary) in self.diagnostic_summaries.iter() {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id,
                    summary: Some(summary.to_proto(&path.0)),
                }) {
                    return Task::ready(Err(e));
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }

    pub fn unshare(&mut self) {
        self.share.take();
    }

    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
}

impl RemoteWorktree {
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
        self.snapshot = self.background_snapshot.lock().clone();
        cx.emit(Event::UpdatedEntries);
        cx.notify();
    }

    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, None, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

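    /// Returns a future that resolves once this worktree has observed a
    /// snapshot with at least the given scan id, or fails if the host
    /// disconnects first.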
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let summary = DiagnosticSummary {
            language_server_id: summary.language_server_id as usize,
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };
        if summary.is_empty() {
            self.diagnostic_summaries.remove(&PathKey(path));
        } else {
            self.diagnostic_summaries.insert(PathKey(path), summary);
        }
    }

    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}

impl Snapshot {
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
        if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
            self.entries_by_path = {
                let mut cursor = self.entries_by_path.cursor();
                let mut new_entries_by_path =
                    cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
                while let Some(entry) = cursor.item() {
                    if entry.path.starts_with(&removed_entry.path) {
                        self.entries_by_id.remove(&entry.id, &());
                        cursor.next(&());
                    } else {
                        break;
                    }
                }
                new_entries_by_path.push_tree(cursor.suffix(&()), &());
                new_entries_by_path
            };

            true
        } else {
            false
        }
    }

    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry = self
                .entry_for_id(ProjectEntryId::from_proto(entry_id))
                .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    pub fn scan_started(&mut self) {
        self.scan_id += 1;
    }

    pub fn scan_completed(&mut self) {
        self.completed_scan_id = self.scan_id;
    }

    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}

impl LocalSnapshot {
    // Gives the most specific git repository for a given path
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() // `git_repositories` is ordered lexicographically
            .find(|repo| repo.manages(path))
            .cloned()
    }

    pub(crate) fn in_dot_git(&mut self, path: &Path) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

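    /// Builds an update describing the entries that were added, changed, or
    /// removed relative to `other`, by walking both id-ordered entry trees in
    /// lockstep.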
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

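    /// Removes the entry at `path` and all of its descendants, remembering the
    /// removed entry ids by inode so they can be reused, and marking any
    /// affected `.gitignore` or git repository entries with the current scan id.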
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

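    /// Builds the stack of `.gitignore` files that apply to `abs_path` by
    /// walking its ancestor directories from the outermost one inward.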
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}

impl GitRepositoryEntry {
    // Note that these paths should be relative to the worktree root.
    pub(crate) fn manages(&self, path: &Path) -> bool {
        path.starts_with(self.content_path.as_ref())
    }

    // Note that this path should be relative to the worktree root.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}

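/// Loads and parses the `.gitignore` file at `abs_path`, rooting its patterns
/// at the file's parent directory.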
async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
    let contents = fs.load(abs_path).await?;
    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
    let mut builder = GitignoreBuilder::new(parent);
    for line in contents.lines() {
        builder.add_line(Some(abs_path.into()), line)?;
    }
    Ok(builder.build()?)
}

impl WorktreeId {
    pub fn from_usize(handle_id: usize) -> Self {
        Self(handle_id)
    }

    pub(crate) fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    pub fn to_usize(&self) -> usize {
        self.0
    }
}

impl fmt::Display for WorktreeId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}

impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}

impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl<'a> fmt::Debug for EntriesByPath<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl<'a> fmt::Debug for EntriesById<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}

#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    pub(crate) is_local: bool,
    pub(crate) is_deleted: bool,
}

impl language::File for File {
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}

impl language::LocalFile for File {
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut MutableAppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}

impl File {
    pub fn from_proto(
        proto: rpc::proto::File,
        worktree: ModelHandle<Worktree>,
        cx: &AppContext,
    ) -> Result<Self> {
        let worktree_id = worktree
            .read(cx)
            .as_remote()
            .ok_or_else(|| anyhow!("not remote"))?
            .id();

        if worktree_id.to_proto() != proto.worktree_id {
            return Err(anyhow!("worktree id does not match file"));
        }

        Ok(Self {
            worktree,
            path: Path::new(&proto.path).into(),
            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
            entry_id: ProjectEntryId::from_proto(proto.entry_id),
            is_local: false,
            is_deleted: proto.is_deleted,
        })
    }

    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
        file.and_then(|f| f.as_any().downcast_ref())
    }

    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
        self.worktree.read(cx).id()
    }

    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
        if self.is_deleted {
            None
        } else {
            Some(self.entry_id)
        }
    }
}

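/// A single file or directory within a worktree, identified by a stable
/// `ProjectEntryId`.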
2034#[derive(Clone, Debug, PartialEq, Eq)]
2035pub struct Entry {
2036 pub id: ProjectEntryId,
2037 pub kind: EntryKind,
2038 pub path: Arc<Path>,
2039 pub inode: u64,
2040 pub mtime: SystemTime,
2041 pub is_symlink: bool,
2042 pub is_ignored: bool,
2043}
2044
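/// The kind of a worktree entry.
///
/// `PendingDir` marks a directory that has been discovered but whose children have not
/// yet been scanned; it is expected to become `Dir` once its contents are populated.
/// `File` carries the `CharBag` of characters in the entry's path, used for fuzzy
/// matching.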
2045#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2046pub enum EntryKind {
2047 PendingDir,
2048 Dir,
2049 File(CharBag),
2050}
2051
2052impl Entry {
2053 fn new(
2054 path: Arc<Path>,
2055 metadata: &fs::Metadata,
2056 next_entry_id: &AtomicUsize,
2057 root_char_bag: CharBag,
2058 ) -> Self {
2059 Self {
2060 id: ProjectEntryId::new(next_entry_id),
2061 kind: if metadata.is_dir {
2062 EntryKind::PendingDir
2063 } else {
2064 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2065 },
2066 path,
2067 inode: metadata.inode,
2068 mtime: metadata.mtime,
2069 is_symlink: metadata.is_symlink,
2070 is_ignored: false,
2071 }
2072 }
2073
2074 pub fn is_dir(&self) -> bool {
2075 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2076 }
2077
2078 pub fn is_file(&self) -> bool {
2079 matches!(self.kind, EntryKind::File(_))
2080 }
2081}
2082
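// Each entry contributes to four aggregated counts: all entries, non-ignored entries,
// files, and non-ignored files. These sums let a traversal seek to the n-th entry of a
// given kind without walking the whole tree.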
2083impl sum_tree::Item for Entry {
2084 type Summary = EntrySummary;
2085
2086 fn summary(&self) -> Self::Summary {
2087 let visible_count = if self.is_ignored { 0 } else { 1 };
2088 let file_count;
2089 let visible_file_count;
2090 if self.is_file() {
2091 file_count = 1;
2092 visible_file_count = visible_count;
2093 } else {
2094 file_count = 0;
2095 visible_file_count = 0;
2096 }
2097
2098 EntrySummary {
2099 max_path: self.path.clone(),
2100 count: 1,
2101 visible_count,
2102 file_count,
2103 visible_file_count,
2104 }
2105 }
2106}
2107
2108impl sum_tree::KeyedItem for Entry {
2109 type Key = PathKey;
2110
2111 fn key(&self) -> Self::Key {
2112 PathKey(self.path.clone())
2113 }
2114}
2115
2116#[derive(Clone, Debug)]
2117pub struct EntrySummary {
2118 max_path: Arc<Path>,
2119 count: usize,
2120 visible_count: usize,
2121 file_count: usize,
2122 visible_file_count: usize,
2123}
2124
2125impl Default for EntrySummary {
2126 fn default() -> Self {
2127 Self {
2128 max_path: Arc::from(Path::new("")),
2129 count: 0,
2130 visible_count: 0,
2131 file_count: 0,
2132 visible_file_count: 0,
2133 }
2134 }
2135}
2136
2137impl sum_tree::Summary for EntrySummary {
2138 type Context = ();
2139
2140 fn add_summary(&mut self, rhs: &Self, _: &()) {
2141 self.max_path = rhs.max_path.clone();
2142 self.count += rhs.count;
2143 self.visible_count += rhs.visible_count;
2144 self.file_count += rhs.file_count;
2145 self.visible_file_count += rhs.visible_file_count;
2146 }
2147}
2148
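// A secondary index over worktree entries, keyed by `ProjectEntryId`. It records each
// entry's current path, ignore status, and the scan in which it was last updated, so an
// entry can be found by id even after its path changes.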
2149#[derive(Clone, Debug)]
2150struct PathEntry {
2151 id: ProjectEntryId,
2152 path: Arc<Path>,
2153 is_ignored: bool,
2154 scan_id: usize,
2155}
2156
2157impl sum_tree::Item for PathEntry {
2158 type Summary = PathEntrySummary;
2159
2160 fn summary(&self) -> Self::Summary {
2161 PathEntrySummary { max_id: self.id }
2162 }
2163}
2164
2165impl sum_tree::KeyedItem for PathEntry {
2166 type Key = ProjectEntryId;
2167
2168 fn key(&self) -> Self::Key {
2169 self.id
2170 }
2171}
2172
2173#[derive(Clone, Debug, Default)]
2174struct PathEntrySummary {
2175 max_id: ProjectEntryId,
2176}
2177
2178impl sum_tree::Summary for PathEntrySummary {
2179 type Context = ();
2180
2181 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2182 self.max_id = summary.max_id;
2183 }
2184}
2185
2186impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2187 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2188 *self = summary.max_id;
2189 }
2190}
2191
2192#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2193pub struct PathKey(Arc<Path>);
2194
2195impl Default for PathKey {
2196 fn default() -> Self {
2197 Self(Path::new("").into())
2198 }
2199}
2200
2201impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2202 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2203 self.0 = summary.max_path.clone();
2204 }
2205}
2206
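// Scans the worktree on background threads, keeping the shared `LocalSnapshot` up to
// date and reporting progress to the foreground through `ScanState` notifications.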
2207struct BackgroundScanner {
2208 fs: Arc<dyn Fs>,
2209 snapshot: Arc<Mutex<LocalSnapshot>>,
2210 notify: UnboundedSender<ScanState>,
2211 executor: Arc<executor::Background>,
2212}
2213
2214impl BackgroundScanner {
2215 fn new(
2216 snapshot: Arc<Mutex<LocalSnapshot>>,
2217 notify: UnboundedSender<ScanState>,
2218 fs: Arc<dyn Fs>,
2219 executor: Arc<executor::Background>,
2220 ) -> Self {
2221 Self {
2222 fs,
2223 snapshot,
2224 notify,
2225 executor,
2226 }
2227 }
2228
2229 fn abs_path(&self) -> Arc<Path> {
2230 self.snapshot.lock().abs_path.clone()
2231 }
2232
2233 fn snapshot(&self) -> LocalSnapshot {
2234 self.snapshot.lock().clone()
2235 }
2236
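// Performs the initial scan, then processes file-system events in batches. Each phase
// is bracketed by `ScanState` notifications; if the receiver has been dropped, scanning
// stops.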
2237 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2238 if self.notify.unbounded_send(ScanState::Initializing).is_err() {
2239 return;
2240 }
2241
2242 if let Err(err) = self.scan_dirs().await {
2243 if self
2244 .notify
2245 .unbounded_send(ScanState::Err(Arc::new(err)))
2246 .is_err()
2247 {
2248 return;
2249 }
2250 }
2251
2252 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2253 return;
2254 }
2255
2256 futures::pin_mut!(events_rx);
2257
2258 while let Some(mut events) = events_rx.next().await {
2259 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
2260 events.extend(additional_events);
2261 }
2262
2263 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2264 break;
2265 }
2266
2267 if !self.process_events(events).await {
2268 break;
2269 }
2270
2271 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2272 break;
2273 }
2274 }
2275 }
2276
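// The initial scan: loads `.gitignore` files from the root's ancestors, then walks the
// root directory in parallel. Directories are pushed onto a work queue that is drained
// by one task per CPU.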
2277 async fn scan_dirs(&mut self) -> Result<()> {
2278 let root_char_bag;
2279 let root_abs_path;
2280 let root_inode;
2281 let is_dir;
2282 let next_entry_id;
2283 {
2284 let mut snapshot = self.snapshot.lock();
2285 snapshot.scan_started();
2286 root_char_bag = snapshot.root_char_bag;
2287 root_abs_path = snapshot.abs_path.clone();
2288 root_inode = snapshot.root_entry().map(|e| e.inode);
2289 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
2290 next_entry_id = snapshot.next_entry_id.clone();
2291 };
2292
2293 // Populate ignores above the root.
2294 for ancestor in root_abs_path.ancestors().skip(1) {
2295 if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2296 {
2297 self.snapshot
2298 .lock()
2299 .ignores_by_parent_abs_path
2300 .insert(ancestor.into(), (ignore.into(), 0));
2301 }
2302 }
2303
2304 let ignore_stack = {
2305 let mut snapshot = self.snapshot.lock();
2306 let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2307 if ignore_stack.is_all() {
2308 if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2309 root_entry.is_ignored = true;
2310 snapshot.insert_entry(root_entry, self.fs.as_ref());
2311 }
2312 }
2313 ignore_stack
2314 };
2315
2316 if is_dir {
2317 let path: Arc<Path> = Arc::from(Path::new(""));
2318 let mut ancestor_inodes = TreeSet::default();
2319 if let Some(root_inode) = root_inode {
2320 ancestor_inodes.insert(root_inode);
2321 }
2322
2323 let (tx, rx) = channel::unbounded();
2324 self.executor
2325 .block(tx.send(ScanJob {
2326 abs_path: root_abs_path.to_path_buf(),
2327 path,
2328 ignore_stack,
2329 ancestor_inodes,
2330 scan_queue: tx.clone(),
2331 }))
2332 .unwrap();
2333 drop(tx);
2334
2335 self.executor
2336 .scoped(|scope| {
2337 for _ in 0..self.executor.num_cpus() {
2338 scope.spawn(async {
2339 while let Ok(job) = rx.recv().await {
2340 if let Err(err) = self
2341 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2342 .await
2343 {
2344 log::error!("error scanning {:?}: {}", job.abs_path, err);
2345 }
2346 }
2347 });
2348 }
2349 })
2350 .await;
2351
2352 self.snapshot.lock().scan_completed();
2353 }
2354
2355 Ok(())
2356 }
2357
2358 async fn scan_dir(
2359 &self,
2360 root_char_bag: CharBag,
2361 next_entry_id: Arc<AtomicUsize>,
2362 job: &ScanJob,
2363 ) -> Result<()> {
2364 let mut new_entries: Vec<Entry> = Vec::new();
2365 let mut new_jobs: Vec<ScanJob> = Vec::new();
2366 let mut ignore_stack = job.ignore_stack.clone();
2367 let mut new_ignore = None;
2368
2369 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2370 while let Some(child_abs_path) = child_paths.next().await {
2371 let child_abs_path = match child_abs_path {
2372 Ok(child_abs_path) => child_abs_path,
2373 Err(error) => {
2374 log::error!("error processing entry {:?}", error);
2375 continue;
2376 }
2377 };
2378 let child_name = child_abs_path.file_name().unwrap();
2379 let child_path: Arc<Path> = job.path.join(child_name).into();
2380 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2381 Ok(Some(metadata)) => metadata,
2382 Ok(None) => continue,
2383 Err(err) => {
2384 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2385 continue;
2386 }
2387 };
2388
2389 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2390 if child_name == *GITIGNORE {
2391 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2392 Ok(ignore) => {
2393 let ignore = Arc::new(ignore);
2394 ignore_stack =
2395 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2396 new_ignore = Some(ignore);
2397 }
2398 Err(error) => {
2399 log::error!(
2400 "error loading .gitignore file {:?} - {:?}",
2401 child_name,
2402 error
2403 );
2404 }
2405 }
2406
2407 // Update the ignore status of any child entries we've already processed to reflect the
2408 // ignore file in the current directory. Because `.gitignore` starts with a `.`, it is
2409 // usually among the first entries we process, so there are rarely many to revisit.
2410 // Update the ignore stack associated with any new jobs as well.
2411 let mut new_jobs = new_jobs.iter_mut();
2412 for entry in &mut new_entries {
2413 let entry_abs_path = self.abs_path().join(&entry.path);
2414 entry.is_ignored =
2415 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2416 if entry.is_dir() {
2417 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2418 IgnoreStack::all()
2419 } else {
2420 ignore_stack.clone()
2421 };
2422 }
2423 }
2424 }
2425
2426 let mut child_entry = Entry::new(
2427 child_path.clone(),
2428 &child_metadata,
2429 &next_entry_id,
2430 root_char_bag,
2431 );
2432
2433 if child_entry.is_dir() {
2434 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2435 child_entry.is_ignored = is_ignored;
2436
2437 if !job.ancestor_inodes.contains(&child_entry.inode) {
2438 let mut ancestor_inodes = job.ancestor_inodes.clone();
2439 ancestor_inodes.insert(child_entry.inode);
2440 new_jobs.push(ScanJob {
2441 abs_path: child_abs_path,
2442 path: child_path,
2443 ignore_stack: if is_ignored {
2444 IgnoreStack::all()
2445 } else {
2446 ignore_stack.clone()
2447 },
2448 ancestor_inodes,
2449 scan_queue: job.scan_queue.clone(),
2450 });
2451 }
2452 } else {
2453 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2454 }
2455
2456 new_entries.push(child_entry);
2457 }
2458
2459 self.snapshot.lock().populate_dir(
2460 job.path.clone(),
2461 new_entries,
2462 new_ignore,
2463 self.fs.as_ref(),
2464 );
2465 for new_job in new_jobs {
2466 job.scan_queue.send(new_job).await.unwrap();
2467 }
2468
2469 Ok(())
2470 }
2471
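// Handles a batch of file-system events: sorts them by path, drops events for paths
// nested beneath another event in the batch, removes the affected entries from the
// snapshot, re-inserts them from fresh metadata, and queues any new directories for a
// parallel re-scan. Ignore statuses and git repositories are refreshed afterwards.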
2472 async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2473 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2474 events.dedup_by(|a, b| a.path.starts_with(&b.path));
2475
2476 let root_char_bag;
2477 let root_abs_path;
2478 let next_entry_id;
2479 {
2480 let mut snapshot = self.snapshot.lock();
2481 snapshot.scan_started();
2482 root_char_bag = snapshot.root_char_bag;
2483 root_abs_path = snapshot.abs_path.clone();
2484 next_entry_id = snapshot.next_entry_id.clone();
2485 }
2486
2487 let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
2488 path
2489 } else {
2490 return false;
2491 };
2492 let metadata = futures::future::join_all(
2493 events
2494 .iter()
2495 .map(|event| self.fs.metadata(&event.path))
2496 .collect::<Vec<_>>(),
2497 )
2498 .await;
2499
2500 // Hold the snapshot lock while clearing and re-inserting the root entries
2501 // for each event. This way, the snapshot is not observable to the foreground
2502 // thread while this operation is in progress.
2503 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2504 {
2505 let mut snapshot = self.snapshot.lock();
2506 for event in &events {
2507 if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
2508 snapshot.remove_path(path);
2509 }
2510 }
2511
2512 for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
2513 let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
2514 Ok(path) => Arc::from(path.to_path_buf()),
2515 Err(_) => {
2516 log::error!(
2517 "unexpected event {:?} for root path {:?}",
2518 event.path,
2519 root_canonical_path
2520 );
2521 continue;
2522 }
2523 };
2524 let abs_path = root_abs_path.join(&path);
2525
2526 match metadata {
2527 Ok(Some(metadata)) => {
2528 let ignore_stack =
2529 snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2530 let mut fs_entry = Entry::new(
2531 path.clone(),
2532 &metadata,
2533 snapshot.next_entry_id.as_ref(),
2534 snapshot.root_char_bag,
2535 );
2536 fs_entry.is_ignored = ignore_stack.is_all();
2537 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2538
2539 let scan_id = snapshot.scan_id;
2540 if let Some(repo) = snapshot.in_dot_git(&path) {
2541 repo.repo.lock().reload_index();
2542 repo.scan_id = scan_id;
2543 }
2544
2545 let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2546 if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2547 ancestor_inodes.insert(metadata.inode);
2548 self.executor
2549 .block(scan_queue_tx.send(ScanJob {
2550 abs_path,
2551 path,
2552 ignore_stack,
2553 ancestor_inodes,
2554 scan_queue: scan_queue_tx.clone(),
2555 }))
2556 .unwrap();
2557 }
2558 }
2559 Ok(None) => {}
2560 Err(err) => {
2561 // TODO - create a special 'error' entry in the entries tree to mark this
2562 log::error!("error reading file on event {:?}", err);
2563 }
2564 }
2565 }
2566 drop(scan_queue_tx);
2567 }
2568
2569 // Scan any directories that were created as part of this event batch.
2570 self.executor
2571 .scoped(|scope| {
2572 for _ in 0..self.executor.num_cpus() {
2573 scope.spawn(async {
2574 while let Ok(job) = scan_queue_rx.recv().await {
2575 if let Err(err) = self
2576 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2577 .await
2578 {
2579 log::error!("error scanning {:?}: {}", job.abs_path, err);
2580 }
2581 }
2582 });
2583 }
2584 })
2585 .await;
2586
2587 // Attempt to detect renames only over a single batch of file-system events.
2588 self.snapshot.lock().removed_entry_ids.clear();
2589
2590 self.update_ignore_statuses().await;
2591 self.update_git_repositories();
2592 self.snapshot.lock().scan_completed();
2593 true
2594 }
2595
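// Recomputes ignore status for subtrees whose `.gitignore` changed in the current scan
// and forgets `.gitignore` files that no longer exist. Paths nested beneath an already
// queued ancestor are skipped, and the work is spread across one task per CPU.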
2596 async fn update_ignore_statuses(&self) {
2597 let mut snapshot = self.snapshot();
2598
2599 let mut ignores_to_update = Vec::new();
2600 let mut ignores_to_delete = Vec::new();
2601 for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2602 if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2603 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2604 ignores_to_update.push(parent_abs_path.clone());
2605 }
2606
2607 let ignore_path = parent_path.join(&*GITIGNORE);
2608 if snapshot.entry_for_path(ignore_path).is_none() {
2609 ignores_to_delete.push(parent_abs_path.clone());
2610 }
2611 }
2612 }
2613
2614 for parent_abs_path in ignores_to_delete {
2615 snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2616 self.snapshot
2617 .lock()
2618 .ignores_by_parent_abs_path
2619 .remove(&parent_abs_path);
2620 }
2621
2622 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2623 ignores_to_update.sort_unstable();
2624 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2625 while let Some(parent_abs_path) = ignores_to_update.next() {
2626 while ignores_to_update
2627 .peek()
2628 .map_or(false, |p| p.starts_with(&parent_abs_path))
2629 {
2630 ignores_to_update.next().unwrap();
2631 }
2632
2633 let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2634 ignore_queue_tx
2635 .send(UpdateIgnoreStatusJob {
2636 abs_path: parent_abs_path,
2637 ignore_stack,
2638 ignore_queue: ignore_queue_tx.clone(),
2639 })
2640 .await
2641 .unwrap();
2642 }
2643 drop(ignore_queue_tx);
2644
2645 self.executor
2646 .scoped(|scope| {
2647 for _ in 0..self.executor.num_cpus() {
2648 scope.spawn(async {
2649 while let Ok(job) = ignore_queue_rx.recv().await {
2650 self.update_ignore_status(job, &snapshot).await;
2651 }
2652 });
2653 }
2654 })
2655 .await;
2656 }
2657
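// Drops git repository entries whose `.git` directory no longer exists in the snapshot.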
2658 fn update_git_repositories(&self) {
2659 let mut snapshot = self.snapshot.lock();
2660 let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2661 git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
2662 snapshot.git_repositories = git_repositories;
2663 }
2664
2665 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2666 let mut ignore_stack = job.ignore_stack;
2667 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
2668 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2669 }
2670
2671 let mut entries_by_id_edits = Vec::new();
2672 let mut entries_by_path_edits = Vec::new();
2673 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
2674 for mut entry in snapshot.child_entries(path).cloned() {
2675 let was_ignored = entry.is_ignored;
2676 let abs_path = self.abs_path().join(&entry.path);
2677 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
2678 if entry.is_dir() {
2679 let child_ignore_stack = if entry.is_ignored {
2680 IgnoreStack::all()
2681 } else {
2682 ignore_stack.clone()
2683 };
2684 job.ignore_queue
2685 .send(UpdateIgnoreStatusJob {
2686 abs_path: abs_path.into(),
2687 ignore_stack: child_ignore_stack,
2688 ignore_queue: job.ignore_queue.clone(),
2689 })
2690 .await
2691 .unwrap();
2692 }
2693
2694 if entry.is_ignored != was_ignored {
2695 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2696 path_entry.scan_id = snapshot.scan_id;
2697 path_entry.is_ignored = entry.is_ignored;
2698 entries_by_id_edits.push(Edit::Insert(path_entry));
2699 entries_by_path_edits.push(Edit::Insert(entry));
2700 }
2701 }
2702
2703 let mut snapshot = self.snapshot.lock();
2704 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2705 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2706 }
2707}
2708
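// Extends the root's character bag with the lowercased characters of the entry's
// relative path, producing the set used for fuzzy matching.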
2709fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2710 let mut result = root_char_bag;
2711 result.extend(
2712 path.to_string_lossy()
2713 .chars()
2714 .map(|c| c.to_ascii_lowercase()),
2715 );
2716 result
2717}
2718
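// A unit of work for the parallel directory scan. `ancestor_inodes` holds the inodes of
// directories already on the path from the root, so cyclic symlinks are not followed
// forever; `scan_queue` lets a job enqueue its child directories.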
2719struct ScanJob {
2720 abs_path: PathBuf,
2721 path: Arc<Path>,
2722 ignore_stack: Arc<IgnoreStack>,
2723 scan_queue: Sender<ScanJob>,
2724 ancestor_inodes: TreeSet<u64>,
2725}
2726
2727struct UpdateIgnoreStatusJob {
2728 abs_path: Arc<Path>,
2729 ignore_stack: Arc<IgnoreStack>,
2730 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2731}
2732
2733pub trait WorktreeHandle {
2734 #[cfg(any(test, feature = "test-support"))]
2735 fn flush_fs_events<'a>(
2736 &self,
2737 cx: &'a gpui::TestAppContext,
2738 ) -> futures::future::LocalBoxFuture<'a, ()>;
2739}
2740
2741impl WorktreeHandle for ModelHandle<Worktree> {
2742 // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
2743 // occurred before the worktree was constructed. These events can cause the worktree to perform
2744 // extra directory scans and emit extra scan-state notifications.
2745 //
2746 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2747 // to ensure that all redundant FS events have already been processed.
2748 #[cfg(any(test, feature = "test-support"))]
2749 fn flush_fs_events<'a>(
2750 &self,
2751 cx: &'a gpui::TestAppContext,
2752 ) -> futures::future::LocalBoxFuture<'a, ()> {
2753 use smol::future::FutureExt;
2754
2755 let filename = "fs-event-sentinel";
2756 let tree = self.clone();
2757 let (fs, root_path) = self.read_with(cx, |tree, _| {
2758 let tree = tree.as_local().unwrap();
2759 (tree.fs.clone(), tree.abs_path().clone())
2760 });
2761
2762 async move {
2763 fs.create_file(&root_path.join(filename), Default::default())
2764 .await
2765 .unwrap();
2766 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
2767 .await;
2768
2769 fs.remove_file(&root_path.join(filename), Default::default())
2770 .await
2771 .unwrap();
2772 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
2773 .await;
2774
2775 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2776 .await;
2777 }
2778 .boxed_local()
2779 }
2780}
2781
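// A sum-tree dimension that accumulates entry counts up to the cursor's position,
// letting a traversal convert between tree positions and entry indices for each
// combination of the "include directories" and "include ignored" flags.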
2782#[derive(Clone, Debug)]
2783struct TraversalProgress<'a> {
2784 max_path: &'a Path,
2785 count: usize,
2786 visible_count: usize,
2787 file_count: usize,
2788 visible_file_count: usize,
2789}
2790
2791impl<'a> TraversalProgress<'a> {
2792 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2793 match (include_ignored, include_dirs) {
2794 (true, true) => self.count,
2795 (true, false) => self.file_count,
2796 (false, true) => self.visible_count,
2797 (false, false) => self.visible_file_count,
2798 }
2799 }
2800}
2801
2802impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2803 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2804 self.max_path = summary.max_path.as_ref();
2805 self.count += summary.count;
2806 self.visible_count += summary.visible_count;
2807 self.file_count += summary.file_count;
2808 self.visible_file_count += summary.visible_file_count;
2809 }
2810}
2811
2812impl<'a> Default for TraversalProgress<'a> {
2813 fn default() -> Self {
2814 Self {
2815 max_path: Path::new(""),
2816 count: 0,
2817 visible_count: 0,
2818 file_count: 0,
2819 visible_file_count: 0,
2820 }
2821 }
2822}
2823
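/// An ordered iterator over a snapshot's entries, optionally skipping directories and
/// ignored entries.
///
/// A minimal usage sketch, assuming a `Snapshot` named `snapshot` is in scope:
///
/// ```ignore
/// // Print the paths of all non-ignored entries, in traversal order.
/// for entry in snapshot.entries(false) {
///     println!("{:?}", entry.path);
/// }
/// ```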
2824pub struct Traversal<'a> {
2825 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2826 include_ignored: bool,
2827 include_dirs: bool,
2828}
2829
2830impl<'a> Traversal<'a> {
2831 pub fn advance(&mut self) -> bool {
2832 self.advance_to_offset(self.offset() + 1)
2833 }
2834
2835 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2836 self.cursor.seek_forward(
2837 &TraversalTarget::Count {
2838 count: offset,
2839 include_dirs: self.include_dirs,
2840 include_ignored: self.include_ignored,
2841 },
2842 Bias::Right,
2843 &(),
2844 )
2845 }
2846
2847 pub fn advance_to_sibling(&mut self) -> bool {
2848 while let Some(entry) = self.cursor.item() {
2849 self.cursor.seek_forward(
2850 &TraversalTarget::PathSuccessor(&entry.path),
2851 Bias::Left,
2852 &(),
2853 );
2854 if let Some(entry) = self.cursor.item() {
2855 if (self.include_dirs || !entry.is_dir())
2856 && (self.include_ignored || !entry.is_ignored)
2857 {
2858 return true;
2859 }
2860 }
2861 }
2862 false
2863 }
2864
2865 pub fn entry(&self) -> Option<&'a Entry> {
2866 self.cursor.item()
2867 }
2868
2869 pub fn offset(&self) -> usize {
2870 self.cursor
2871 .start()
2872 .count(self.include_dirs, self.include_ignored)
2873 }
2874}
2875
2876impl<'a> Iterator for Traversal<'a> {
2877 type Item = &'a Entry;
2878
2879 fn next(&mut self) -> Option<Self::Item> {
2880 if let Some(item) = self.entry() {
2881 self.advance();
2882 Some(item)
2883 } else {
2884 None
2885 }
2886 }
2887}
2888
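// Seek targets for a traversal cursor: `Path` seeks to an exact path, `PathSuccessor`
// seeks to the first entry that is not a descendant of the given path (used to skip to
// the next sibling), and `Count` seeks to the n-th entry under the given filters.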
2889#[derive(Debug)]
2890enum TraversalTarget<'a> {
2891 Path(&'a Path),
2892 PathSuccessor(&'a Path),
2893 Count {
2894 count: usize,
2895 include_ignored: bool,
2896 include_dirs: bool,
2897 },
2898}
2899
2900impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2901 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2902 match self {
2903 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2904 TraversalTarget::PathSuccessor(path) => {
2905 if !cursor_location.max_path.starts_with(path) {
2906 Ordering::Equal
2907 } else {
2908 Ordering::Greater
2909 }
2910 }
2911 TraversalTarget::Count {
2912 count,
2913 include_dirs,
2914 include_ignored,
2915 } => Ord::cmp(
2916 count,
2917 &cursor_location.count(*include_dirs, *include_ignored),
2918 ),
2919 }
2920 }
2921}
2922
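// Iterates the immediate children of `parent_path` by repeatedly advancing the inner
// traversal to the next sibling.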
2923struct ChildEntriesIter<'a> {
2924 parent_path: &'a Path,
2925 traversal: Traversal<'a>,
2926}
2927
2928impl<'a> Iterator for ChildEntriesIter<'a> {
2929 type Item = &'a Entry;
2930
2931 fn next(&mut self) -> Option<Self::Item> {
2932 if let Some(item) = self.traversal.entry() {
2933 if item.path.starts_with(&self.parent_path) {
2934 self.traversal.advance_to_sibling();
2935 return Some(item);
2936 }
2937 }
2938 None
2939 }
2940}
2941
2942impl<'a> From<&'a Entry> for proto::Entry {
2943 fn from(entry: &'a Entry) -> Self {
2944 Self {
2945 id: entry.id.to_proto(),
2946 is_dir: entry.is_dir(),
2947 path: entry.path.to_string_lossy().into(),
2948 inode: entry.inode,
2949 mtime: Some(entry.mtime.into()),
2950 is_symlink: entry.is_symlink,
2951 is_ignored: entry.is_ignored,
2952 }
2953 }
2954}
2955
2956impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2957 type Error = anyhow::Error;
2958
2959 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2960 if let Some(mtime) = entry.mtime {
2961 let kind = if entry.is_dir {
2962 EntryKind::Dir
2963 } else {
2964 let mut char_bag = *root_char_bag;
2965 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2966 EntryKind::File(char_bag)
2967 };
2968 let path: Arc<Path> = PathBuf::from(entry.path).into();
2969 Ok(Entry {
2970 id: ProjectEntryId::from_proto(entry.id),
2971 kind,
2972 path,
2973 inode: entry.inode,
2974 mtime: mtime.into(),
2975 is_symlink: entry.is_symlink,
2976 is_ignored: entry.is_ignored,
2977 })
2978 } else {
2979 Err(anyhow!(
2980 "missing mtime in remote worktree entry {:?}",
2981 entry.path
2982 ))
2983 }
2984 }
2985}
2986
2987#[cfg(test)]
2988mod tests {
2989 use super::*;
2990 use anyhow::Result;
2991 use client::test::FakeHttpClient;
2992 use fs::repository::FakeGitRepository;
2993 use fs::{FakeFs, RealFs};
2994 use gpui::{executor::Deterministic, TestAppContext};
2995 use rand::prelude::*;
2996 use serde_json::json;
2997 use std::{
2998 env,
2999 fmt::Write,
3000 time::{SystemTime, UNIX_EPOCH},
3001 };
3002
3003 use util::test::temp_tree;
3004
3005 #[gpui::test]
3006 async fn test_traversal(cx: &mut TestAppContext) {
3007 let fs = FakeFs::new(cx.background());
3008 fs.insert_tree(
3009 "/root",
3010 json!({
3011 ".gitignore": "a/b\n",
3012 "a": {
3013 "b": "",
3014 "c": "",
3015 }
3016 }),
3017 )
3018 .await;
3019
3020 let http_client = FakeHttpClient::with_404_response();
3021 let client = cx.read(|cx| Client::new(http_client, cx));
3022
3023 let tree = Worktree::local(
3024 client,
3025 Arc::from(Path::new("/root")),
3026 true,
3027 fs,
3028 Default::default(),
3029 &mut cx.to_async(),
3030 )
3031 .await
3032 .unwrap();
3033 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3034 .await;
3035
3036 tree.read_with(cx, |tree, _| {
3037 assert_eq!(
3038 tree.entries(false)
3039 .map(|entry| entry.path.as_ref())
3040 .collect::<Vec<_>>(),
3041 vec![
3042 Path::new(""),
3043 Path::new(".gitignore"),
3044 Path::new("a"),
3045 Path::new("a/c"),
3046 ]
3047 );
3048 assert_eq!(
3049 tree.entries(true)
3050 .map(|entry| entry.path.as_ref())
3051 .collect::<Vec<_>>(),
3052 vec![
3053 Path::new(""),
3054 Path::new(".gitignore"),
3055 Path::new("a"),
3056 Path::new("a/b"),
3057 Path::new("a/c"),
3058 ]
3059 );
3060 })
3061 }
3062
3063 #[gpui::test(iterations = 10)]
3064 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3065 let fs = FakeFs::new(cx.background());
3066 fs.insert_tree(
3067 "/root",
3068 json!({
3069 "lib": {
3070 "a": {
3071 "a.txt": ""
3072 },
3073 "b": {
3074 "b.txt": ""
3075 }
3076 }
3077 }),
3078 )
3079 .await;
3080 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3081 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3082
3083 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3084 let tree = Worktree::local(
3085 client,
3086 Arc::from(Path::new("/root")),
3087 true,
3088 fs.clone(),
3089 Default::default(),
3090 &mut cx.to_async(),
3091 )
3092 .await
3093 .unwrap();
3094
3095 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3096 .await;
3097
3098 tree.read_with(cx, |tree, _| {
3099 assert_eq!(
3100 tree.entries(false)
3101 .map(|entry| entry.path.as_ref())
3102 .collect::<Vec<_>>(),
3103 vec![
3104 Path::new(""),
3105 Path::new("lib"),
3106 Path::new("lib/a"),
3107 Path::new("lib/a/a.txt"),
3108 Path::new("lib/a/lib"),
3109 Path::new("lib/b"),
3110 Path::new("lib/b/b.txt"),
3111 Path::new("lib/b/lib"),
3112 ]
3113 );
3114 });
3115
3116 fs.rename(
3117 Path::new("/root/lib/a/lib"),
3118 Path::new("/root/lib/a/lib-2"),
3119 Default::default(),
3120 )
3121 .await
3122 .unwrap();
3123 executor.run_until_parked();
3124 tree.read_with(cx, |tree, _| {
3125 assert_eq!(
3126 tree.entries(false)
3127 .map(|entry| entry.path.as_ref())
3128 .collect::<Vec<_>>(),
3129 vec![
3130 Path::new(""),
3131 Path::new("lib"),
3132 Path::new("lib/a"),
3133 Path::new("lib/a/a.txt"),
3134 Path::new("lib/a/lib-2"),
3135 Path::new("lib/b"),
3136 Path::new("lib/b/b.txt"),
3137 Path::new("lib/b/lib"),
3138 ]
3139 );
3140 });
3141 }
3142
3143 #[gpui::test]
3144 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3145 let parent_dir = temp_tree(json!({
3146 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3147 "tree": {
3148 ".git": {},
3149 ".gitignore": "ignored-dir\n",
3150 "tracked-dir": {
3151 "tracked-file1": "",
3152 "ancestor-ignored-file1": "",
3153 },
3154 "ignored-dir": {
3155 "ignored-file1": ""
3156 }
3157 }
3158 }));
3159 let dir = parent_dir.path().join("tree");
3160
3161 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3162
3163 let tree = Worktree::local(
3164 client,
3165 dir.as_path(),
3166 true,
3167 Arc::new(RealFs),
3168 Default::default(),
3169 &mut cx.to_async(),
3170 )
3171 .await
3172 .unwrap();
3173 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3174 .await;
3175 tree.flush_fs_events(cx).await;
3176 cx.read(|cx| {
3177 let tree = tree.read(cx);
3178 assert!(
3179 !tree
3180 .entry_for_path("tracked-dir/tracked-file1")
3181 .unwrap()
3182 .is_ignored
3183 );
3184 assert!(
3185 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3186 .unwrap()
3187 .is_ignored
3188 );
3189 assert!(
3190 tree.entry_for_path("ignored-dir/ignored-file1")
3191 .unwrap()
3192 .is_ignored
3193 );
3194 });
3195
3196 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3197 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3198 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3199 tree.flush_fs_events(cx).await;
3200 cx.read(|cx| {
3201 let tree = tree.read(cx);
3202 assert!(
3203 !tree
3204 .entry_for_path("tracked-dir/tracked-file2")
3205 .unwrap()
3206 .is_ignored
3207 );
3208 assert!(
3209 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3210 .unwrap()
3211 .is_ignored
3212 );
3213 assert!(
3214 tree.entry_for_path("ignored-dir/ignored-file2")
3215 .unwrap()
3216 .is_ignored
3217 );
3218 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3219 });
3220 }
3221
3222 #[gpui::test]
3223 async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3224 let root = temp_tree(json!({
3225 "dir1": {
3226 ".git": {},
3227 "deps": {
3228 "dep1": {
3229 ".git": {},
3230 "src": {
3231 "a.txt": ""
3232 }
3233 }
3234 },
3235 "src": {
3236 "b.txt": ""
3237 }
3238 },
3239 "c.txt": "",
3240 }));
3241
3242 let http_client = FakeHttpClient::with_404_response();
3243 let client = cx.read(|cx| Client::new(http_client, cx));
3244 let tree = Worktree::local(
3245 client,
3246 root.path(),
3247 true,
3248 Arc::new(RealFs),
3249 Default::default(),
3250 &mut cx.to_async(),
3251 )
3252 .await
3253 .unwrap();
3254
3255 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3256 .await;
3257 tree.flush_fs_events(cx).await;
3258
3259 tree.read_with(cx, |tree, _cx| {
3260 let tree = tree.as_local().unwrap();
3261
3262 assert!(tree.repo_for("c.txt".as_ref()).is_none());
3263
3264 let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3265 assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
3266 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));
3267
3268 let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
3269 assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
3270 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
3271 });
3272
3273 let original_scan_id = tree.read_with(cx, |tree, _cx| {
3274 let tree = tree.as_local().unwrap();
3275 tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
3276 });
3277
3278 std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
3279 tree.flush_fs_events(cx).await;
3280
3281 tree.read_with(cx, |tree, _cx| {
3282 let tree = tree.as_local().unwrap();
3283 let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
3284 assert_ne!(
3285 original_scan_id, new_scan_id,
3286 "original {original_scan_id}, new {new_scan_id}"
3287 );
3288 });
3289
3290 std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
3291 tree.flush_fs_events(cx).await;
3292
3293 tree.read_with(cx, |tree, _cx| {
3294 let tree = tree.as_local().unwrap();
3295
3296 assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
3297 });
3298 }
3299
3300 #[test]
3301 fn test_changed_repos() {
3302 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3303 GitRepositoryEntry {
3304 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3305 scan_id,
3306 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3307 git_dir_path: git_dir_path.as_ref().into(),
3308 }
3309 }
3310
3311 let prev_repos: Vec<GitRepositoryEntry> = vec![
3312 fake_entry("/.git", 0),
3313 fake_entry("/a/.git", 0),
3314 fake_entry("/a/b/.git", 0),
3315 ];
3316
3317 let new_repos: Vec<GitRepositoryEntry> = vec![
3318 fake_entry("/a/.git", 1),
3319 fake_entry("/a/b/.git", 0),
3320 fake_entry("/a/c/.git", 0),
3321 ];
3322
3323 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3324
3325 // Deletion retained
3326 assert!(res
3327 .iter()
3328 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3329 .is_some());
3330
3331 // Update retained
3332 assert!(res
3333 .iter()
3334 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3335 .is_some());
3336
3337 // Addition retained
3338 assert!(res
3339 .iter()
3340 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3341 .is_some());
3342
3343 // No change, not retained
3344 assert!(res
3345 .iter()
3346 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3347 .is_none());
3348 }
3349
3350 #[gpui::test]
3351 async fn test_write_file(cx: &mut TestAppContext) {
3352 let dir = temp_tree(json!({
3353 ".git": {},
3354 ".gitignore": "ignored-dir\n",
3355 "tracked-dir": {},
3356 "ignored-dir": {}
3357 }));
3358
3359 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3360
3361 let tree = Worktree::local(
3362 client,
3363 dir.path(),
3364 true,
3365 Arc::new(RealFs),
3366 Default::default(),
3367 &mut cx.to_async(),
3368 )
3369 .await
3370 .unwrap();
3371 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3372 .await;
3373 tree.flush_fs_events(cx).await;
3374
3375 tree.update(cx, |tree, cx| {
3376 tree.as_local().unwrap().write_file(
3377 Path::new("tracked-dir/file.txt"),
3378 "hello".into(),
3379 Default::default(),
3380 cx,
3381 )
3382 })
3383 .await
3384 .unwrap();
3385 tree.update(cx, |tree, cx| {
3386 tree.as_local().unwrap().write_file(
3387 Path::new("ignored-dir/file.txt"),
3388 "world".into(),
3389 Default::default(),
3390 cx,
3391 )
3392 })
3393 .await
3394 .unwrap();
3395
3396 tree.read_with(cx, |tree, _| {
3397 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3398 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3399 assert!(!tracked.is_ignored);
3400 assert!(ignored.is_ignored);
3401 });
3402 }
3403
3404 #[gpui::test(iterations = 30)]
3405 async fn test_create_directory(cx: &mut TestAppContext) {
3406 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3407
3408 let fs = FakeFs::new(cx.background());
3409 fs.insert_tree(
3410 "/a",
3411 json!({
3412 "b": {},
3413 "c": {},
3414 "d": {},
3415 }),
3416 )
3417 .await;
3418
3419 let tree = Worktree::local(
3420 client,
3421 "/a".as_ref(),
3422 true,
3423 fs,
3424 Default::default(),
3425 &mut cx.to_async(),
3426 )
3427 .await
3428 .unwrap();
3429
3430 let entry = tree
3431 .update(cx, |tree, cx| {
3432 tree.as_local_mut()
3433 .unwrap()
3434 .create_entry("a/e".as_ref(), true, cx)
3435 })
3436 .await
3437 .unwrap();
3438 assert!(entry.is_dir());
3439
3440 cx.foreground().run_until_parked();
3441 tree.read_with(cx, |tree, _| {
3442 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3443 });
3444 }
3445
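// Randomized test: repeatedly mutates a real temporary directory, delivers the
// resulting FS events to the scanner in random batches, and checks that the snapshot
// remains internally consistent and converges to the same state as a fresh scan.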
3446 #[gpui::test(iterations = 100)]
3447 fn test_random(mut rng: StdRng) {
3448 let operations = env::var("OPERATIONS")
3449 .map(|o| o.parse().unwrap())
3450 .unwrap_or(40);
3451 let initial_entries = env::var("INITIAL_ENTRIES")
3452 .map(|o| o.parse().unwrap())
3453 .unwrap_or(20);
3454
3455 let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
3456 for _ in 0..initial_entries {
3457 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
3458 }
3459 log::info!("Generated initial tree");
3460
3461 let (notify_tx, _notify_rx) = mpsc::unbounded();
3462 let fs = Arc::new(RealFs);
3463 let next_entry_id = Arc::new(AtomicUsize::new(0));
3464 let mut initial_snapshot = LocalSnapshot {
3465 removed_entry_ids: Default::default(),
3466 ignores_by_parent_abs_path: Default::default(),
3467 git_repositories: Default::default(),
3468 next_entry_id: next_entry_id.clone(),
3469 snapshot: Snapshot {
3470 id: WorktreeId::from_usize(0),
3471 entries_by_path: Default::default(),
3472 entries_by_id: Default::default(),
3473 abs_path: root_dir.path().into(),
3474 root_name: Default::default(),
3475 root_char_bag: Default::default(),
3476 scan_id: 0,
3477 completed_scan_id: 0,
3478 },
3479 };
3480 initial_snapshot.insert_entry(
3481 Entry::new(
3482 Path::new("").into(),
3483 &smol::block_on(fs.metadata(root_dir.path()))
3484 .unwrap()
3485 .unwrap(),
3486 &next_entry_id,
3487 Default::default(),
3488 ),
3489 fs.as_ref(),
3490 );
3491 let mut scanner = BackgroundScanner::new(
3492 Arc::new(Mutex::new(initial_snapshot.clone())),
3493 notify_tx,
3494 fs.clone(),
3495 Arc::new(gpui::executor::Background::new()),
3496 );
3497 smol::block_on(scanner.scan_dirs()).unwrap();
3498 scanner.snapshot().check_invariants();
3499
3500 let mut events = Vec::new();
3501 let mut snapshots = Vec::new();
3502 let mut mutations_len = operations;
3503 while mutations_len > 1 {
3504 if !events.is_empty() && rng.gen_bool(0.4) {
3505 let len = rng.gen_range(0..=events.len());
3506 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
3507 log::info!("Delivering events: {:#?}", to_deliver);
3508 smol::block_on(scanner.process_events(to_deliver));
3509 scanner.snapshot().check_invariants();
3510 } else {
3511 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
3512 mutations_len -= 1;
3513 }
3514
3515 if rng.gen_bool(0.2) {
3516 snapshots.push(scanner.snapshot());
3517 }
3518 }
3519 log::info!("Quiescing: {:#?}", events);
3520 smol::block_on(scanner.process_events(events));
3521 scanner.snapshot().check_invariants();
3522
3523 let (notify_tx, _notify_rx) = mpsc::unbounded();
3524 let mut new_scanner = BackgroundScanner::new(
3525 Arc::new(Mutex::new(initial_snapshot)),
3526 notify_tx,
3527 scanner.fs.clone(),
3528 scanner.executor.clone(),
3529 );
3530 smol::block_on(new_scanner.scan_dirs()).unwrap();
3531 assert_eq!(
3532 scanner.snapshot().to_vec(true),
3533 new_scanner.snapshot().to_vec(true)
3534 );
3535
3536 for mut prev_snapshot in snapshots {
3537 let include_ignored = rng.gen::<bool>();
3538 if !include_ignored {
3539 let mut entries_by_path_edits = Vec::new();
3540 let mut entries_by_id_edits = Vec::new();
3541 for entry in prev_snapshot
3542 .entries_by_id
3543 .cursor::<()>()
3544 .filter(|e| e.is_ignored)
3545 {
3546 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3547 entries_by_id_edits.push(Edit::Remove(entry.id));
3548 }
3549
3550 prev_snapshot
3551 .entries_by_path
3552 .edit(entries_by_path_edits, &());
3553 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3554 }
3555
3556 let update = scanner
3557 .snapshot()
3558 .build_update(&prev_snapshot, 0, 0, include_ignored);
3559 prev_snapshot.apply_remote_update(update).unwrap();
3560 assert_eq!(
3561 prev_snapshot.to_vec(true),
3562 scanner.snapshot().to_vec(include_ignored)
3563 );
3564 }
3565 }
3566
3567 fn randomly_mutate_tree(
3568 root_path: &Path,
3569 insertion_probability: f64,
3570 rng: &mut impl Rng,
3571 ) -> Result<Vec<fsevent::Event>> {
3572 let root_path = root_path.canonicalize().unwrap();
3573 let (dirs, files) = read_dir_recursive(root_path.clone());
3574
3575 let mut events = Vec::new();
3576 let mut record_event = |path: PathBuf| {
3577 events.push(fsevent::Event {
3578 event_id: SystemTime::now()
3579 .duration_since(UNIX_EPOCH)
3580 .unwrap()
3581 .as_secs(),
3582 flags: fsevent::StreamFlags::empty(),
3583 path,
3584 });
3585 };
3586
3587 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
3588 let path = dirs.choose(rng).unwrap();
3589 let new_path = path.join(gen_name(rng));
3590
3591 if rng.gen() {
3592 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
3593 std::fs::create_dir(&new_path)?;
3594 } else {
3595 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
3596 std::fs::write(&new_path, "")?;
3597 }
3598 record_event(new_path);
3599 } else if rng.gen_bool(0.05) {
3600 let ignore_dir_path = dirs.choose(rng).unwrap();
3601 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
3602
3603 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
3604 let files_to_ignore = {
3605 let len = rng.gen_range(0..=subfiles.len());
3606 subfiles.choose_multiple(rng, len)
3607 };
3608 let dirs_to_ignore = {
3609 let len = rng.gen_range(0..subdirs.len());
3610 subdirs.choose_multiple(rng, len)
3611 };
3612
3613 let mut ignore_contents = String::new();
3614 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
3615 writeln!(
3616 ignore_contents,
3617 "{}",
3618 path_to_ignore
3619 .strip_prefix(&ignore_dir_path)?
3620 .to_str()
3621 .unwrap()
3622 )
3623 .unwrap();
3624 }
3625 log::info!(
3626 "Creating {:?} with contents:\n{}",
3627 ignore_path.strip_prefix(&root_path)?,
3628 ignore_contents
3629 );
3630 std::fs::write(&ignore_path, ignore_contents).unwrap();
3631 record_event(ignore_path);
3632 } else {
3633 let old_path = {
3634 let file_path = files.choose(rng);
3635 let dir_path = dirs[1..].choose(rng);
3636 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
3637 };
3638
3639 let is_rename = rng.gen();
3640 if is_rename {
3641 let new_path_parent = dirs
3642 .iter()
3643 .filter(|d| !d.starts_with(old_path))
3644 .choose(rng)
3645 .unwrap();
3646
3647 let overwrite_existing_dir =
3648 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
3649 let new_path = if overwrite_existing_dir {
3650 std::fs::remove_dir_all(&new_path_parent).ok();
3651 new_path_parent.to_path_buf()
3652 } else {
3653 new_path_parent.join(gen_name(rng))
3654 };
3655
3656 log::info!(
3657 "Renaming {:?} to {}{:?}",
3658 old_path.strip_prefix(&root_path)?,
3659 if overwrite_existing_dir {
3660 "overwrite "
3661 } else {
3662 ""
3663 },
3664 new_path.strip_prefix(&root_path)?
3665 );
3666 std::fs::rename(&old_path, &new_path)?;
3667 record_event(old_path.clone());
3668 record_event(new_path);
3669 } else if old_path.is_dir() {
3670 let (dirs, files) = read_dir_recursive(old_path.clone());
3671
3672 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
3673 std::fs::remove_dir_all(&old_path).unwrap();
3674 for file in files {
3675 record_event(file);
3676 }
3677 for dir in dirs {
3678 record_event(dir);
3679 }
3680 } else {
3681 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
3682 std::fs::remove_file(old_path).unwrap();
3683 record_event(old_path.clone());
3684 }
3685 }
3686
3687 Ok(events)
3688 }
3689
3690 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3691 let child_entries = std::fs::read_dir(&path).unwrap();
3692 let mut dirs = vec![path];
3693 let mut files = Vec::new();
3694 for child_entry in child_entries {
3695 let child_path = child_entry.unwrap().path();
3696 if child_path.is_dir() {
3697 let (child_dirs, child_files) = read_dir_recursive(child_path);
3698 dirs.extend(child_dirs);
3699 files.extend(child_files);
3700 } else {
3701 files.push(child_path);
3702 }
3703 }
3704 (dirs, files)
3705 }
3706
3707 fn gen_name(rng: &mut impl Rng) -> String {
3708 (0..6)
3709 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3710 .map(char::from)
3711 .collect()
3712 }
3713
3714 impl LocalSnapshot {
3715 fn check_invariants(&self) {
3716 let mut files = self.files(true, 0);
3717 let mut visible_files = self.files(false, 0);
3718 for entry in self.entries_by_path.cursor::<()>() {
3719 if entry.is_file() {
3720 assert_eq!(files.next().unwrap().inode, entry.inode);
3721 if !entry.is_ignored {
3722 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3723 }
3724 }
3725 }
3726 assert!(files.next().is_none());
3727 assert!(visible_files.next().is_none());
3728
3729 let mut bfs_paths = Vec::new();
3730 let mut stack = vec![Path::new("")];
3731 while let Some(path) = stack.pop() {
3732 bfs_paths.push(path);
3733 let ix = stack.len();
3734 for child_entry in self.child_entries(path) {
3735 stack.insert(ix, &child_entry.path);
3736 }
3737 }
3738
3739 let dfs_paths_via_iter = self
3740 .entries_by_path
3741 .cursor::<()>()
3742 .map(|e| e.path.as_ref())
3743 .collect::<Vec<_>>();
3744 assert_eq!(bfs_paths, dfs_paths_via_iter);
3745
3746 let dfs_paths_via_traversal = self
3747 .entries(true)
3748 .map(|e| e.path.as_ref())
3749 .collect::<Vec<_>>();
3750 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3751
3752 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3753 let ignore_parent_path =
3754 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3755 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3756 assert!(self
3757 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3758 .is_some());
3759 }
3760 }
3761
3762 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3763 let mut paths = Vec::new();
3764 for entry in self.entries_by_path.cursor::<()>() {
3765 if include_ignored || !entry.is_ignored {
3766 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3767 }
3768 }
3769 paths.sort_by(|a, b| a.0.cmp(b.0));
3770 paths
3771 }
3772 }
3773}