1use super::{ignore::IgnoreStack, DiagnosticSummary};
2use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
3use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
4use anyhow::{anyhow, Context, Result};
5use client::{proto, Client};
6use clock::ReplicaId;
7use collections::{HashMap, VecDeque};
8use fs::LineEnding;
9use fs::{repository::GitRepository, Fs};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 Stream, StreamExt,
16};
17use fuzzy::CharBag;
18use git::{DOT_GIT, GITIGNORE};
19use gpui::{
20 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
21 Task,
22};
23use language::File as _;
24use language::{
25 proto::{
26 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
27 serialize_version,
28 },
29 Buffer, DiagnosticEntry, PointUtf16, Rope, RopeFingerprint, Unclipped,
30};
31use parking_lot::Mutex;
32use postage::{
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 sync::{atomic::AtomicUsize, Arc},
48 task::Poll,
49 time::{Duration, SystemTime},
50};
51use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
52use util::paths::HOME;
53use util::{ResultExt, TryFutureExt};
54
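/// Identifies a worktree within a project. Local worktrees derive this id from
/// their model id; remote worktrees use the id assigned by the host.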
55#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
56pub struct WorktreeId(usize);
57
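/// A directory tree tracked by a project, either backed by the local filesystem
/// or mirrored from a remote collaborator.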
58#[allow(clippy::large_enum_variant)]
59pub enum Worktree {
60 Local(LocalWorktree),
61 Remote(RemoteWorktree),
62}
63
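/// A worktree backed by the local filesystem. A background scanner task keeps
/// `background_snapshot` and `background_changes` current, and `poll_snapshot`
/// copies them into the foreground `snapshot`, emitting events as it does so.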
64pub struct LocalWorktree {
65 snapshot: LocalSnapshot,
66 background_snapshot: Arc<Mutex<LocalSnapshot>>,
67 background_changes: Arc<Mutex<HashMap<Arc<Path>, PathChange>>>,
68 last_scan_state_rx: watch::Receiver<ScanState>,
69 _background_scanner_task: Option<Task<()>>,
70 poll_task: Option<Task<()>>,
71 share: Option<ShareState>,
72 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
73 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
74 client: Arc<Client>,
75 fs: Arc<dyn Fs>,
76 visible: bool,
77}
78
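/// A replica of a worktree hosted by a remote collaborator. Updates received via
/// `update_from_remote` are applied to `background_snapshot` on a background task
/// and then polled into `snapshot`.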
79pub struct RemoteWorktree {
80 pub snapshot: Snapshot,
81 pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
82 project_id: u64,
83 client: Arc<Client>,
84 updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
85 snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
86 replica_id: ReplicaId,
87 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
88 visible: bool,
89 disconnected: bool,
90}
91
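/// The state of a worktree's entries at a point in time, indexed both by path
/// (`entries_by_path`) and by entry id (`entries_by_id`).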
92#[derive(Clone)]
93pub struct Snapshot {
94 id: WorktreeId,
95 abs_path: Arc<Path>,
96 root_name: String,
97 root_char_bag: CharBag,
98 entries_by_path: SumTree<Entry>,
99 entries_by_id: SumTree<PathEntry>,
100 scan_id: usize,
101 completed_scan_id: usize,
102}
103
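/// A git repository discovered while scanning the worktree.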
104#[derive(Clone)]
105pub struct GitRepositoryEntry {
106 pub(crate) repo: Arc<Mutex<dyn GitRepository>>,
107
108 pub(crate) scan_id: usize,
109 // Path to the folder containing the .git file or directory
110 pub(crate) content_path: Arc<Path>,
111 // Path to the actual .git folder.
112 // Note: if .git is a file, this points to the folder indicated by the .git file
113 pub(crate) git_dir_path: Arc<Path>,
114}
115
116impl std::fmt::Debug for GitRepositoryEntry {
117 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
118 f.debug_struct("GitRepositoryEntry")
119 .field("content_path", &self.content_path)
120 .field("git_dir_path", &self.git_dir_path)
121 .field("libgit_repository", &"LibGitRepository")
122 .finish()
123 }
124}
125
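/// A `Snapshot` plus the state that only the local scanner needs: loaded
/// `.gitignore` files, discovered git repositories, and bookkeeping for reusing
/// the ids of removed entries.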
126pub struct LocalSnapshot {
127 ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
128 git_repositories: Vec<GitRepositoryEntry>,
129 removed_entry_ids: HashMap<u64, ProjectEntryId>,
130 next_entry_id: Arc<AtomicUsize>,
131 snapshot: Snapshot,
132}
133
134impl Clone for LocalSnapshot {
135 fn clone(&self) -> Self {
136 Self {
137 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
138 git_repositories: self.git_repositories.iter().cloned().collect(),
139 removed_entry_ids: self.removed_entry_ids.clone(),
140 next_entry_id: self.next_entry_id.clone(),
141 snapshot: self.snapshot.clone(),
142 }
143 }
144}
145
146impl Deref for LocalSnapshot {
147 type Target = Snapshot;
148
149 fn deref(&self) -> &Self::Target {
150 &self.snapshot
151 }
152}
153
154impl DerefMut for LocalSnapshot {
155 fn deref_mut(&mut self) -> &mut Self::Target {
156 &mut self.snapshot
157 }
158}
159
160#[derive(Clone, Debug)]
161enum ScanState {
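    /// No scan is currently in progress; the most recent scan has completed.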
162 Idle,
163 /// The worktree is performing its initial scan of the filesystem.
164 Initializing,
165 /// The worktree is updating in response to filesystem events.
166 Updating,
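    /// The worktree scanner failed with the given error.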
167 Err(Arc<anyhow::Error>),
168}
169
170struct ShareState {
171 project_id: u64,
172 snapshots_tx: watch::Sender<LocalSnapshot>,
173 resume_updates: watch::Sender<()>,
174 _maintain_remote_snapshot: Task<Option<()>>,
175}
176
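/// Notifications emitted by a worktree: the set of paths that changed on disk,
/// or the git repositories whose state changed.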
177pub enum Event {
178 UpdatedEntries(HashMap<Arc<Path>, PathChange>),
179 UpdatedGitRepositories(Vec<GitRepositoryEntry>),
180}
181
182impl Entity for Worktree {
183 type Event = Event;
184}
185
186impl Worktree {
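    /// Creates a worktree backed by the local filesystem at `path`, spawning a
    /// background scanner that watches for filesystem events and keeps the
    /// worktree's snapshot up to date.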
187 pub async fn local(
188 client: Arc<Client>,
189 path: impl Into<Arc<Path>>,
190 visible: bool,
191 fs: Arc<dyn Fs>,
192 next_entry_id: Arc<AtomicUsize>,
193 cx: &mut AsyncAppContext,
194 ) -> Result<ModelHandle<Self>> {
195 let (tree, scan_states_tx) =
196 LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
197 tree.update(cx, |tree, cx| {
198 let tree = tree.as_local_mut().unwrap();
199 let abs_path = tree.abs_path().clone();
200 let background_snapshot = tree.background_snapshot.clone();
201 let background_changes = tree.background_changes.clone();
202 let background = cx.background().clone();
203 tree._background_scanner_task = Some(cx.background().spawn(async move {
204 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
205 let scanner = BackgroundScanner::new(
206 background_snapshot,
207 background_changes,
208 scan_states_tx,
209 fs,
210 background,
211 );
212 scanner.run(events).await;
213 }));
214 });
215 Ok(tree)
216 }
217
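    /// Creates a worktree that mirrors one shared by a remote collaborator,
    /// applying `UpdateWorktree` messages to its snapshot as they arrive.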
218 pub fn remote(
219 project_remote_id: u64,
220 replica_id: ReplicaId,
221 worktree: proto::WorktreeMetadata,
222 client: Arc<Client>,
223 cx: &mut MutableAppContext,
224 ) -> ModelHandle<Self> {
225 let remote_id = worktree.id;
226 let root_char_bag: CharBag = worktree
227 .root_name
228 .chars()
229 .map(|c| c.to_ascii_lowercase())
230 .collect();
231 let root_name = worktree.root_name.clone();
232 let visible = worktree.visible;
233
234 let abs_path = PathBuf::from(worktree.abs_path);
235 let snapshot = Snapshot {
236 id: WorktreeId(remote_id as usize),
237 abs_path: Arc::from(abs_path.deref()),
238 root_name,
239 root_char_bag,
240 entries_by_path: Default::default(),
241 entries_by_id: Default::default(),
242 scan_id: 0,
243 completed_scan_id: 0,
244 };
245
246 let (updates_tx, mut updates_rx) = mpsc::unbounded();
247 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
248 let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
249 let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
250 Worktree::Remote(RemoteWorktree {
251 project_id: project_remote_id,
252 replica_id,
253 snapshot: snapshot.clone(),
254 background_snapshot: background_snapshot.clone(),
255 updates_tx: Some(updates_tx),
256 snapshot_subscriptions: Default::default(),
257 client: client.clone(),
258 diagnostic_summaries: Default::default(),
259 visible,
260 disconnected: false,
261 })
262 });
263
264 cx.background()
265 .spawn(async move {
266 while let Some(update) = updates_rx.next().await {
267 if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
268 log::error!("error applying worktree update: {}", error);
269 }
270 snapshot_updated_tx.send(()).await.ok();
271 }
272 })
273 .detach();
274
275 cx.spawn(|mut cx| {
276 let this = worktree_handle.downgrade();
277 async move {
278 while (snapshot_updated_rx.recv().await).is_some() {
279 if let Some(this) = this.upgrade(&cx) {
280 this.update(&mut cx, |this, cx| {
281 this.poll_snapshot(cx);
282 let this = this.as_remote_mut().unwrap();
283 while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
284 if this.observed_snapshot(*scan_id) {
285 let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
286 let _ = tx.send(());
287 } else {
288 break;
289 }
290 }
291 });
292 } else {
293 break;
294 }
295 }
296 }
297 })
298 .detach();
299
300 worktree_handle
301 }
302
303 pub fn as_local(&self) -> Option<&LocalWorktree> {
304 if let Worktree::Local(worktree) = self {
305 Some(worktree)
306 } else {
307 None
308 }
309 }
310
311 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
312 if let Worktree::Remote(worktree) = self {
313 Some(worktree)
314 } else {
315 None
316 }
317 }
318
319 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
320 if let Worktree::Local(worktree) = self {
321 Some(worktree)
322 } else {
323 None
324 }
325 }
326
327 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
328 if let Worktree::Remote(worktree) = self {
329 Some(worktree)
330 } else {
331 None
332 }
333 }
334
335 pub fn is_local(&self) -> bool {
336 matches!(self, Worktree::Local(_))
337 }
338
339 pub fn is_remote(&self) -> bool {
340 !self.is_local()
341 }
342
343 pub fn snapshot(&self) -> Snapshot {
344 match self {
345 Worktree::Local(worktree) => worktree.snapshot().snapshot,
346 Worktree::Remote(worktree) => worktree.snapshot(),
347 }
348 }
349
350 pub fn scan_id(&self) -> usize {
351 match self {
352 Worktree::Local(worktree) => worktree.snapshot.scan_id,
353 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
354 }
355 }
356
357 pub fn completed_scan_id(&self) -> usize {
358 match self {
359 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
360 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
361 }
362 }
363
364 pub fn is_visible(&self) -> bool {
365 match self {
366 Worktree::Local(worktree) => worktree.visible,
367 Worktree::Remote(worktree) => worktree.visible,
368 }
369 }
370
371 pub fn replica_id(&self) -> ReplicaId {
372 match self {
373 Worktree::Local(_) => 0,
374 Worktree::Remote(worktree) => worktree.replica_id,
375 }
376 }
377
378 pub fn diagnostic_summaries(
379 &self,
380 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
381 match self {
382 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
383 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
384 }
385 .iter()
386 .map(|(path, summary)| (path.0.clone(), *summary))
387 }
388
389 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
390 match self {
391 Self::Local(worktree) => worktree.poll_snapshot(false, cx),
392 Self::Remote(worktree) => worktree.poll_snapshot(cx),
393 };
394 }
395
396 pub fn abs_path(&self) -> Arc<Path> {
397 match self {
398 Worktree::Local(worktree) => worktree.abs_path.clone(),
399 Worktree::Remote(worktree) => worktree.abs_path.clone(),
400 }
401 }
402}
403
404impl LocalWorktree {
405 async fn create(
406 client: Arc<Client>,
407 path: impl Into<Arc<Path>>,
408 visible: bool,
409 fs: Arc<dyn Fs>,
410 next_entry_id: Arc<AtomicUsize>,
411 cx: &mut AsyncAppContext,
412 ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
413 let abs_path = path.into();
414 let path: Arc<Path> = Arc::from(Path::new(""));
415
416 // Populate the snapshot's "root name" from the last component of the path;
417 // it is used for fuzzy matching.
418 let root_name = abs_path
419 .file_name()
420 .map_or(String::new(), |f| f.to_string_lossy().to_string());
421 let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
422 let metadata = fs
423 .metadata(&abs_path)
424 .await
425 .context("failed to stat worktree path")?;
426
427 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
428 let (mut last_scan_state_tx, last_scan_state_rx) =
429 watch::channel_with(ScanState::Initializing);
430 let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
431 let mut snapshot = LocalSnapshot {
432 ignores_by_parent_abs_path: Default::default(),
433 git_repositories: Default::default(),
434 removed_entry_ids: Default::default(),
435 next_entry_id,
436 snapshot: Snapshot {
437 id: WorktreeId::from_usize(cx.model_id()),
438 abs_path,
439 root_name: root_name.clone(),
440 root_char_bag,
441 entries_by_path: Default::default(),
442 entries_by_id: Default::default(),
443 scan_id: 0,
444 completed_scan_id: 0,
445 },
446 };
447 if let Some(metadata) = metadata {
448 let entry = Entry::new(
449 path,
450 &metadata,
451 &snapshot.next_entry_id,
452 snapshot.root_char_bag,
453 );
454 snapshot.insert_entry(entry, fs.as_ref());
455 }
456
457 let tree = Self {
458 snapshot: snapshot.clone(),
459 background_snapshot: Arc::new(Mutex::new(snapshot)),
460 background_changes: Arc::new(Mutex::new(HashMap::default())),
461 last_scan_state_rx,
462 _background_scanner_task: None,
463 share: None,
464 poll_task: None,
465 diagnostics: Default::default(),
466 diagnostic_summaries: Default::default(),
467 client,
468 fs,
469 visible,
470 };
471
472 cx.spawn_weak(|this, mut cx| async move {
473 while let Some(scan_state) = scan_states_rx.next().await {
474 if let Some(this) = this.upgrade(&cx) {
475 last_scan_state_tx.blocking_send(scan_state).ok();
476 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
477 } else {
478 break;
479 }
480 }
481 })
482 .detach();
483
484 Worktree::Local(tree)
485 });
486
487 Ok((tree, scan_states_tx))
488 }
489
490 pub fn contains_abs_path(&self, path: &Path) -> bool {
491 path.starts_with(&self.abs_path)
492 }
493
494 fn absolutize(&self, path: &Path) -> PathBuf {
495 if path.file_name().is_some() {
496 self.abs_path.join(path)
497 } else {
498 self.abs_path.to_path_buf()
499 }
500 }
501
502 pub(crate) fn load_buffer(
503 &mut self,
504 path: &Path,
505 cx: &mut ModelContext<Worktree>,
506 ) -> Task<Result<ModelHandle<Buffer>>> {
507 let path = Arc::from(path);
508 cx.spawn(move |this, mut cx| async move {
509 let (file, contents, diff_base) = this
510 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
511 .await?;
512 Ok(cx.add_model(|cx| {
513 let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
514 buffer.git_diff_recalc(cx);
515 buffer
516 }))
517 })
518 }
519
520 pub fn diagnostics_for_path(
521 &self,
522 path: &Path,
523 ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
524 self.diagnostics.get(path).cloned()
525 }
526
527 pub fn update_diagnostics(
528 &mut self,
529 language_server_id: usize,
530 worktree_path: Arc<Path>,
531 diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
532 _: &mut ModelContext<Worktree>,
533 ) -> Result<bool> {
534 self.diagnostics.remove(&worktree_path);
535 let old_summary = self
536 .diagnostic_summaries
537 .remove(&PathKey(worktree_path.clone()))
538 .unwrap_or_default();
539 let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
540 if !new_summary.is_empty() {
541 self.diagnostic_summaries
542 .insert(PathKey(worktree_path.clone()), new_summary);
543 self.diagnostics.insert(worktree_path.clone(), diagnostics);
544 }
545
546 let updated = !old_summary.is_empty() || !new_summary.is_empty();
547 if updated {
548 if let Some(share) = self.share.as_ref() {
549 self.client
550 .send(proto::UpdateDiagnosticSummary {
551 project_id: share.project_id,
552 worktree_id: self.id().to_proto(),
553 summary: Some(proto::DiagnosticSummary {
554 path: worktree_path.to_string_lossy().to_string(),
555 language_server_id: language_server_id as u64,
556 error_count: new_summary.error_count as u32,
557 warning_count: new_summary.warning_count as u32,
558 }),
559 })
560 .log_err();
561 }
562 }
563
564 Ok(updated)
565 }
566
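    /// Copies the background scanner's snapshot into the foreground snapshot and
    /// emits the accumulated change events. While the initial scan is still
    /// running, another poll is scheduled after a short delay; when `force` is
    /// true the snapshot is copied even if a scan is in progress.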
567 fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
568 self.poll_task.take();
569
570 match self.scan_state() {
571 ScanState::Idle => {
572 let new_snapshot = self.background_snapshot.lock().clone();
573 let changes = mem::take(&mut *self.background_changes.lock());
574 let updated_repos = Self::changed_repos(
575 &self.snapshot.git_repositories,
576 &new_snapshot.git_repositories,
577 );
578 self.snapshot = new_snapshot;
579
580 if let Some(share) = self.share.as_mut() {
581 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
582 }
583
584 cx.emit(Event::UpdatedEntries(changes));
585
586 if !updated_repos.is_empty() {
587 cx.emit(Event::UpdatedGitRepositories(updated_repos));
588 }
589 }
590
591 ScanState::Initializing => {
592 let is_fake_fs = self.fs.is_fake();
593
594 let new_snapshot = self.background_snapshot.lock().clone();
595 let updated_repos = Self::changed_repos(
596 &self.snapshot.git_repositories,
597 &new_snapshot.git_repositories,
598 );
599 self.snapshot = new_snapshot;
600
601 self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
602 if is_fake_fs {
603 #[cfg(any(test, feature = "test-support"))]
604 cx.background().simulate_random_delay().await;
605 } else {
606 smol::Timer::after(Duration::from_millis(100)).await;
607 }
608 if let Some(this) = this.upgrade(&cx) {
609 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
610 }
611 }));
612
613 if !updated_repos.is_empty() {
614 cx.emit(Event::UpdatedGitRepositories(updated_repos));
615 }
616 }
617
618 _ => {
619 if force {
620 self.snapshot = self.background_snapshot.lock().clone();
621 }
622 }
623 }
624
625 cx.notify();
626 }
627
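    /// Returns the symmetric difference between two lists of repositories:
    /// entries present in only one list, or whose `scan_id` differs, keyed by
    /// `git_dir_path`.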
628 fn changed_repos(
629 old_repos: &[GitRepositoryEntry],
630 new_repos: &[GitRepositoryEntry],
631 ) -> Vec<GitRepositoryEntry> {
632 fn diff<'a>(
633 a: &'a [GitRepositoryEntry],
634 b: &'a [GitRepositoryEntry],
635 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
636 ) {
637 for a_repo in a {
638 let matched = b.iter().find(|b_repo| {
639 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
640 });
641
642 if matched.is_none() {
643 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
644 }
645 }
646 }
647
648 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
649
650 diff(old_repos, new_repos, &mut updated);
651 diff(new_repos, old_repos, &mut updated);
652
653 updated.into_values().collect()
654 }
655
656 pub fn scan_complete(&self) -> impl Future<Output = ()> {
657 let mut scan_state_rx = self.last_scan_state_rx.clone();
658 async move {
659 let mut scan_state = Some(scan_state_rx.borrow().clone());
660 while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
661 scan_state = scan_state_rx.recv().await;
662 }
663 }
664 }
665
666 fn scan_state(&self) -> ScanState {
667 self.last_scan_state_rx.borrow().clone()
668 }
669
670 pub fn snapshot(&self) -> LocalSnapshot {
671 self.snapshot.clone()
672 }
673
674 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
675 proto::WorktreeMetadata {
676 id: self.id().to_proto(),
677 root_name: self.root_name().to_string(),
678 visible: self.visible,
679 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
680 }
681 }
682
683 fn load(
684 &self,
685 path: &Path,
686 cx: &mut ModelContext<Worktree>,
687 ) -> Task<Result<(File, String, Option<String>)>> {
688 let handle = cx.handle();
689 let path = Arc::from(path);
690 let abs_path = self.absolutize(&path);
691 let fs = self.fs.clone();
692 let snapshot = self.snapshot();
693
694 cx.spawn(|this, mut cx| async move {
695 let text = fs.load(&abs_path).await?;
696
697 let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
698 if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
699 let repo_relative = repo_relative.to_owned();
700 cx.background()
701 .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
702 .await
703 } else {
704 None
705 }
706 } else {
707 None
708 };
709
710 // Eagerly populate the snapshot with an updated entry for the loaded file
711 let entry = this
712 .update(&mut cx, |this, cx| {
713 this.as_local()
714 .unwrap()
715 .refresh_entry(path, abs_path, None, cx)
716 })
717 .await?;
718
719 Ok((
720 File {
721 entry_id: entry.id,
722 worktree: handle,
723 path: entry.path,
724 mtime: entry.mtime,
725 is_local: true,
726 is_deleted: false,
727 },
728 text,
729 diff_base,
730 ))
731 })
732 }
733
734 pub fn save_buffer(
735 &self,
736 buffer_handle: ModelHandle<Buffer>,
737 path: Arc<Path>,
738 has_changed_file: bool,
739 cx: &mut ModelContext<Worktree>,
740 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
741 let handle = cx.handle();
742 let buffer = buffer_handle.read(cx);
743
744 let rpc = self.client.clone();
745 let buffer_id = buffer.remote_id();
746 let project_id = self.share.as_ref().map(|share| share.project_id);
747
748 let text = buffer.as_rope().clone();
749 let fingerprint = text.fingerprint();
750 let version = buffer.version();
751 let save = self.write_file(path, text, buffer.line_ending(), cx);
752
753 cx.as_mut().spawn(|mut cx| async move {
754 let entry = save.await?;
755
756 if has_changed_file {
757 let new_file = Arc::new(File {
758 entry_id: entry.id,
759 worktree: handle,
760 path: entry.path,
761 mtime: entry.mtime,
762 is_local: true,
763 is_deleted: false,
764 });
765
766 if let Some(project_id) = project_id {
767 rpc.send(proto::UpdateBufferFile {
768 project_id,
769 buffer_id,
770 file: Some(new_file.to_proto()),
771 })
772 .log_err();
773 }
774
775 buffer_handle.update(&mut cx, |buffer, cx| {
776 if has_changed_file {
777 buffer.file_updated(new_file, cx).detach();
778 }
779 });
780 }
781
782 if let Some(project_id) = project_id {
783 rpc.send(proto::BufferSaved {
784 project_id,
785 buffer_id,
786 version: serialize_version(&version),
787 mtime: Some(entry.mtime.into()),
788 fingerprint: serialize_fingerprint(fingerprint),
789 })?;
790 }
791
792 buffer_handle.update(&mut cx, |buffer, cx| {
793 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
794 });
795
796 Ok((version, fingerprint, entry.mtime))
797 })
798 }
799
800 pub fn create_entry(
801 &self,
802 path: impl Into<Arc<Path>>,
803 is_dir: bool,
804 cx: &mut ModelContext<Worktree>,
805 ) -> Task<Result<Entry>> {
806 self.write_entry_internal(
807 path,
808 if is_dir {
809 None
810 } else {
811 Some(Default::default())
812 },
813 cx,
814 )
815 }
816
817 pub fn write_file(
818 &self,
819 path: impl Into<Arc<Path>>,
820 text: Rope,
821 line_ending: LineEnding,
822 cx: &mut ModelContext<Worktree>,
823 ) -> Task<Result<Entry>> {
824 self.write_entry_internal(path, Some((text, line_ending)), cx)
825 }
826
827 pub fn delete_entry(
828 &self,
829 entry_id: ProjectEntryId,
830 cx: &mut ModelContext<Worktree>,
831 ) -> Option<Task<Result<()>>> {
832 let entry = self.entry_for_id(entry_id)?.clone();
833 let abs_path = self.absolutize(&entry.path);
834 let delete = cx.background().spawn({
835 let fs = self.fs.clone();
836 let abs_path = abs_path;
837 async move {
838 if entry.is_file() {
839 fs.remove_file(&abs_path, Default::default()).await
840 } else {
841 fs.remove_dir(
842 &abs_path,
843 RemoveOptions {
844 recursive: true,
845 ignore_if_not_exists: false,
846 },
847 )
848 .await
849 }
850 }
851 });
852
853 Some(cx.spawn(|this, mut cx| async move {
854 delete.await?;
855 this.update(&mut cx, |this, cx| {
856 let this = this.as_local_mut().unwrap();
857 {
858 let mut snapshot = this.background_snapshot.lock();
859 snapshot.delete_entry(entry_id);
860 }
861 this.poll_snapshot(true, cx);
862 });
863 Ok(())
864 }))
865 }
866
867 pub fn rename_entry(
868 &self,
869 entry_id: ProjectEntryId,
870 new_path: impl Into<Arc<Path>>,
871 cx: &mut ModelContext<Worktree>,
872 ) -> Option<Task<Result<Entry>>> {
873 let old_path = self.entry_for_id(entry_id)?.path.clone();
874 let new_path = new_path.into();
875 let abs_old_path = self.absolutize(&old_path);
876 let abs_new_path = self.absolutize(new_path.as_ref());
877 let rename = cx.background().spawn({
878 let fs = self.fs.clone();
879 let abs_new_path = abs_new_path.clone();
880 async move {
881 fs.rename(&abs_old_path, &abs_new_path, Default::default())
882 .await
883 }
884 });
885
886 Some(cx.spawn(|this, mut cx| async move {
887 rename.await?;
888 let entry = this
889 .update(&mut cx, |this, cx| {
890 this.as_local_mut().unwrap().refresh_entry(
891 new_path.clone(),
892 abs_new_path,
893 Some(old_path),
894 cx,
895 )
896 })
897 .await?;
898 Ok(entry)
899 }))
900 }
901
902 pub fn copy_entry(
903 &self,
904 entry_id: ProjectEntryId,
905 new_path: impl Into<Arc<Path>>,
906 cx: &mut ModelContext<Worktree>,
907 ) -> Option<Task<Result<Entry>>> {
908 let old_path = self.entry_for_id(entry_id)?.path.clone();
909 let new_path = new_path.into();
910 let abs_old_path = self.absolutize(&old_path);
911 let abs_new_path = self.absolutize(&new_path);
912 let copy = cx.background().spawn({
913 let fs = self.fs.clone();
914 let abs_new_path = abs_new_path.clone();
915 async move {
916 copy_recursive(
917 fs.as_ref(),
918 &abs_old_path,
919 &abs_new_path,
920 Default::default(),
921 )
922 .await
923 }
924 });
925
926 Some(cx.spawn(|this, mut cx| async move {
927 copy.await?;
928 let entry = this
929 .update(&mut cx, |this, cx| {
930 this.as_local_mut().unwrap().refresh_entry(
931 new_path.clone(),
932 abs_new_path,
933 None,
934 cx,
935 )
936 })
937 .await?;
938 Ok(entry)
939 }))
940 }
941
942 fn write_entry_internal(
943 &self,
944 path: impl Into<Arc<Path>>,
945 text_if_file: Option<(Rope, LineEnding)>,
946 cx: &mut ModelContext<Worktree>,
947 ) -> Task<Result<Entry>> {
948 let path = path.into();
949 let abs_path = self.absolutize(&path);
950 let write = cx.background().spawn({
951 let fs = self.fs.clone();
952 let abs_path = abs_path.clone();
953 async move {
954 if let Some((text, line_ending)) = text_if_file {
955 fs.save(&abs_path, &text, line_ending).await
956 } else {
957 fs.create_dir(&abs_path).await
958 }
959 }
960 });
961
962 cx.spawn(|this, mut cx| async move {
963 write.await?;
964 let entry = this
965 .update(&mut cx, |this, cx| {
966 this.as_local_mut()
967 .unwrap()
968 .refresh_entry(path, abs_path, None, cx)
969 })
970 .await?;
971 Ok(entry)
972 })
973 }
974
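    /// Re-stats `abs_path` and inserts a fresh entry for `path` into the
    /// background snapshot, recording the change (and the removal of `old_path`,
    /// if provided) so that it is reported on the next poll.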
975 fn refresh_entry(
976 &self,
977 path: Arc<Path>,
978 abs_path: PathBuf,
979 old_path: Option<Arc<Path>>,
980 cx: &mut ModelContext<Worktree>,
981 ) -> Task<Result<Entry>> {
982 let fs = self.fs.clone();
983 let root_char_bag;
984 let next_entry_id;
985 {
986 let snapshot = self.background_snapshot.lock();
987 root_char_bag = snapshot.root_char_bag;
988 next_entry_id = snapshot.next_entry_id.clone();
989 }
990 cx.spawn_weak(|this, mut cx| async move {
991 let metadata = fs
992 .metadata(&abs_path)
993 .await?
994 .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
995 let this = this
996 .upgrade(&cx)
997 .ok_or_else(|| anyhow!("worktree was dropped"))?;
998 this.update(&mut cx, |this, cx| {
999 let this = this.as_local_mut().unwrap();
1000 let inserted_entry;
1001 {
1002 let mut snapshot = this.background_snapshot.lock();
1003 let mut changes = this.background_changes.lock();
1004 let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
1005 entry.is_ignored = snapshot
1006 .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
1007 .is_abs_path_ignored(&abs_path, entry.is_dir());
1008 if let Some(old_path) = old_path {
1009 snapshot.remove_path(&old_path);
1010 changes.insert(old_path.clone(), PathChange::Removed);
1011 }
1012 snapshot.scan_started();
1013 let exists = snapshot.entry_for_path(&entry.path).is_some();
1014 inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
1015 changes.insert(
1016 inserted_entry.path.clone(),
1017 if exists {
1018 PathChange::Updated
1019 } else {
1020 PathChange::Added
1021 },
1022 );
1023 snapshot.scan_completed();
1024 }
1025 this.poll_snapshot(true, cx);
1026 Ok(inserted_entry)
1027 })
1028 })
1029 }
1030
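    /// Starts (or resumes) sharing this worktree with the remote project
    /// identified by `project_id`, streaming snapshot updates to the server in
    /// bounded chunks. The returned task resolves once the initial snapshot has
    /// been sent.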
1031 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1032 let (share_tx, share_rx) = oneshot::channel();
1033
1034 if let Some(share) = self.share.as_mut() {
1035 let _ = share_tx.send(());
1036 *share.resume_updates.borrow_mut() = ();
1037 } else {
1038 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
1039 let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
1040 let worktree_id = cx.model_id() as u64;
1041
1042 for (path, summary) in self.diagnostic_summaries.iter() {
1043 if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1044 project_id,
1045 worktree_id,
1046 summary: Some(summary.to_proto(&path.0)),
1047 }) {
1048 return Task::ready(Err(e));
1049 }
1050 }
1051
1052 let _maintain_remote_snapshot = cx.background().spawn({
1053 let client = self.client.clone();
1054 async move {
1055 let mut share_tx = Some(share_tx);
1056 let mut prev_snapshot = LocalSnapshot {
1057 ignores_by_parent_abs_path: Default::default(),
1058 git_repositories: Default::default(),
1059 removed_entry_ids: Default::default(),
1060 next_entry_id: Default::default(),
1061 snapshot: Snapshot {
1062 id: WorktreeId(worktree_id as usize),
1063 abs_path: Path::new("").into(),
1064 root_name: Default::default(),
1065 root_char_bag: Default::default(),
1066 entries_by_path: Default::default(),
1067 entries_by_id: Default::default(),
1068 scan_id: 0,
1069 completed_scan_id: 0,
1070 },
1071 };
1072 while let Some(snapshot) = snapshots_rx.recv().await {
1073 #[cfg(any(test, feature = "test-support"))]
1074 const MAX_CHUNK_SIZE: usize = 2;
1075 #[cfg(not(any(test, feature = "test-support")))]
1076 const MAX_CHUNK_SIZE: usize = 256;
1077
1078 let update =
1079 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1080 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1081 let _ = resume_updates_rx.try_recv();
1082 while let Err(error) = client.request(update.clone()).await {
1083 log::error!("failed to send worktree update: {}", error);
1084 log::info!("waiting to resume updates");
1085 if resume_updates_rx.next().await.is_none() {
1086 return Ok(());
1087 }
1088 }
1089 }
1090
1091 if let Some(share_tx) = share_tx.take() {
1092 let _ = share_tx.send(());
1093 }
1094
1095 prev_snapshot = snapshot;
1096 }
1097
1098 Ok::<_, anyhow::Error>(())
1099 }
1100 .log_err()
1101 });
1102
1103 self.share = Some(ShareState {
1104 project_id,
1105 snapshots_tx,
1106 resume_updates: resume_updates_tx,
1107 _maintain_remote_snapshot,
1108 });
1109 }
1110
1111 cx.foreground()
1112 .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1113 }
1114
1115 pub fn unshare(&mut self) {
1116 self.share.take();
1117 }
1118
1119 pub fn is_shared(&self) -> bool {
1120 self.share.is_some()
1121 }
1122}
1123
1124impl RemoteWorktree {
1125 fn snapshot(&self) -> Snapshot {
1126 self.snapshot.clone()
1127 }
1128
1129 fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
1130 self.snapshot = self.background_snapshot.lock().clone();
1131 cx.emit(Event::UpdatedEntries(Default::default()));
1132 cx.notify();
1133 }
1134
1135 pub fn disconnected_from_host(&mut self) {
1136 self.updates_tx.take();
1137 self.snapshot_subscriptions.clear();
1138 self.disconnected = true;
1139 }
1140
1141 pub fn save_buffer(
1142 &self,
1143 buffer_handle: ModelHandle<Buffer>,
1144 cx: &mut ModelContext<Worktree>,
1145 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1146 let buffer = buffer_handle.read(cx);
1147 let buffer_id = buffer.remote_id();
1148 let version = buffer.version();
1149 let rpc = self.client.clone();
1150 let project_id = self.project_id;
1151 cx.as_mut().spawn(|mut cx| async move {
1152 let response = rpc
1153 .request(proto::SaveBuffer {
1154 project_id,
1155 buffer_id,
1156 version: serialize_version(&version),
1157 })
1158 .await?;
1159 let version = deserialize_version(response.version);
1160 let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1161 let mtime = response
1162 .mtime
1163 .ok_or_else(|| anyhow!("missing mtime"))?
1164 .into();
1165
1166 buffer_handle.update(&mut cx, |buffer, cx| {
1167 buffer.did_save(version.clone(), fingerprint, mtime, cx);
1168 });
1169
1170 Ok((version, fingerprint, mtime))
1171 })
1172 }
1173
1174 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1175 if let Some(updates_tx) = &self.updates_tx {
1176 updates_tx
1177 .unbounded_send(update)
1178 .expect("consumer runs to completion");
1179 }
1180 }
1181
1182 fn observed_snapshot(&self, scan_id: usize) -> bool {
1183 self.completed_scan_id >= scan_id
1184 }
1185
1186 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1187 let (tx, rx) = oneshot::channel();
1188 if self.observed_snapshot(scan_id) {
1189 let _ = tx.send(());
1190 } else if self.disconnected {
1191 drop(tx);
1192 } else {
1193 match self
1194 .snapshot_subscriptions
1195 .binary_search_by_key(&scan_id, |probe| probe.0)
1196 {
1197 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1198 }
1199 }
1200
1201 async move {
1202 rx.await?;
1203 Ok(())
1204 }
1205 }
1206
1207 pub fn update_diagnostic_summary(
1208 &mut self,
1209 path: Arc<Path>,
1210 summary: &proto::DiagnosticSummary,
1211 ) {
1212 let summary = DiagnosticSummary {
1213 language_server_id: summary.language_server_id as usize,
1214 error_count: summary.error_count as usize,
1215 warning_count: summary.warning_count as usize,
1216 };
1217 if summary.is_empty() {
1218 self.diagnostic_summaries.remove(&PathKey(path));
1219 } else {
1220 self.diagnostic_summaries.insert(PathKey(path), summary);
1221 }
1222 }
1223
1224 pub fn insert_entry(
1225 &mut self,
1226 entry: proto::Entry,
1227 scan_id: usize,
1228 cx: &mut ModelContext<Worktree>,
1229 ) -> Task<Result<Entry>> {
1230 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1231 cx.spawn(|this, mut cx| async move {
1232 wait_for_snapshot.await?;
1233 this.update(&mut cx, |worktree, _| {
1234 let worktree = worktree.as_remote_mut().unwrap();
1235 let mut snapshot = worktree.background_snapshot.lock();
1236 let entry = snapshot.insert_entry(entry);
1237 worktree.snapshot = snapshot.clone();
1238 entry
1239 })
1240 })
1241 }
1242
1243 pub(crate) fn delete_entry(
1244 &mut self,
1245 id: ProjectEntryId,
1246 scan_id: usize,
1247 cx: &mut ModelContext<Worktree>,
1248 ) -> Task<Result<()>> {
1249 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1250 cx.spawn(|this, mut cx| async move {
1251 wait_for_snapshot.await?;
1252 this.update(&mut cx, |worktree, _| {
1253 let worktree = worktree.as_remote_mut().unwrap();
1254 let mut snapshot = worktree.background_snapshot.lock();
1255 snapshot.delete_entry(id);
1256 worktree.snapshot = snapshot.clone();
1257 });
1258 Ok(())
1259 })
1260 }
1261}
1262
1263impl Snapshot {
1264 pub fn id(&self) -> WorktreeId {
1265 self.id
1266 }
1267
1268 pub fn abs_path(&self) -> &Arc<Path> {
1269 &self.abs_path
1270 }
1271
1272 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1273 self.entries_by_id.get(&entry_id, &()).is_some()
1274 }
1275
1276 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1277 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1278 let old_entry = self.entries_by_id.insert_or_replace(
1279 PathEntry {
1280 id: entry.id,
1281 path: entry.path.clone(),
1282 is_ignored: entry.is_ignored,
1283 scan_id: 0,
1284 },
1285 &(),
1286 );
1287 if let Some(old_entry) = old_entry {
1288 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1289 }
1290 self.entries_by_path.insert_or_replace(entry.clone(), &());
1291 Ok(entry)
1292 }
1293
1294 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
1295 if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
1296 self.entries_by_path = {
1297 let mut cursor = self.entries_by_path.cursor();
1298 let mut new_entries_by_path =
1299 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1300 while let Some(entry) = cursor.item() {
1301 if entry.path.starts_with(&removed_entry.path) {
1302 self.entries_by_id.remove(&entry.id, &());
1303 cursor.next(&());
1304 } else {
1305 break;
1306 }
1307 }
1308 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1309 new_entries_by_path
1310 };
1311
1312 true
1313 } else {
1314 false
1315 }
1316 }
1317
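    /// Applies an `UpdateWorktree` message from the host, removing deleted
    /// entries, upserting the updated ones, and advancing the scan ids.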
1318 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1319 let mut entries_by_path_edits = Vec::new();
1320 let mut entries_by_id_edits = Vec::new();
1321 for entry_id in update.removed_entries {
1322 let entry = self
1323 .entry_for_id(ProjectEntryId::from_proto(entry_id))
1324 .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
1325 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1326 entries_by_id_edits.push(Edit::Remove(entry.id));
1327 }
1328
1329 for entry in update.updated_entries {
1330 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1331 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1332 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1333 }
1334 entries_by_id_edits.push(Edit::Insert(PathEntry {
1335 id: entry.id,
1336 path: entry.path.clone(),
1337 is_ignored: entry.is_ignored,
1338 scan_id: 0,
1339 }));
1340 entries_by_path_edits.push(Edit::Insert(entry));
1341 }
1342
1343 self.entries_by_path.edit(entries_by_path_edits, &());
1344 self.entries_by_id.edit(entries_by_id_edits, &());
1345 self.scan_id = update.scan_id as usize;
1346 if update.is_last_update {
1347 self.completed_scan_id = update.scan_id as usize;
1348 }
1349
1350 Ok(())
1351 }
1352
1353 pub fn file_count(&self) -> usize {
1354 self.entries_by_path.summary().file_count
1355 }
1356
1357 pub fn visible_file_count(&self) -> usize {
1358 self.entries_by_path.summary().visible_file_count
1359 }
1360
1361 fn traverse_from_offset(
1362 &self,
1363 include_dirs: bool,
1364 include_ignored: bool,
1365 start_offset: usize,
1366 ) -> Traversal {
1367 let mut cursor = self.entries_by_path.cursor();
1368 cursor.seek(
1369 &TraversalTarget::Count {
1370 count: start_offset,
1371 include_dirs,
1372 include_ignored,
1373 },
1374 Bias::Right,
1375 &(),
1376 );
1377 Traversal {
1378 cursor,
1379 include_dirs,
1380 include_ignored,
1381 }
1382 }
1383
1384 fn traverse_from_path(
1385 &self,
1386 include_dirs: bool,
1387 include_ignored: bool,
1388 path: &Path,
1389 ) -> Traversal {
1390 let mut cursor = self.entries_by_path.cursor();
1391 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1392 Traversal {
1393 cursor,
1394 include_dirs,
1395 include_ignored,
1396 }
1397 }
1398
1399 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1400 self.traverse_from_offset(false, include_ignored, start)
1401 }
1402
1403 pub fn entries(&self, include_ignored: bool) -> Traversal {
1404 self.traverse_from_offset(true, include_ignored, 0)
1405 }
1406
1407 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1408 let empty_path = Path::new("");
1409 self.entries_by_path
1410 .cursor::<()>()
1411 .filter(move |entry| entry.path.as_ref() != empty_path)
1412 .map(|entry| &entry.path)
1413 }
1414
1415 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1416 let mut cursor = self.entries_by_path.cursor();
1417 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1418 let traversal = Traversal {
1419 cursor,
1420 include_dirs: true,
1421 include_ignored: true,
1422 };
1423 ChildEntriesIter {
1424 traversal,
1425 parent_path,
1426 }
1427 }
1428
1429 pub fn root_entry(&self) -> Option<&Entry> {
1430 self.entry_for_path("")
1431 }
1432
1433 pub fn root_name(&self) -> &str {
1434 &self.root_name
1435 }
1436
1437 pub fn scan_started(&mut self) {
1438 self.scan_id += 1;
1439 }
1440
1441 pub fn scan_completed(&mut self) {
1442 self.completed_scan_id = self.scan_id;
1443 }
1444
1445 pub fn scan_id(&self) -> usize {
1446 self.scan_id
1447 }
1448
1449 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1450 let path = path.as_ref();
1451 self.traverse_from_path(true, true, path)
1452 .entry()
1453 .and_then(|entry| {
1454 if entry.path.as_ref() == path {
1455 Some(entry)
1456 } else {
1457 None
1458 }
1459 })
1460 }
1461
1462 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1463 let entry = self.entries_by_id.get(&id, &())?;
1464 self.entry_for_path(&entry.path)
1465 }
1466
1467 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1468 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1469 }
1470}
1471
1472impl LocalSnapshot {
1473 // Gives the most specific git repository for a given path
1474 pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
1475 self.git_repositories
1476 .iter()
1477 .rev() // git_repositories is ordered lexicographically
1478 .find(|repo| repo.manages(path))
1479 .cloned()
1480 }
1481
1482 pub(crate) fn in_dot_git(&mut self, path: &Path) -> Option<&mut GitRepositoryEntry> {
1483 // Git repositories cannot be nested, so we don't need to reverse the order
1484 self.git_repositories
1485 .iter_mut()
1486 .find(|repo| repo.in_dot_git(path))
1487 }
1488
1489 #[cfg(test)]
1490 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1491 let root_name = self.root_name.clone();
1492 proto::UpdateWorktree {
1493 project_id,
1494 worktree_id: self.id().to_proto(),
1495 abs_path: self.abs_path().to_string_lossy().into(),
1496 root_name,
1497 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1498 removed_entries: Default::default(),
1499 scan_id: self.scan_id as u64,
1500 is_last_update: true,
1501 }
1502 }
1503
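    /// Builds the `UpdateWorktree` message that brings `other` up to date with
    /// `self`, walking both snapshots' id-ordered entries and recording
    /// insertions, updates, and removals.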
1504 pub(crate) fn build_update(
1505 &self,
1506 other: &Self,
1507 project_id: u64,
1508 worktree_id: u64,
1509 include_ignored: bool,
1510 ) -> proto::UpdateWorktree {
1511 let mut updated_entries = Vec::new();
1512 let mut removed_entries = Vec::new();
1513 let mut self_entries = self
1514 .entries_by_id
1515 .cursor::<()>()
1516 .filter(|e| include_ignored || !e.is_ignored)
1517 .peekable();
1518 let mut other_entries = other
1519 .entries_by_id
1520 .cursor::<()>()
1521 .filter(|e| include_ignored || !e.is_ignored)
1522 .peekable();
1523 loop {
1524 match (self_entries.peek(), other_entries.peek()) {
1525 (Some(self_entry), Some(other_entry)) => {
1526 match Ord::cmp(&self_entry.id, &other_entry.id) {
1527 Ordering::Less => {
1528 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1529 updated_entries.push(entry);
1530 self_entries.next();
1531 }
1532 Ordering::Equal => {
1533 if self_entry.scan_id != other_entry.scan_id {
1534 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1535 updated_entries.push(entry);
1536 }
1537
1538 self_entries.next();
1539 other_entries.next();
1540 }
1541 Ordering::Greater => {
1542 removed_entries.push(other_entry.id.to_proto());
1543 other_entries.next();
1544 }
1545 }
1546 }
1547 (Some(self_entry), None) => {
1548 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1549 updated_entries.push(entry);
1550 self_entries.next();
1551 }
1552 (None, Some(other_entry)) => {
1553 removed_entries.push(other_entry.id.to_proto());
1554 other_entries.next();
1555 }
1556 (None, None) => break,
1557 }
1558 }
1559
1560 proto::UpdateWorktree {
1561 project_id,
1562 worktree_id,
1563 abs_path: self.abs_path().to_string_lossy().into(),
1564 root_name: self.root_name().to_string(),
1565 updated_entries,
1566 removed_entries,
1567 scan_id: self.scan_id as u64,
1568 is_last_update: self.completed_scan_id == self.scan_id,
1569 }
1570 }
1571
1572 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1573 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1574 let abs_path = self.abs_path.join(&entry.path);
1575 match smol::block_on(build_gitignore(&abs_path, fs)) {
1576 Ok(ignore) => {
1577 self.ignores_by_parent_abs_path.insert(
1578 abs_path.parent().unwrap().into(),
1579 (Arc::new(ignore), self.scan_id),
1580 );
1581 }
1582 Err(error) => {
1583 log::error!(
1584 "error loading .gitignore file {:?} - {:?}",
1585 &entry.path,
1586 error
1587 );
1588 }
1589 }
1590 }
1591
1592 self.reuse_entry_id(&mut entry);
1593
1594 if entry.kind == EntryKind::PendingDir {
1595 if let Some(existing_entry) =
1596 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1597 {
1598 entry.kind = existing_entry.kind;
1599 }
1600 }
1601
1602 let scan_id = self.scan_id;
1603 self.entries_by_path.insert_or_replace(entry.clone(), &());
1604 self.entries_by_id.insert_or_replace(
1605 PathEntry {
1606 id: entry.id,
1607 path: entry.path.clone(),
1608 is_ignored: entry.is_ignored,
1609 scan_id,
1610 },
1611 &(),
1612 );
1613
1614 entry
1615 }
1616
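    /// Converts a pending directory entry into a scanned one and inserts its
    /// child entries, registering any `.gitignore` file or git repository
    /// discovered inside it.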
1617 fn populate_dir(
1618 &mut self,
1619 parent_path: Arc<Path>,
1620 entries: impl IntoIterator<Item = Entry>,
1621 ignore: Option<Arc<Gitignore>>,
1622 fs: &dyn Fs,
1623 ) {
1624 let mut parent_entry = if let Some(parent_entry) =
1625 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1626 {
1627 parent_entry.clone()
1628 } else {
1629 log::warn!(
1630 "populating a directory {:?} that has been removed",
1631 parent_path
1632 );
1633 return;
1634 };
1635
1636 if let Some(ignore) = ignore {
1637 self.ignores_by_parent_abs_path.insert(
1638 self.abs_path.join(&parent_path).into(),
1639 (ignore, self.scan_id),
1640 );
1641 }
1642 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1643 parent_entry.kind = EntryKind::Dir;
1644 } else {
1645 unreachable!();
1646 }
1647
1648 if parent_path.file_name() == Some(&DOT_GIT) {
1649 let abs_path = self.abs_path.join(&parent_path);
1650 let content_path: Arc<Path> = parent_path.parent().unwrap().into();
1651 if let Err(ix) = self
1652 .git_repositories
1653 .binary_search_by_key(&&content_path, |repo| &repo.content_path)
1654 {
1655 if let Some(repo) = fs.open_repo(abs_path.as_path()) {
1656 self.git_repositories.insert(
1657 ix,
1658 GitRepositoryEntry {
1659 repo,
1660 scan_id: 0,
1661 content_path,
1662 git_dir_path: parent_path,
1663 },
1664 );
1665 }
1666 }
1667 }
1668
1669 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1670 let mut entries_by_id_edits = Vec::new();
1671
1672 for mut entry in entries {
1673 self.reuse_entry_id(&mut entry);
1674 entries_by_id_edits.push(Edit::Insert(PathEntry {
1675 id: entry.id,
1676 path: entry.path.clone(),
1677 is_ignored: entry.is_ignored,
1678 scan_id: self.scan_id,
1679 }));
1680 entries_by_path_edits.push(Edit::Insert(entry));
1681 }
1682
1683 self.entries_by_path.edit(entries_by_path_edits, &());
1684 self.entries_by_id.edit(entries_by_id_edits, &());
1685 }
1686
1687 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1688 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1689 entry.id = removed_entry_id;
1690 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1691 entry.id = existing_entry.id;
1692 }
1693 }
1694
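    /// Removes `path` and all entries beneath it from the snapshot, remembering
    /// the removed entry ids (keyed by inode) so that re-created entries can
    /// reuse them, and marking any removed `.gitignore` or `.git` directory as
    /// changed in the current scan.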
1695 fn remove_path(&mut self, path: &Path) {
1696 let mut new_entries;
1697 let removed_entries;
1698 {
1699 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1700 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1701 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1702 new_entries.push_tree(cursor.suffix(&()), &());
1703 }
1704 self.entries_by_path = new_entries;
1705
1706 let mut entries_by_id_edits = Vec::new();
1707 for entry in removed_entries.cursor::<()>() {
1708 let removed_entry_id = self
1709 .removed_entry_ids
1710 .entry(entry.inode)
1711 .or_insert(entry.id);
1712 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1713 entries_by_id_edits.push(Edit::Remove(entry.id));
1714 }
1715 self.entries_by_id.edit(entries_by_id_edits, &());
1716
1717 if path.file_name() == Some(&GITIGNORE) {
1718 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1719 if let Some((_, scan_id)) = self
1720 .ignores_by_parent_abs_path
1721 .get_mut(abs_parent_path.as_path())
1722 {
1723 *scan_id = self.snapshot.scan_id;
1724 }
1725 } else if path.file_name() == Some(&DOT_GIT) {
1726 let parent_path = path.parent().unwrap();
1727 if let Ok(ix) = self
1728 .git_repositories
1729 .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
1730 {
1731 self.git_repositories[ix].scan_id = self.snapshot.scan_id;
1732 }
1733 }
1734 }
1735
1736 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1737 let mut inodes = TreeSet::default();
1738 for ancestor in path.ancestors().skip(1) {
1739 if let Some(entry) = self.entry_for_path(ancestor) {
1740 inodes.insert(entry.inode);
1741 }
1742 }
1743 inodes
1744 }
1745
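    /// Builds the stack of `.gitignore` rules that apply to `abs_path` by walking
    /// its ancestors from the root downward, short-circuiting to "ignore
    /// everything" once an ancestor directory is itself ignored.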
1746 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1747 let mut new_ignores = Vec::new();
1748 for ancestor in abs_path.ancestors().skip(1) {
1749 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1750 new_ignores.push((ancestor, Some(ignore.clone())));
1751 } else {
1752 new_ignores.push((ancestor, None));
1753 }
1754 }
1755
1756 let mut ignore_stack = IgnoreStack::none();
1757 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1758 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1759 ignore_stack = IgnoreStack::all();
1760 break;
1761 } else if let Some(ignore) = ignore {
1762 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1763 }
1764 }
1765
1766 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1767 ignore_stack = IgnoreStack::all();
1768 }
1769
1770 ignore_stack
1771 }
1772
1773 pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
1774 &self.git_repositories
1775 }
1776}
1777
1778impl GitRepositoryEntry {
1779 // Note that these paths should be relative to the worktree root.
1780 pub(crate) fn manages(&self, path: &Path) -> bool {
1781 path.starts_with(self.content_path.as_ref())
1782 }
1783
1784 // Note that this path should be relative to the worktree root.
1785 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1786 path.starts_with(self.git_dir_path.as_ref())
1787 }
1788}
1789
1790async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1791 let contents = fs.load(abs_path).await?;
1792 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1793 let mut builder = GitignoreBuilder::new(parent);
1794 for line in contents.lines() {
1795 builder.add_line(Some(abs_path.into()), line)?;
1796 }
1797 Ok(builder.build()?)
1798}
1799
1800impl WorktreeId {
1801 pub fn from_usize(handle_id: usize) -> Self {
1802 Self(handle_id)
1803 }
1804
1805 pub(crate) fn from_proto(id: u64) -> Self {
1806 Self(id as usize)
1807 }
1808
1809 pub fn to_proto(&self) -> u64 {
1810 self.0 as u64
1811 }
1812
1813 pub fn to_usize(&self) -> usize {
1814 self.0
1815 }
1816}
1817
1818impl fmt::Display for WorktreeId {
1819 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1820 self.0.fmt(f)
1821 }
1822}
1823
1824impl Deref for Worktree {
1825 type Target = Snapshot;
1826
1827 fn deref(&self) -> &Self::Target {
1828 match self {
1829 Worktree::Local(worktree) => &worktree.snapshot,
1830 Worktree::Remote(worktree) => &worktree.snapshot,
1831 }
1832 }
1833}
1834
1835impl Deref for LocalWorktree {
1836 type Target = LocalSnapshot;
1837
1838 fn deref(&self) -> &Self::Target {
1839 &self.snapshot
1840 }
1841}
1842
1843impl Deref for RemoteWorktree {
1844 type Target = Snapshot;
1845
1846 fn deref(&self) -> &Self::Target {
1847 &self.snapshot
1848 }
1849}
1850
1851impl fmt::Debug for LocalWorktree {
1852 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1853 self.snapshot.fmt(f)
1854 }
1855}
1856
1857impl fmt::Debug for Snapshot {
1858 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1859 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1860 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1861
1862 impl<'a> fmt::Debug for EntriesByPath<'a> {
1863 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1864 f.debug_map()
1865 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1866 .finish()
1867 }
1868 }
1869
1870 impl<'a> fmt::Debug for EntriesById<'a> {
1871 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1872 f.debug_list().entries(self.0.iter()).finish()
1873 }
1874 }
1875
1876 f.debug_struct("Snapshot")
1877 .field("id", &self.id)
1878 .field("root_name", &self.root_name)
1879 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1880 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1881 .finish()
1882 }
1883}
1884
1885#[derive(Clone, PartialEq)]
1886pub struct File {
1887 pub worktree: ModelHandle<Worktree>,
1888 pub path: Arc<Path>,
1889 pub mtime: SystemTime,
1890 pub(crate) entry_id: ProjectEntryId,
1891 pub(crate) is_local: bool,
1892 pub(crate) is_deleted: bool,
1893}
1894
1895impl language::File for File {
1896 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1897 if self.is_local {
1898 Some(self)
1899 } else {
1900 None
1901 }
1902 }
1903
1904 fn mtime(&self) -> SystemTime {
1905 self.mtime
1906 }
1907
1908 fn path(&self) -> &Arc<Path> {
1909 &self.path
1910 }
1911
1912 fn full_path(&self, cx: &AppContext) -> PathBuf {
1913 let mut full_path = PathBuf::new();
1914 let worktree = self.worktree.read(cx);
1915
1916 if worktree.is_visible() {
1917 full_path.push(worktree.root_name());
1918 } else {
1919 let path = worktree.abs_path();
1920
1921 if worktree.is_local() && path.starts_with(HOME.as_path()) {
1922 full_path.push("~");
1923 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
1924 } else {
1925 full_path.push(path)
1926 }
1927 }
1928
1929 if self.path.components().next().is_some() {
1930 full_path.push(&self.path);
1931 }
1932
1933 full_path
1934 }
1935
1936 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1937 /// of its worktree, then this method will return the name of the worktree itself.
1938 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1939 self.path
1940 .file_name()
1941 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1942 }
1943
1944 fn is_deleted(&self) -> bool {
1945 self.is_deleted
1946 }
1947
1948 fn as_any(&self) -> &dyn Any {
1949 self
1950 }
1951
1952 fn to_proto(&self) -> rpc::proto::File {
1953 rpc::proto::File {
1954 worktree_id: self.worktree.id() as u64,
1955 entry_id: self.entry_id.to_proto(),
1956 path: self.path.to_string_lossy().into(),
1957 mtime: Some(self.mtime.into()),
1958 is_deleted: self.is_deleted,
1959 }
1960 }
1961}
1962
1963impl language::LocalFile for File {
1964 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1965 self.worktree
1966 .read(cx)
1967 .as_local()
1968 .unwrap()
1969 .abs_path
1970 .join(&self.path)
1971 }
1972
1973 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1974 let worktree = self.worktree.read(cx).as_local().unwrap();
1975 let abs_path = worktree.absolutize(&self.path);
1976 let fs = worktree.fs.clone();
1977 cx.background()
1978 .spawn(async move { fs.load(&abs_path).await })
1979 }
1980
1981 fn buffer_reloaded(
1982 &self,
1983 buffer_id: u64,
1984 version: &clock::Global,
1985 fingerprint: RopeFingerprint,
1986 line_ending: LineEnding,
1987 mtime: SystemTime,
1988 cx: &mut MutableAppContext,
1989 ) {
1990 let worktree = self.worktree.read(cx).as_local().unwrap();
1991 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1992 worktree
1993 .client
1994 .send(proto::BufferReloaded {
1995 project_id,
1996 buffer_id,
1997 version: serialize_version(version),
1998 mtime: Some(mtime.into()),
1999 fingerprint: serialize_fingerprint(fingerprint),
2000 line_ending: serialize_line_ending(line_ending) as i32,
2001 })
2002 .log_err();
2003 }
2004 }
2005}
2006
2007impl File {
2008 pub fn from_proto(
2009 proto: rpc::proto::File,
2010 worktree: ModelHandle<Worktree>,
2011 cx: &AppContext,
2012 ) -> Result<Self> {
2013 let worktree_id = worktree
2014 .read(cx)
2015 .as_remote()
2016 .ok_or_else(|| anyhow!("not remote"))?
2017 .id();
2018
2019 if worktree_id.to_proto() != proto.worktree_id {
2020 return Err(anyhow!("worktree id does not match file"));
2021 }
2022
2023 Ok(Self {
2024 worktree,
2025 path: Path::new(&proto.path).into(),
2026 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2027 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2028 is_local: false,
2029 is_deleted: proto.is_deleted,
2030 })
2031 }
2032
2033 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2034 file.and_then(|f| f.as_any().downcast_ref())
2035 }
2036
2037 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2038 self.worktree.read(cx).id()
2039 }
2040
2041 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2042 if self.is_deleted {
2043 None
2044 } else {
2045 Some(self.entry_id)
2046 }
2047 }
2048}
2049
2050#[derive(Clone, Debug, PartialEq, Eq)]
2051pub struct Entry {
2052 pub id: ProjectEntryId,
2053 pub kind: EntryKind,
2054 pub path: Arc<Path>,
2055 pub inode: u64,
2056 pub mtime: SystemTime,
2057 pub is_symlink: bool,
2058 pub is_ignored: bool,
2059}
2060
2061#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2062pub enum EntryKind {
2063 PendingDir,
2064 Dir,
2065 File(CharBag),
2066}
2067
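/// Describes how a path changed between two snapshots of the worktree. The
/// `AddedOrUpdated` variant is used for events received before the initial scan
/// completed, when additions can't be distinguished from updates.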
2068#[derive(Clone, Copy, Debug)]
2069pub enum PathChange {
2070 Added,
2071 Removed,
2072 Updated,
2073 AddedOrUpdated,
2074}
2075
2076impl Entry {
2077 fn new(
2078 path: Arc<Path>,
2079 metadata: &fs::Metadata,
2080 next_entry_id: &AtomicUsize,
2081 root_char_bag: CharBag,
2082 ) -> Self {
2083 Self {
2084 id: ProjectEntryId::new(next_entry_id),
2085 kind: if metadata.is_dir {
2086 EntryKind::PendingDir
2087 } else {
2088 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2089 },
2090 path,
2091 inode: metadata.inode,
2092 mtime: metadata.mtime,
2093 is_symlink: metadata.is_symlink,
2094 is_ignored: false,
2095 }
2096 }
2097
2098 pub fn is_dir(&self) -> bool {
2099 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2100 }
2101
2102 pub fn is_file(&self) -> bool {
2103 matches!(self.kind, EntryKind::File(_))
2104 }
2105}
2106
2107impl sum_tree::Item for Entry {
2108 type Summary = EntrySummary;
2109
2110 fn summary(&self) -> Self::Summary {
2111 let visible_count = if self.is_ignored { 0 } else { 1 };
2112 let file_count;
2113 let visible_file_count;
2114 if self.is_file() {
2115 file_count = 1;
2116 visible_file_count = visible_count;
2117 } else {
2118 file_count = 0;
2119 visible_file_count = 0;
2120 }
2121
2122 EntrySummary {
2123 max_path: self.path.clone(),
2124 count: 1,
2125 visible_count,
2126 file_count,
2127 visible_file_count,
2128 }
2129 }
2130}
2131
2132impl sum_tree::KeyedItem for Entry {
2133 type Key = PathKey;
2134
2135 fn key(&self) -> Self::Key {
2136 PathKey(self.path.clone())
2137 }
2138}
2139
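/// Summary data aggregated over a subtree of entries, allowing the sum tree to be
/// traversed by path or by entry offset, with or without directories and ignored
/// entries included in the counts.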
2140#[derive(Clone, Debug)]
2141pub struct EntrySummary {
2142 max_path: Arc<Path>,
2143 count: usize,
2144 visible_count: usize,
2145 file_count: usize,
2146 visible_file_count: usize,
2147}
2148
2149impl Default for EntrySummary {
2150 fn default() -> Self {
2151 Self {
2152 max_path: Arc::from(Path::new("")),
2153 count: 0,
2154 visible_count: 0,
2155 file_count: 0,
2156 visible_file_count: 0,
2157 }
2158 }
2159}
2160
2161impl sum_tree::Summary for EntrySummary {
2162 type Context = ();
2163
2164 fn add_summary(&mut self, rhs: &Self, _: &()) {
2165 self.max_path = rhs.max_path.clone();
2166 self.count += rhs.count;
2167 self.visible_count += rhs.visible_count;
2168 self.file_count += rhs.file_count;
2169 self.visible_file_count += rhs.visible_file_count;
2170 }
2171}
2172
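/// A lightweight entry stored in a secondary index keyed by `ProjectEntryId`,
/// used to look up an entry's path and ignore status from its id.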
2173#[derive(Clone, Debug)]
2174struct PathEntry {
2175 id: ProjectEntryId,
2176 path: Arc<Path>,
2177 is_ignored: bool,
2178 scan_id: usize,
2179}
2180
2181impl sum_tree::Item for PathEntry {
2182 type Summary = PathEntrySummary;
2183
2184 fn summary(&self) -> Self::Summary {
2185 PathEntrySummary { max_id: self.id }
2186 }
2187}
2188
2189impl sum_tree::KeyedItem for PathEntry {
2190 type Key = ProjectEntryId;
2191
2192 fn key(&self) -> Self::Key {
2193 self.id
2194 }
2195}
2196
2197#[derive(Clone, Debug, Default)]
2198struct PathEntrySummary {
2199 max_id: ProjectEntryId,
2200}
2201
2202impl sum_tree::Summary for PathEntrySummary {
2203 type Context = ();
2204
2205 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2206 self.max_id = summary.max_id;
2207 }
2208}
2209
2210impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2211 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2212 *self = summary.max_id;
2213 }
2214}
2215
2216#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2217pub struct PathKey(Arc<Path>);
2218
2219impl Default for PathKey {
2220 fn default() -> Self {
2221 Self(Path::new("").into())
2222 }
2223}
2224
2225impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2226 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2227 self.0 = summary.max_path.clone();
2228 }
2229}
2230
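/// Scans the worktree's directory tree on a background thread, keeping the shared
/// snapshot up to date in response to file-system events, recording per-path changes,
/// and reporting scan progress through the `notify` channel.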
2231struct BackgroundScanner {
2232 fs: Arc<dyn Fs>,
2233 snapshot: Arc<Mutex<LocalSnapshot>>,
2234 changes: Arc<Mutex<HashMap<Arc<Path>, PathChange>>>,
2235 notify: UnboundedSender<ScanState>,
2236 executor: Arc<executor::Background>,
2237}
2238
2239impl BackgroundScanner {
2240 fn new(
2241 snapshot: Arc<Mutex<LocalSnapshot>>,
2242 changes: Arc<Mutex<HashMap<Arc<Path>, PathChange>>>,
2243 notify: UnboundedSender<ScanState>,
2244 fs: Arc<dyn Fs>,
2245 executor: Arc<executor::Background>,
2246 ) -> Self {
2247 Self {
2248 fs,
2249 snapshot,
2250 changes,
2251 notify,
2252 executor,
2253 }
2254 }
2255
2256 fn abs_path(&self) -> Arc<Path> {
2257 self.snapshot.lock().abs_path.clone()
2258 }
2259
2260 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2261 if self.notify.unbounded_send(ScanState::Initializing).is_err() {
2262 return;
2263 }
2264
2265 if let Err(err) = self.scan_dirs().await {
2266 if self
2267 .notify
2268 .unbounded_send(ScanState::Err(Arc::new(err)))
2269 .is_err()
2270 {
2271 return;
2272 }
2273 }
2274
2275 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2276 return;
2277 }
2278
2279 futures::pin_mut!(events_rx);
2280
2281 // Process any events that occurred while performing the initial scan. These
2282 // events can't be reported as precisely, because there is no snapshot of the
2283 // worktree before they occurred.
2284 if let Some(mut events) = events_rx.next().await {
2285 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
2286 events.extend(additional_events);
2287 }
2288 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2289 return;
2290 }
2291 if !self.process_events(events, true).await {
2292 return;
2293 }
2294 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2295 return;
2296 }
2297 }
2298
2299 // Continue processing events until the worktree is dropped.
2300 while let Some(mut events) = events_rx.next().await {
2301 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
2302 events.extend(additional_events);
2303 }
2304 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2305 return;
2306 }
2307 if !self.process_events(events, false).await {
2308 return;
2309 }
2310 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2311 return;
2312 }
2313 }
2314 }
2315
2316 async fn scan_dirs(&mut self) -> Result<()> {
2317 let root_char_bag;
2318 let root_abs_path;
2319 let root_inode;
2320 let is_dir;
2321 let next_entry_id;
2322 {
2323 let mut snapshot = self.snapshot.lock();
2324 snapshot.scan_started();
2325 root_char_bag = snapshot.root_char_bag;
2326 root_abs_path = snapshot.abs_path.clone();
2327 root_inode = snapshot.root_entry().map(|e| e.inode);
2328 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
2329 next_entry_id = snapshot.next_entry_id.clone();
2330 };
2331
2332 // Populate ignores above the root.
2333 for ancestor in root_abs_path.ancestors().skip(1) {
2334 if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2335 {
2336 self.snapshot
2337 .lock()
2338 .ignores_by_parent_abs_path
2339 .insert(ancestor.into(), (ignore.into(), 0));
2340 }
2341 }
2342
2343 let ignore_stack = {
2344 let mut snapshot = self.snapshot.lock();
2345 let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2346 if ignore_stack.is_all() {
2347 if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2348 root_entry.is_ignored = true;
2349 snapshot.insert_entry(root_entry, self.fs.as_ref());
2350 }
2351 }
2352 ignore_stack
2353 };
2354
2355 if is_dir {
2356 let path: Arc<Path> = Arc::from(Path::new(""));
2357 let mut ancestor_inodes = TreeSet::default();
2358 if let Some(root_inode) = root_inode {
2359 ancestor_inodes.insert(root_inode);
2360 }
2361
2362 let (tx, rx) = channel::unbounded();
2363 self.executor
2364 .block(tx.send(ScanJob {
2365 abs_path: root_abs_path.to_path_buf(),
2366 path,
2367 ignore_stack,
2368 ancestor_inodes,
2369 scan_queue: tx.clone(),
2370 }))
2371 .unwrap();
2372 drop(tx);
2373
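            // Scan directories in parallel. Each worker pulls jobs from the queue and may
            // enqueue more jobs for subdirectories; the loop ends once all senders are dropped.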
2374 self.executor
2375 .scoped(|scope| {
2376 for _ in 0..self.executor.num_cpus() {
2377 scope.spawn(async {
2378 while let Ok(job) = rx.recv().await {
2379 if let Err(err) = self
2380 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2381 .await
2382 {
2383 log::error!("error scanning {:?}: {}", job.abs_path, err);
2384 }
2385 }
2386 });
2387 }
2388 })
2389 .await;
2390
2391 self.snapshot.lock().scan_completed();
2392 }
2393
2394 Ok(())
2395 }
2396
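    /// Scans a single directory: reads its children, builds entries for them, extends the
    /// ignore stack if a `.gitignore` is present, and enqueues scan jobs for child directories.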
2397 async fn scan_dir(
2398 &self,
2399 root_char_bag: CharBag,
2400 next_entry_id: Arc<AtomicUsize>,
2401 job: &ScanJob,
2402 ) -> Result<()> {
2403 let mut new_entries: Vec<Entry> = Vec::new();
2404 let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2405 let mut ignore_stack = job.ignore_stack.clone();
2406 let mut new_ignore = None;
2407
2408 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2409 while let Some(child_abs_path) = child_paths.next().await {
2410 let child_abs_path = match child_abs_path {
2411 Ok(child_abs_path) => child_abs_path,
2412 Err(error) => {
2413 log::error!("error processing entry {:?}", error);
2414 continue;
2415 }
2416 };
2417
2418 let child_name = child_abs_path.file_name().unwrap();
2419 let child_path: Arc<Path> = job.path.join(child_name).into();
2420 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2421 Ok(Some(metadata)) => metadata,
2422 Ok(None) => continue,
2423 Err(err) => {
2424 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2425 continue;
2426 }
2427 };
2428
2429 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2430 if child_name == *GITIGNORE {
2431 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2432 Ok(ignore) => {
2433 let ignore = Arc::new(ignore);
2434 ignore_stack =
2435 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2436 new_ignore = Some(ignore);
2437 }
2438 Err(error) => {
2439 log::error!(
2440 "error loading .gitignore file {:?} - {:?}",
2441 child_name,
2442 error
2443 );
2444 }
2445 }
2446
                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore` starts
                // with a `.`, it is typically encountered early, so these entries should rarely
                // be numerous. Update the ignore stack associated with any new jobs as well.
2451 let mut new_jobs = new_jobs.iter_mut();
2452 for entry in &mut new_entries {
2453 let entry_abs_path = self.abs_path().join(&entry.path);
2454 entry.is_ignored =
2455 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2456
2457 if entry.is_dir() {
2458 if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2459 job.ignore_stack = if entry.is_ignored {
2460 IgnoreStack::all()
2461 } else {
2462 ignore_stack.clone()
2463 };
2464 }
2465 }
2466 }
2467 }
2468
2469 let mut child_entry = Entry::new(
2470 child_path.clone(),
2471 &child_metadata,
2472 &next_entry_id,
2473 root_char_bag,
2474 );
2475
2476 if child_entry.is_dir() {
2477 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2478 child_entry.is_ignored = is_ignored;
2479
                // Avoid recursing endlessly (and eventually crashing) in the case of a recursive symlink.
2481 if !job.ancestor_inodes.contains(&child_entry.inode) {
2482 let mut ancestor_inodes = job.ancestor_inodes.clone();
2483 ancestor_inodes.insert(child_entry.inode);
2484
2485 new_jobs.push(Some(ScanJob {
2486 abs_path: child_abs_path,
2487 path: child_path,
2488 ignore_stack: if is_ignored {
2489 IgnoreStack::all()
2490 } else {
2491 ignore_stack.clone()
2492 },
2493 ancestor_inodes,
2494 scan_queue: job.scan_queue.clone(),
2495 }));
2496 } else {
2497 new_jobs.push(None);
2498 }
2499 } else {
2500 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2501 }
2502
2503 new_entries.push(child_entry);
2504 }
2505
2506 self.snapshot.lock().populate_dir(
2507 job.path.clone(),
2508 new_entries,
2509 new_ignore,
2510 self.fs.as_ref(),
2511 );
2512
2513 for new_job in new_jobs {
2514 if let Some(new_job) = new_job {
2515 job.scan_queue.send(new_job).await.unwrap();
2516 }
2517 }
2518
2519 Ok(())
2520 }
2521
2522 async fn process_events(
2523 &mut self,
2524 mut events: Vec<fsevent::Event>,
2525 received_before_initialized: bool,
2526 ) -> bool {
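        // Sort the events by path and drop any event whose path is equal to, or contained
        // within, the path of an earlier event in the batch.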
2527 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2528 events.dedup_by(|a, b| a.path.starts_with(&b.path));
2529
2530 let root_char_bag;
2531 let root_abs_path;
2532 let next_entry_id;
2533 let prev_snapshot;
2534 {
2535 let mut snapshot = self.snapshot.lock();
2536 prev_snapshot = snapshot.snapshot.clone();
2537 root_char_bag = snapshot.root_char_bag;
2538 root_abs_path = snapshot.abs_path.clone();
2539 next_entry_id = snapshot.next_entry_id.clone();
2540 snapshot.scan_started();
2541 }
2542
2543 let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
2544 path
2545 } else {
2546 return false;
2547 };
2548 let metadata = futures::future::join_all(
2549 events
2550 .iter()
2551 .map(|event| self.fs.metadata(&event.path))
2552 .collect::<Vec<_>>(),
2553 )
2554 .await;
2555
        // Hold the snapshot lock while removing and re-inserting the entries at each
        // event's path. This way, the snapshot cannot be observed by the foreground
        // thread while this operation is in progress.
2559 let mut event_paths = Vec::with_capacity(events.len());
2560 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2561 {
2562 let mut snapshot = self.snapshot.lock();
2563 for event in &events {
2564 if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
2565 snapshot.remove_path(path);
2566 }
2567 }
2568
2569 for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
2570 let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
2571 Ok(path) => Arc::from(path.to_path_buf()),
2572 Err(_) => {
2573 log::error!(
2574 "unexpected event {:?} for root path {:?}",
2575 event.path,
2576 root_canonical_path
2577 );
2578 continue;
2579 }
2580 };
2581 event_paths.push(path.clone());
2582 let abs_path = root_abs_path.join(&path);
2583
2584 match metadata {
2585 Ok(Some(metadata)) => {
2586 let ignore_stack =
2587 snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2588 let mut fs_entry = Entry::new(
2589 path.clone(),
2590 &metadata,
2591 snapshot.next_entry_id.as_ref(),
2592 snapshot.root_char_bag,
2593 );
2594 fs_entry.is_ignored = ignore_stack.is_all();
2595 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2596
2597 let scan_id = snapshot.scan_id;
2598 if let Some(repo) = snapshot.in_dot_git(&path) {
2599 repo.repo.lock().reload_index();
2600 repo.scan_id = scan_id;
2601 }
2602
2603 let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2604 if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2605 ancestor_inodes.insert(metadata.inode);
2606 self.executor
2607 .block(scan_queue_tx.send(ScanJob {
2608 abs_path,
2609 path,
2610 ignore_stack,
2611 ancestor_inodes,
2612 scan_queue: scan_queue_tx.clone(),
2613 }))
2614 .unwrap();
2615 }
2616 }
2617 Ok(None) => {}
2618 Err(err) => {
2619 // TODO - create a special 'error' entry in the entries tree to mark this
2620 log::error!("error reading file on event {:?}", err);
2621 }
2622 }
2623 }
2624 drop(scan_queue_tx);
2625 }
2626
2627 // Scan any directories that were created as part of this event batch.
2628 self.executor
2629 .scoped(|scope| {
2630 for _ in 0..self.executor.num_cpus() {
2631 scope.spawn(async {
2632 while let Ok(job) = scan_queue_rx.recv().await {
2633 if let Err(err) = self
2634 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2635 .await
2636 {
2637 log::error!("error scanning {:?}: {}", job.abs_path, err);
2638 }
2639 }
2640 });
2641 }
2642 })
2643 .await;
2644
2645 // Attempt to detect renames only over a single batch of file-system events.
2646 self.snapshot.lock().removed_entry_ids.clear();
2647
2648 self.update_ignore_statuses().await;
2649 self.update_git_repositories();
2650 self.build_change_set(prev_snapshot, event_paths, received_before_initialized);
2651 self.snapshot.lock().scan_completed();
2652 true
2653 }
2654
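    /// Recomputes ignore statuses for entries beneath any `.gitignore` file that changed in
    /// the latest scan, and drops ignore data for `.gitignore` files that no longer exist.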
2655 async fn update_ignore_statuses(&self) {
2656 let mut snapshot = self.snapshot.lock().clone();
2657 let mut ignores_to_update = Vec::new();
2658 let mut ignores_to_delete = Vec::new();
2659 for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2660 if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2661 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2662 ignores_to_update.push(parent_abs_path.clone());
2663 }
2664
2665 let ignore_path = parent_path.join(&*GITIGNORE);
2666 if snapshot.entry_for_path(ignore_path).is_none() {
2667 ignores_to_delete.push(parent_abs_path.clone());
2668 }
2669 }
2670 }
2671
2672 for parent_abs_path in ignores_to_delete {
2673 snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2674 self.snapshot
2675 .lock()
2676 .ignores_by_parent_abs_path
2677 .remove(&parent_abs_path);
2678 }
2679
2680 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2681 ignores_to_update.sort_unstable();
2682 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2683 while let Some(parent_abs_path) = ignores_to_update.next() {
2684 while ignores_to_update
2685 .peek()
2686 .map_or(false, |p| p.starts_with(&parent_abs_path))
2687 {
2688 ignores_to_update.next().unwrap();
2689 }
2690
2691 let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2692 ignore_queue_tx
2693 .send(UpdateIgnoreStatusJob {
2694 abs_path: parent_abs_path,
2695 ignore_stack,
2696 ignore_queue: ignore_queue_tx.clone(),
2697 })
2698 .await
2699 .unwrap();
2700 }
2701 drop(ignore_queue_tx);
2702
2703 self.executor
2704 .scoped(|scope| {
2705 for _ in 0..self.executor.num_cpus() {
2706 scope.spawn(async {
2707 while let Ok(job) = ignore_queue_rx.recv().await {
2708 self.update_ignore_status(job, &snapshot).await;
2709 }
2710 });
2711 }
2712 })
2713 .await;
2714 }
2715
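    /// Drops any git repository entries whose `.git` directory is no longer present in the snapshot.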
2716 fn update_git_repositories(&self) {
2717 let mut snapshot = self.snapshot.lock();
2718 let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2719 git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
2720 snapshot.git_repositories = git_repositories;
2721 }
2722
2723 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2724 let mut ignore_stack = job.ignore_stack;
2725 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
2726 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2727 }
2728
2729 let mut entries_by_id_edits = Vec::new();
2730 let mut entries_by_path_edits = Vec::new();
2731 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
2732 for mut entry in snapshot.child_entries(path).cloned() {
2733 let was_ignored = entry.is_ignored;
2734 let abs_path = self.abs_path().join(&entry.path);
2735 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
2736 if entry.is_dir() {
2737 let child_ignore_stack = if entry.is_ignored {
2738 IgnoreStack::all()
2739 } else {
2740 ignore_stack.clone()
2741 };
2742 job.ignore_queue
2743 .send(UpdateIgnoreStatusJob {
2744 abs_path: abs_path.into(),
2745 ignore_stack: child_ignore_stack,
2746 ignore_queue: job.ignore_queue.clone(),
2747 })
2748 .await
2749 .unwrap();
2750 }
2751
2752 if entry.is_ignored != was_ignored {
2753 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2754 path_entry.scan_id = snapshot.scan_id;
2755 path_entry.is_ignored = entry.is_ignored;
2756 entries_by_id_edits.push(Edit::Insert(path_entry));
2757 entries_by_path_edits.push(Edit::Insert(entry));
2758 }
2759 }
2760
2761 let mut snapshot = self.snapshot.lock();
2762 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2763 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2764 }
2765
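    /// Diffs the old and new snapshots around each event path, recording the paths that were
    /// added, removed, or updated in the shared change set.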
2766 fn build_change_set(
2767 &self,
2768 old_snapshot: Snapshot,
2769 event_paths: Vec<Arc<Path>>,
2770 received_before_initialized: bool,
2771 ) {
2772 let new_snapshot = self.snapshot.lock();
2773 let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
2774 let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
2775
2776 let mut change_set = self.changes.lock();
2777 for path in event_paths {
2778 let path = PathKey(path);
2779 old_paths.seek(&path, Bias::Left, &());
2780 new_paths.seek(&path, Bias::Left, &());
2781
2782 loop {
2783 match (old_paths.item(), new_paths.item()) {
2784 (Some(old_entry), Some(new_entry)) => {
2785 if old_entry.path > path.0
2786 && new_entry.path > path.0
2787 && !old_entry.path.starts_with(&path.0)
2788 && !new_entry.path.starts_with(&path.0)
2789 {
2790 break;
2791 }
2792
2793 match Ord::cmp(&old_entry.path, &new_entry.path) {
2794 Ordering::Less => {
2795 change_set.insert(old_entry.path.clone(), PathChange::Removed);
2796 old_paths.next(&());
2797 }
2798 Ordering::Equal => {
2799 if received_before_initialized {
2800 // If the worktree was not fully initialized when this event was generated,
2801 // we can't know whether this entry was added during the scan or whether
2802 // it was merely updated.
2803 change_set
2804 .insert(old_entry.path.clone(), PathChange::AddedOrUpdated);
2805 } else if old_entry.mtime != new_entry.mtime {
2806 change_set.insert(old_entry.path.clone(), PathChange::Updated);
2807 }
2808 old_paths.next(&());
2809 new_paths.next(&());
2810 }
2811 Ordering::Greater => {
2812 change_set.insert(new_entry.path.clone(), PathChange::Added);
2813 new_paths.next(&());
2814 }
2815 }
2816 }
2817 (Some(old_entry), None) => {
2818 change_set.insert(old_entry.path.clone(), PathChange::Removed);
2819 old_paths.next(&());
2820 }
2821 (None, Some(new_entry)) => {
2822 change_set.insert(new_entry.path.clone(), PathChange::Added);
2823 new_paths.next(&());
2824 }
2825 (None, None) => break,
2826 }
2827 }
2828 }
2829 }
2830}
2831
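/// Builds the fuzzy-matching character bag for a path by extending the worktree root's bag
/// with the path's lowercased characters.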
2832fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2833 let mut result = root_char_bag;
2834 result.extend(
2835 path.to_string_lossy()
2836 .chars()
2837 .map(|c| c.to_ascii_lowercase()),
2838 );
2839 result
2840}
2841
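/// A unit of work for the parallel directory scan. Each job carries a clone of the queue's
/// sender so that workers can enqueue jobs for the subdirectories they discover.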
2842struct ScanJob {
2843 abs_path: PathBuf,
2844 path: Arc<Path>,
2845 ignore_stack: Arc<IgnoreStack>,
2846 scan_queue: Sender<ScanJob>,
2847 ancestor_inodes: TreeSet<u64>,
2848}
2849
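/// A unit of work for re-evaluating ignore statuses beneath a directory whose `.gitignore`
/// has changed. Like `ScanJob`, it carries the queue's sender so that workers can enqueue
/// jobs for child directories.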
2850struct UpdateIgnoreStatusJob {
2851 abs_path: Arc<Path>,
2852 ignore_stack: Arc<IgnoreStack>,
2853 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2854}
2855
2856pub trait WorktreeHandle {
2857 #[cfg(any(test, feature = "test-support"))]
2858 fn flush_fs_events<'a>(
2859 &self,
2860 cx: &'a gpui::TestAppContext,
2861 ) -> futures::future::LocalBoxFuture<'a, ()>;
2862}
2863
2864impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans and emit extra scan-state notifications.
2868 //
2869 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2870 // to ensure that all redundant FS events have already been processed.
2871 #[cfg(any(test, feature = "test-support"))]
2872 fn flush_fs_events<'a>(
2873 &self,
2874 cx: &'a gpui::TestAppContext,
2875 ) -> futures::future::LocalBoxFuture<'a, ()> {
2876 use smol::future::FutureExt;
2877
2878 let filename = "fs-event-sentinel";
2879 let tree = self.clone();
2880 let (fs, root_path) = self.read_with(cx, |tree, _| {
2881 let tree = tree.as_local().unwrap();
2882 (tree.fs.clone(), tree.abs_path().clone())
2883 });
2884
2885 async move {
2886 fs.create_file(&root_path.join(filename), Default::default())
2887 .await
2888 .unwrap();
2889 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
2890 .await;
2891
2892 fs.remove_file(&root_path.join(filename), Default::default())
2893 .await
2894 .unwrap();
2895 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
2896 .await;
2897
2898 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2899 .await;
2900 }
2901 .boxed_local()
2902 }
2903}
2904
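/// A sum-tree dimension that tracks how many entries (and how many visible entries and
/// files) precede a given position, enabling seeks by offset that include or exclude
/// directories and ignored entries.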
2905#[derive(Clone, Debug)]
2906struct TraversalProgress<'a> {
2907 max_path: &'a Path,
2908 count: usize,
2909 visible_count: usize,
2910 file_count: usize,
2911 visible_file_count: usize,
2912}
2913
2914impl<'a> TraversalProgress<'a> {
2915 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2916 match (include_ignored, include_dirs) {
2917 (true, true) => self.count,
2918 (true, false) => self.file_count,
2919 (false, true) => self.visible_count,
2920 (false, false) => self.visible_file_count,
2921 }
2922 }
2923}
2924
2925impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2926 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2927 self.max_path = summary.max_path.as_ref();
2928 self.count += summary.count;
2929 self.visible_count += summary.visible_count;
2930 self.file_count += summary.file_count;
2931 self.visible_file_count += summary.visible_file_count;
2932 }
2933}
2934
2935impl<'a> Default for TraversalProgress<'a> {
2936 fn default() -> Self {
2937 Self {
2938 max_path: Path::new(""),
2939 count: 0,
2940 visible_count: 0,
2941 file_count: 0,
2942 visible_file_count: 0,
2943 }
2944 }
2945}
2946
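/// An iterator over a worktree's entries in path order, optionally including directories
/// and ignored entries.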
2947pub struct Traversal<'a> {
2948 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2949 include_ignored: bool,
2950 include_dirs: bool,
2951}
2952
2953impl<'a> Traversal<'a> {
2954 pub fn advance(&mut self) -> bool {
2955 self.advance_to_offset(self.offset() + 1)
2956 }
2957
2958 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2959 self.cursor.seek_forward(
2960 &TraversalTarget::Count {
2961 count: offset,
2962 include_dirs: self.include_dirs,
2963 include_ignored: self.include_ignored,
2964 },
2965 Bias::Right,
2966 &(),
2967 )
2968 }
2969
2970 pub fn advance_to_sibling(&mut self) -> bool {
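        // Seek past the current entry and all of its descendants until reaching an entry
        // that satisfies this traversal's directory and ignore filters.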
2971 while let Some(entry) = self.cursor.item() {
2972 self.cursor.seek_forward(
2973 &TraversalTarget::PathSuccessor(&entry.path),
2974 Bias::Left,
2975 &(),
2976 );
2977 if let Some(entry) = self.cursor.item() {
2978 if (self.include_dirs || !entry.is_dir())
2979 && (self.include_ignored || !entry.is_ignored)
2980 {
2981 return true;
2982 }
2983 }
2984 }
2985 false
2986 }
2987
2988 pub fn entry(&self) -> Option<&'a Entry> {
2989 self.cursor.item()
2990 }
2991
2992 pub fn offset(&self) -> usize {
2993 self.cursor
2994 .start()
2995 .count(self.include_dirs, self.include_ignored)
2996 }
2997}
2998
2999impl<'a> Iterator for Traversal<'a> {
3000 type Item = &'a Entry;
3001
3002 fn next(&mut self) -> Option<Self::Item> {
3003 if let Some(item) = self.entry() {
3004 self.advance();
3005 Some(item)
3006 } else {
3007 None
3008 }
3009 }
3010}
3011
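/// A target for seeking within a traversal: an exact path, the position just past a given
/// path and all of its descendants, or an entry count under the traversal's filters.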
3012#[derive(Debug)]
3013enum TraversalTarget<'a> {
3014 Path(&'a Path),
3015 PathSuccessor(&'a Path),
3016 Count {
3017 count: usize,
3018 include_ignored: bool,
3019 include_dirs: bool,
3020 },
3021}
3022
3023impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3024 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3025 match self {
3026 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3027 TraversalTarget::PathSuccessor(path) => {
3028 if !cursor_location.max_path.starts_with(path) {
3029 Ordering::Equal
3030 } else {
3031 Ordering::Greater
3032 }
3033 }
3034 TraversalTarget::Count {
3035 count,
3036 include_dirs,
3037 include_ignored,
3038 } => Ord::cmp(
3039 count,
3040 &cursor_location.count(*include_dirs, *include_ignored),
3041 ),
3042 }
3043 }
3044}
3045
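/// Iterates over the direct children of a directory by advancing to each entry's next
/// sibling, so that descendants of those children are skipped.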
3046struct ChildEntriesIter<'a> {
3047 parent_path: &'a Path,
3048 traversal: Traversal<'a>,
3049}
3050
3051impl<'a> Iterator for ChildEntriesIter<'a> {
3052 type Item = &'a Entry;
3053
3054 fn next(&mut self) -> Option<Self::Item> {
3055 if let Some(item) = self.traversal.entry() {
3056 if item.path.starts_with(&self.parent_path) {
3057 self.traversal.advance_to_sibling();
3058 return Some(item);
3059 }
3060 }
3061 None
3062 }
3063}
3064
3065impl<'a> From<&'a Entry> for proto::Entry {
3066 fn from(entry: &'a Entry) -> Self {
3067 Self {
3068 id: entry.id.to_proto(),
3069 is_dir: entry.is_dir(),
3070 path: entry.path.to_string_lossy().into(),
3071 inode: entry.inode,
3072 mtime: Some(entry.mtime.into()),
3073 is_symlink: entry.is_symlink,
3074 is_ignored: entry.is_ignored,
3075 }
3076 }
3077}
3078
3079impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3080 type Error = anyhow::Error;
3081
3082 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3083 if let Some(mtime) = entry.mtime {
3084 let kind = if entry.is_dir {
3085 EntryKind::Dir
3086 } else {
3087 let mut char_bag = *root_char_bag;
3088 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3089 EntryKind::File(char_bag)
3090 };
3091 let path: Arc<Path> = PathBuf::from(entry.path).into();
3092 Ok(Entry {
3093 id: ProjectEntryId::from_proto(entry.id),
3094 kind,
3095 path,
3096 inode: entry.inode,
3097 mtime: mtime.into(),
3098 is_symlink: entry.is_symlink,
3099 is_ignored: entry.is_ignored,
3100 })
3101 } else {
3102 Err(anyhow!(
3103 "missing mtime in remote worktree entry {:?}",
3104 entry.path
3105 ))
3106 }
3107 }
3108}
3109
3110#[cfg(test)]
3111mod tests {
3112 use super::*;
3113 use client::test::FakeHttpClient;
3114 use fs::repository::FakeGitRepository;
3115 use fs::{FakeFs, RealFs};
3116 use gpui::{executor::Deterministic, TestAppContext};
3117 use rand::prelude::*;
3118 use serde_json::json;
3119 use std::{env, fmt::Write};
3120 use util::test::temp_tree;
3121
3122 #[gpui::test]
3123 async fn test_traversal(cx: &mut TestAppContext) {
3124 let fs = FakeFs::new(cx.background());
3125 fs.insert_tree(
3126 "/root",
3127 json!({
3128 ".gitignore": "a/b\n",
3129 "a": {
3130 "b": "",
3131 "c": "",
3132 }
3133 }),
3134 )
3135 .await;
3136
3137 let http_client = FakeHttpClient::with_404_response();
3138 let client = cx.read(|cx| Client::new(http_client, cx));
3139
3140 let tree = Worktree::local(
3141 client,
3142 Arc::from(Path::new("/root")),
3143 true,
3144 fs,
3145 Default::default(),
3146 &mut cx.to_async(),
3147 )
3148 .await
3149 .unwrap();
3150 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3151 .await;
3152
3153 tree.read_with(cx, |tree, _| {
3154 assert_eq!(
3155 tree.entries(false)
3156 .map(|entry| entry.path.as_ref())
3157 .collect::<Vec<_>>(),
3158 vec![
3159 Path::new(""),
3160 Path::new(".gitignore"),
3161 Path::new("a"),
3162 Path::new("a/c"),
3163 ]
3164 );
3165 assert_eq!(
3166 tree.entries(true)
3167 .map(|entry| entry.path.as_ref())
3168 .collect::<Vec<_>>(),
3169 vec![
3170 Path::new(""),
3171 Path::new(".gitignore"),
3172 Path::new("a"),
3173 Path::new("a/b"),
3174 Path::new("a/c"),
3175 ]
3176 );
3177 })
3178 }
3179
3180 #[gpui::test(iterations = 10)]
3181 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3182 let fs = FakeFs::new(cx.background());
3183 fs.insert_tree(
3184 "/root",
3185 json!({
3186 "lib": {
3187 "a": {
3188 "a.txt": ""
3189 },
3190 "b": {
3191 "b.txt": ""
3192 }
3193 }
3194 }),
3195 )
3196 .await;
3197 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3198 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3199
3200 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3201 let tree = Worktree::local(
3202 client,
3203 Arc::from(Path::new("/root")),
3204 true,
3205 fs.clone(),
3206 Default::default(),
3207 &mut cx.to_async(),
3208 )
3209 .await
3210 .unwrap();
3211
3212 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3213 .await;
3214
3215 tree.read_with(cx, |tree, _| {
3216 assert_eq!(
3217 tree.entries(false)
3218 .map(|entry| entry.path.as_ref())
3219 .collect::<Vec<_>>(),
3220 vec![
3221 Path::new(""),
3222 Path::new("lib"),
3223 Path::new("lib/a"),
3224 Path::new("lib/a/a.txt"),
3225 Path::new("lib/a/lib"),
3226 Path::new("lib/b"),
3227 Path::new("lib/b/b.txt"),
3228 Path::new("lib/b/lib"),
3229 ]
3230 );
3231 });
3232
3233 fs.rename(
3234 Path::new("/root/lib/a/lib"),
3235 Path::new("/root/lib/a/lib-2"),
3236 Default::default(),
3237 )
3238 .await
3239 .unwrap();
3240 executor.run_until_parked();
3241 tree.read_with(cx, |tree, _| {
3242 assert_eq!(
3243 tree.entries(false)
3244 .map(|entry| entry.path.as_ref())
3245 .collect::<Vec<_>>(),
3246 vec![
3247 Path::new(""),
3248 Path::new("lib"),
3249 Path::new("lib/a"),
3250 Path::new("lib/a/a.txt"),
3251 Path::new("lib/a/lib-2"),
3252 Path::new("lib/b"),
3253 Path::new("lib/b/b.txt"),
3254 Path::new("lib/b/lib"),
3255 ]
3256 );
3257 });
3258 }
3259
3260 #[gpui::test]
3261 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3262 let parent_dir = temp_tree(json!({
3263 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3264 "tree": {
3265 ".git": {},
3266 ".gitignore": "ignored-dir\n",
3267 "tracked-dir": {
3268 "tracked-file1": "",
3269 "ancestor-ignored-file1": "",
3270 },
3271 "ignored-dir": {
3272 "ignored-file1": ""
3273 }
3274 }
3275 }));
3276 let dir = parent_dir.path().join("tree");
3277
3278 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3279
3280 let tree = Worktree::local(
3281 client,
3282 dir.as_path(),
3283 true,
3284 Arc::new(RealFs),
3285 Default::default(),
3286 &mut cx.to_async(),
3287 )
3288 .await
3289 .unwrap();
3290 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3291 .await;
3292 tree.flush_fs_events(cx).await;
3293 cx.read(|cx| {
3294 let tree = tree.read(cx);
3295 assert!(
3296 !tree
3297 .entry_for_path("tracked-dir/tracked-file1")
3298 .unwrap()
3299 .is_ignored
3300 );
3301 assert!(
3302 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3303 .unwrap()
3304 .is_ignored
3305 );
3306 assert!(
3307 tree.entry_for_path("ignored-dir/ignored-file1")
3308 .unwrap()
3309 .is_ignored
3310 );
3311 });
3312
3313 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3314 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3315 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3316 tree.flush_fs_events(cx).await;
3317 cx.read(|cx| {
3318 let tree = tree.read(cx);
3319 assert!(
3320 !tree
3321 .entry_for_path("tracked-dir/tracked-file2")
3322 .unwrap()
3323 .is_ignored
3324 );
3325 assert!(
3326 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3327 .unwrap()
3328 .is_ignored
3329 );
3330 assert!(
3331 tree.entry_for_path("ignored-dir/ignored-file2")
3332 .unwrap()
3333 .is_ignored
3334 );
3335 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3336 });
3337 }
3338
3339 #[gpui::test]
3340 async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3341 let root = temp_tree(json!({
3342 "dir1": {
3343 ".git": {},
3344 "deps": {
3345 "dep1": {
3346 ".git": {},
3347 "src": {
3348 "a.txt": ""
3349 }
3350 }
3351 },
3352 "src": {
3353 "b.txt": ""
3354 }
3355 },
3356 "c.txt": "",
3357 }));
3358
3359 let http_client = FakeHttpClient::with_404_response();
3360 let client = cx.read(|cx| Client::new(http_client, cx));
3361 let tree = Worktree::local(
3362 client,
3363 root.path(),
3364 true,
3365 Arc::new(RealFs),
3366 Default::default(),
3367 &mut cx.to_async(),
3368 )
3369 .await
3370 .unwrap();
3371
3372 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3373 .await;
3374 tree.flush_fs_events(cx).await;
3375
3376 tree.read_with(cx, |tree, _cx| {
3377 let tree = tree.as_local().unwrap();
3378
3379 assert!(tree.repo_for("c.txt".as_ref()).is_none());
3380
3381 let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3382 assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
3383 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));
3384
3385 let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
3386 assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
3387 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
3388 });
3389
3390 let original_scan_id = tree.read_with(cx, |tree, _cx| {
3391 let tree = tree.as_local().unwrap();
3392 tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
3393 });
3394
3395 std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
3396 tree.flush_fs_events(cx).await;
3397
3398 tree.read_with(cx, |tree, _cx| {
3399 let tree = tree.as_local().unwrap();
3400 let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
3401 assert_ne!(
3402 original_scan_id, new_scan_id,
3403 "original {original_scan_id}, new {new_scan_id}"
3404 );
3405 });
3406
3407 std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
3408 tree.flush_fs_events(cx).await;
3409
3410 tree.read_with(cx, |tree, _cx| {
3411 let tree = tree.as_local().unwrap();
3412
3413 assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
3414 });
3415 }
3416
3417 #[test]
3418 fn test_changed_repos() {
3419 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3420 GitRepositoryEntry {
3421 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3422 scan_id,
3423 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3424 git_dir_path: git_dir_path.as_ref().into(),
3425 }
3426 }
3427
3428 let prev_repos: Vec<GitRepositoryEntry> = vec![
3429 fake_entry("/.git", 0),
3430 fake_entry("/a/.git", 0),
3431 fake_entry("/a/b/.git", 0),
3432 ];
3433
3434 let new_repos: Vec<GitRepositoryEntry> = vec![
3435 fake_entry("/a/.git", 1),
3436 fake_entry("/a/b/.git", 0),
3437 fake_entry("/a/c/.git", 0),
3438 ];
3439
3440 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3441
3442 // Deletion retained
3443 assert!(res
3444 .iter()
3445 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3446 .is_some());
3447
3448 // Update retained
3449 assert!(res
3450 .iter()
3451 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3452 .is_some());
3453
3454 // Addition retained
3455 assert!(res
3456 .iter()
3457 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3458 .is_some());
3459
        // No change, so not retained
3461 assert!(res
3462 .iter()
3463 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3464 .is_none());
3465 }
3466
3467 #[gpui::test]
3468 async fn test_write_file(cx: &mut TestAppContext) {
3469 let dir = temp_tree(json!({
3470 ".git": {},
3471 ".gitignore": "ignored-dir\n",
3472 "tracked-dir": {},
3473 "ignored-dir": {}
3474 }));
3475
3476 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3477
3478 let tree = Worktree::local(
3479 client,
3480 dir.path(),
3481 true,
3482 Arc::new(RealFs),
3483 Default::default(),
3484 &mut cx.to_async(),
3485 )
3486 .await
3487 .unwrap();
3488 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3489 .await;
3490 tree.flush_fs_events(cx).await;
3491
3492 tree.update(cx, |tree, cx| {
3493 tree.as_local().unwrap().write_file(
3494 Path::new("tracked-dir/file.txt"),
3495 "hello".into(),
3496 Default::default(),
3497 cx,
3498 )
3499 })
3500 .await
3501 .unwrap();
3502 tree.update(cx, |tree, cx| {
3503 tree.as_local().unwrap().write_file(
3504 Path::new("ignored-dir/file.txt"),
3505 "world".into(),
3506 Default::default(),
3507 cx,
3508 )
3509 })
3510 .await
3511 .unwrap();
3512
3513 tree.read_with(cx, |tree, _| {
3514 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3515 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3516 assert!(!tracked.is_ignored);
3517 assert!(ignored.is_ignored);
3518 });
3519 }
3520
3521 #[gpui::test(iterations = 30)]
3522 async fn test_create_directory(cx: &mut TestAppContext) {
3523 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3524
3525 let fs = FakeFs::new(cx.background());
3526 fs.insert_tree(
3527 "/a",
3528 json!({
3529 "b": {},
3530 "c": {},
3531 "d": {},
3532 }),
3533 )
3534 .await;
3535
3536 let tree = Worktree::local(
3537 client,
3538 "/a".as_ref(),
3539 true,
3540 fs,
3541 Default::default(),
3542 &mut cx.to_async(),
3543 )
3544 .await
3545 .unwrap();
3546
3547 let entry = tree
3548 .update(cx, |tree, cx| {
3549 tree.as_local_mut()
3550 .unwrap()
3551 .create_entry("a/e".as_ref(), true, cx)
3552 })
3553 .await
3554 .unwrap();
3555 assert!(entry.is_dir());
3556
3557 cx.foreground().run_until_parked();
3558 tree.read_with(cx, |tree, _| {
3559 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3560 });
3561 }
3562
3563 #[gpui::test(iterations = 100)]
3564 async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
3565 let operations = env::var("OPERATIONS")
3566 .map(|o| o.parse().unwrap())
3567 .unwrap_or(40);
3568 let initial_entries = env::var("INITIAL_ENTRIES")
3569 .map(|o| o.parse().unwrap())
3570 .unwrap_or(20);
3571
3572 let root_dir = Path::new("/test");
3573 let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
3574 fs.as_fake().insert_tree(root_dir, json!({})).await;
3575 for _ in 0..initial_entries {
3576 randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
3577 }
3578 log::info!("generated initial tree");
3579
3580 let next_entry_id = Arc::new(AtomicUsize::default());
3581 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3582 let worktree = Worktree::local(
3583 client.clone(),
3584 root_dir,
3585 true,
3586 fs.clone(),
3587 next_entry_id.clone(),
3588 &mut cx.to_async(),
3589 )
3590 .await
3591 .unwrap();
3592
3593 worktree
3594 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3595 .await;
3596
3597 let mut snapshots = Vec::new();
3598 let mut mutations_len = operations;
3599 while mutations_len > 1 {
3600 randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
3601 let buffered_event_count = fs.as_fake().buffered_event_count().await;
3602 if buffered_event_count > 0 && rng.gen_bool(0.3) {
3603 let len = rng.gen_range(0..=buffered_event_count);
3604 log::info!("flushing {} events", len);
3605 fs.as_fake().flush_events(len).await;
3606 } else {
3607 randomly_mutate_tree(&fs, root_dir, 0.6, &mut rng).await;
3608 mutations_len -= 1;
3609 }
3610
3611 cx.foreground().run_until_parked();
3612 if rng.gen_bool(0.2) {
3613 log::info!("storing snapshot {}", snapshots.len());
3614 let snapshot =
3615 worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3616 snapshots.push(snapshot);
3617 }
3618 }
3619
3620 log::info!("quiescing");
3621 fs.as_fake().flush_events(usize::MAX).await;
3622 cx.foreground().run_until_parked();
3623 let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3624 snapshot.check_invariants();
3625
3626 {
3627 let new_worktree = Worktree::local(
3628 client.clone(),
3629 root_dir,
3630 true,
3631 fs.clone(),
3632 next_entry_id,
3633 &mut cx.to_async(),
3634 )
3635 .await
3636 .unwrap();
3637 new_worktree
3638 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3639 .await;
3640 let new_snapshot =
3641 new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3642 assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
3643 }
3644
3645 for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
3646 let include_ignored = rng.gen::<bool>();
3647 if !include_ignored {
3648 let mut entries_by_path_edits = Vec::new();
3649 let mut entries_by_id_edits = Vec::new();
3650 for entry in prev_snapshot
3651 .entries_by_id
3652 .cursor::<()>()
3653 .filter(|e| e.is_ignored)
3654 {
3655 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3656 entries_by_id_edits.push(Edit::Remove(entry.id));
3657 }
3658
3659 prev_snapshot
3660 .entries_by_path
3661 .edit(entries_by_path_edits, &());
3662 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3663 }
3664
3665 let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
3666 prev_snapshot.apply_remote_update(update.clone()).unwrap();
3667 assert_eq!(
3668 prev_snapshot.to_vec(include_ignored),
3669 snapshot.to_vec(include_ignored),
3670 "wrong update for snapshot {i}. update: {:?}",
3671 update
3672 );
3673 }
3674 }
3675
3676 async fn randomly_mutate_tree(
3677 fs: &Arc<dyn Fs>,
3678 root_path: &Path,
3679 insertion_probability: f64,
3680 rng: &mut impl Rng,
3681 ) {
3682 let mut files = Vec::new();
3683 let mut dirs = Vec::new();
3684 for path in fs.as_fake().paths().await {
3685 if path.starts_with(root_path) {
3686 if fs.is_file(&path).await {
3687 files.push(path);
3688 } else {
3689 dirs.push(path);
3690 }
3691 }
3692 }
3693
3694 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
3695 let path = dirs.choose(rng).unwrap();
3696 let new_path = path.join(gen_name(rng));
3697
3698 if rng.gen() {
3699 log::info!(
3700 "Creating dir {:?}",
3701 new_path.strip_prefix(root_path).unwrap()
3702 );
3703 fs.create_dir(&new_path).await.unwrap();
3704 } else {
3705 log::info!(
3706 "Creating file {:?}",
3707 new_path.strip_prefix(root_path).unwrap()
3708 );
3709 fs.create_file(&new_path, Default::default()).await.unwrap();
3710 }
3711 } else if rng.gen_bool(0.05) {
3712 let ignore_dir_path = dirs.choose(rng).unwrap();
3713 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
3714
3715 let subdirs = dirs
3716 .iter()
3717 .filter(|d| d.starts_with(&ignore_dir_path))
3718 .cloned()
3719 .collect::<Vec<_>>();
3720 let subfiles = files
3721 .iter()
3722 .filter(|d| d.starts_with(&ignore_dir_path))
3723 .cloned()
3724 .collect::<Vec<_>>();
3725 let files_to_ignore = {
3726 let len = rng.gen_range(0..=subfiles.len());
3727 subfiles.choose_multiple(rng, len)
3728 };
3729 let dirs_to_ignore = {
3730 let len = rng.gen_range(0..subdirs.len());
3731 subdirs.choose_multiple(rng, len)
3732 };
3733
3734 let mut ignore_contents = String::new();
3735 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
3736 writeln!(
3737 ignore_contents,
3738 "{}",
3739 path_to_ignore
3740 .strip_prefix(&ignore_dir_path)
3741 .unwrap()
3742 .to_str()
3743 .unwrap()
3744 )
3745 .unwrap();
3746 }
3747 log::info!(
3748 "Creating {:?} with contents:\n{}",
3749 ignore_path.strip_prefix(&root_path).unwrap(),
3750 ignore_contents
3751 );
3752 fs.save(
3753 &ignore_path,
3754 &ignore_contents.as_str().into(),
3755 Default::default(),
3756 )
3757 .await
3758 .unwrap();
3759 } else {
3760 let old_path = {
3761 let file_path = files.choose(rng);
3762 let dir_path = dirs[1..].choose(rng);
3763 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
3764 };
3765
3766 let is_rename = rng.gen();
3767 if is_rename {
3768 let new_path_parent = dirs
3769 .iter()
3770 .filter(|d| !d.starts_with(old_path))
3771 .choose(rng)
3772 .unwrap();
3773
3774 let overwrite_existing_dir =
3775 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
3776 let new_path = if overwrite_existing_dir {
3777 fs.remove_dir(
3778 &new_path_parent,
3779 RemoveOptions {
3780 recursive: true,
3781 ignore_if_not_exists: true,
3782 },
3783 )
3784 .await
3785 .unwrap();
3786 new_path_parent.to_path_buf()
3787 } else {
3788 new_path_parent.join(gen_name(rng))
3789 };
3790
3791 log::info!(
3792 "Renaming {:?} to {}{:?}",
3793 old_path.strip_prefix(&root_path).unwrap(),
3794 if overwrite_existing_dir {
3795 "overwrite "
3796 } else {
3797 ""
3798 },
3799 new_path.strip_prefix(&root_path).unwrap()
3800 );
3801 fs.rename(
3802 &old_path,
3803 &new_path,
3804 fs::RenameOptions {
3805 overwrite: true,
3806 ignore_if_exists: true,
3807 },
3808 )
3809 .await
3810 .unwrap();
3811 } else if fs.is_file(&old_path).await {
3812 log::info!(
3813 "Deleting file {:?}",
3814 old_path.strip_prefix(&root_path).unwrap()
3815 );
3816 fs.remove_file(old_path, Default::default()).await.unwrap();
3817 } else {
3818 log::info!(
3819 "Deleting dir {:?}",
3820 old_path.strip_prefix(&root_path).unwrap()
3821 );
3822 fs.remove_dir(
3823 &old_path,
3824 RemoveOptions {
3825 recursive: true,
3826 ignore_if_not_exists: true,
3827 },
3828 )
3829 .await
3830 .unwrap();
3831 }
3832 }
3833 }
3834
3835 fn gen_name(rng: &mut impl Rng) -> String {
3836 (0..6)
3837 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3838 .map(char::from)
3839 .collect()
3840 }
3841
3842 impl LocalSnapshot {
3843 fn check_invariants(&self) {
3844 let mut files = self.files(true, 0);
3845 let mut visible_files = self.files(false, 0);
3846 for entry in self.entries_by_path.cursor::<()>() {
3847 if entry.is_file() {
3848 assert_eq!(files.next().unwrap().inode, entry.inode);
3849 if !entry.is_ignored {
3850 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3851 }
3852 }
3853 }
3854 assert!(files.next().is_none());
3855 assert!(visible_files.next().is_none());
3856
3857 let mut bfs_paths = Vec::new();
3858 let mut stack = vec![Path::new("")];
3859 while let Some(path) = stack.pop() {
3860 bfs_paths.push(path);
3861 let ix = stack.len();
3862 for child_entry in self.child_entries(path) {
3863 stack.insert(ix, &child_entry.path);
3864 }
3865 }
3866
3867 let dfs_paths_via_iter = self
3868 .entries_by_path
3869 .cursor::<()>()
3870 .map(|e| e.path.as_ref())
3871 .collect::<Vec<_>>();
3872 assert_eq!(bfs_paths, dfs_paths_via_iter);
3873
3874 let dfs_paths_via_traversal = self
3875 .entries(true)
3876 .map(|e| e.path.as_ref())
3877 .collect::<Vec<_>>();
3878 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3879
3880 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3881 let ignore_parent_path =
3882 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3883 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3884 assert!(self
3885 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3886 .is_some());
3887 }
3888 }
3889
3890 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3891 let mut paths = Vec::new();
3892 for entry in self.entries_by_path.cursor::<()>() {
3893 if include_ignored || !entry.is_ignored {
3894 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3895 }
3896 }
3897 paths.sort_by(|a, b| a.0.cmp(b.0));
3898 paths
3899 }
3900 }
3901}