1use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
2
3use super::{
4 fs::{self, Fs},
5 ignore::IgnoreStack,
6 DiagnosticSummary,
7};
8use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
9use anyhow::{anyhow, Context, Result};
10use client::{proto, Client};
11use clock::ReplicaId;
12use collections::{HashMap, VecDeque};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 Stream, StreamExt,
19};
20use fuzzy::CharBag;
21use gpui::{
22 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
23 Task,
24};
25use language::{
26 proto::{deserialize_version, serialize_line_ending, serialize_version},
27 Buffer, DiagnosticEntry, LineEnding, PointUtf16, Rope,
28};
29use lazy_static::lazy_static;
30use parking_lot::{Mutex, RwLock};
31use postage::{
32 prelude::{Sink as _, Stream as _},
33 watch,
34};
35use settings::Settings;
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::{OsStr, OsString},
42 fmt,
43 future::Future,
44 ops::{Deref, DerefMut},
45 os::unix::prelude::{OsStrExt, OsStringExt},
46 path::{Path, PathBuf},
47 sync::{atomic::AtomicUsize, Arc},
48 task::Poll,
49 time::{Duration, SystemTime},
50};
51use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
52use util::{ResultExt, TryFutureExt};
53
54lazy_static! {
55 static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
56}
57
58#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
59pub struct WorktreeId(usize);
60
61#[allow(clippy::large_enum_variant)]
62pub enum Worktree {
63 Local(LocalWorktree),
64 Remote(RemoteWorktree),
65}
66
67pub struct LocalWorktree {
68 snapshot: LocalSnapshot,
69 background_snapshot: Arc<Mutex<LocalSnapshot>>,
70 last_scan_state_rx: watch::Receiver<ScanState>,
71 _background_scanner_task: Option<Task<()>>,
72 poll_task: Option<Task<()>>,
73 share: Option<ShareState>,
74 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
75 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
76 client: Arc<Client>,
77 fs: Arc<dyn Fs>,
78 visible: bool,
79}
80
81pub struct RemoteWorktree {
82 pub snapshot: Snapshot,
83 pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
84 project_id: u64,
85 client: Arc<Client>,
86 updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
87 snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
88 replica_id: ReplicaId,
89 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
90 visible: bool,
91}
92
93#[derive(Clone)]
94pub struct Snapshot {
95 id: WorktreeId,
96 root_name: String,
97 root_char_bag: CharBag,
98 entries_by_path: SumTree<Entry>,
99 entries_by_id: SumTree<PathEntry>,
100 scan_id: usize,
101 is_complete: bool,
102}
103
104#[derive(Clone)]
105pub struct LocalSnapshot {
106 abs_path: Arc<Path>,
107 ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
108 git_repositories: Vec<GitRepositoryState>,
109 removed_entry_ids: HashMap<u64, ProjectEntryId>,
110 next_entry_id: Arc<AtomicUsize>,
111 snapshot: Snapshot,
112 extension_counts: HashMap<OsString, usize>,
113}
114
115#[derive(Clone)]
116pub(crate) struct GitRepositoryState {
117 content_path: Arc<Path>,
118 git_dir_path: Arc<Path>,
119 repository: Arc<Mutex<git2::Repository>>,
120}
121
122impl Deref for LocalSnapshot {
123 type Target = Snapshot;
124
125 fn deref(&self) -> &Self::Target {
126 &self.snapshot
127 }
128}
129
130impl DerefMut for LocalSnapshot {
131 fn deref_mut(&mut self) -> &mut Self::Target {
132 &mut self.snapshot
133 }
134}
135
136#[derive(Clone, Debug)]
137enum ScanState {
138 Idle,
139 /// The worktree is performing its initial scan of the filesystem.
140 Initializing,
141 /// The worktree is updating in response to filesystem events.
142 Updating,
143 Err(Arc<anyhow::Error>),
144}
145
146struct ShareState {
147 project_id: u64,
148 snapshots_tx: watch::Sender<LocalSnapshot>,
149 _maintain_remote_snapshot: Option<Task<Option<()>>>,
150}
151
152pub enum Event {
153 UpdatedEntries,
154 UpdatedGitRepositories(Vec<GitRepositoryState>),
155}
156
157impl Entity for Worktree {
158 type Event = Event;
159}
160
161impl Worktree {
162 pub async fn local(
163 client: Arc<Client>,
164 path: impl Into<Arc<Path>>,
165 visible: bool,
166 fs: Arc<dyn Fs>,
167 next_entry_id: Arc<AtomicUsize>,
168 cx: &mut AsyncAppContext,
169 ) -> Result<ModelHandle<Self>> {
170 let (tree, scan_states_tx) =
171 LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
172 tree.update(cx, |tree, cx| {
173 let tree = tree.as_local_mut().unwrap();
174 let abs_path = tree.abs_path().clone();
175 let background_snapshot = tree.background_snapshot.clone();
176 let background = cx.background().clone();
177 tree._background_scanner_task = Some(cx.background().spawn(async move {
178 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
179 let scanner =
180 BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
181 scanner.run(events).await;
182 }));
183 });
184 Ok(tree)
185 }
186
187 pub fn remote(
188 project_remote_id: u64,
189 replica_id: ReplicaId,
190 worktree: proto::WorktreeMetadata,
191 client: Arc<Client>,
192 cx: &mut MutableAppContext,
193 ) -> ModelHandle<Self> {
194 let remote_id = worktree.id;
195 let root_char_bag: CharBag = worktree
196 .root_name
197 .chars()
198 .map(|c| c.to_ascii_lowercase())
199 .collect();
200 let root_name = worktree.root_name.clone();
201 let visible = worktree.visible;
202 let snapshot = Snapshot {
203 id: WorktreeId(remote_id as usize),
204 root_name,
205 root_char_bag,
206 entries_by_path: Default::default(),
207 entries_by_id: Default::default(),
208 scan_id: 0,
209 is_complete: false,
210 };
211
212 let (updates_tx, mut updates_rx) = mpsc::unbounded();
213 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
214 let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
215 let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
216 Worktree::Remote(RemoteWorktree {
217 project_id: project_remote_id,
218 replica_id,
219 snapshot: snapshot.clone(),
220 background_snapshot: background_snapshot.clone(),
221 updates_tx: Some(updates_tx),
222 snapshot_subscriptions: Default::default(),
223 client: client.clone(),
224 diagnostic_summaries: Default::default(),
225 visible,
226 })
227 });
228
229 cx.background()
230 .spawn(async move {
231 while let Some(update) = updates_rx.next().await {
232 if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
233 log::error!("error applying worktree update: {}", error);
234 }
235 snapshot_updated_tx.send(()).await.ok();
236 }
237 })
238 .detach();
239
240 cx.spawn(|mut cx| {
241 let this = worktree_handle.downgrade();
242 async move {
243 while (snapshot_updated_rx.recv().await).is_some() {
244 if let Some(this) = this.upgrade(&cx) {
245 this.update(&mut cx, |this, cx| {
246 this.poll_snapshot(cx);
247 let this = this.as_remote_mut().unwrap();
248 while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
249 if this.observed_snapshot(*scan_id) {
250 let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
251 let _ = tx.send(());
252 } else {
253 break;
254 }
255 }
256 });
257 } else {
258 break;
259 }
260 }
261 }
262 })
263 .detach();
264
265 worktree_handle
266 }
267
268 pub fn as_local(&self) -> Option<&LocalWorktree> {
269 if let Worktree::Local(worktree) = self {
270 Some(worktree)
271 } else {
272 None
273 }
274 }
275
276 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
277 if let Worktree::Remote(worktree) = self {
278 Some(worktree)
279 } else {
280 None
281 }
282 }
283
284 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
285 if let Worktree::Local(worktree) = self {
286 Some(worktree)
287 } else {
288 None
289 }
290 }
291
292 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
293 if let Worktree::Remote(worktree) = self {
294 Some(worktree)
295 } else {
296 None
297 }
298 }
299
300 pub fn is_local(&self) -> bool {
301 matches!(self, Worktree::Local(_))
302 }
303
304 pub fn is_remote(&self) -> bool {
305 !self.is_local()
306 }
307
308 pub fn snapshot(&self) -> Snapshot {
309 match self {
310 Worktree::Local(worktree) => worktree.snapshot().snapshot,
311 Worktree::Remote(worktree) => worktree.snapshot(),
312 }
313 }
314
315 pub fn scan_id(&self) -> usize {
316 match self {
317 Worktree::Local(worktree) => worktree.snapshot.scan_id,
318 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
319 }
320 }
321
322 pub fn is_visible(&self) -> bool {
323 match self {
324 Worktree::Local(worktree) => worktree.visible,
325 Worktree::Remote(worktree) => worktree.visible,
326 }
327 }
328
329 pub fn replica_id(&self) -> ReplicaId {
330 match self {
331 Worktree::Local(_) => 0,
332 Worktree::Remote(worktree) => worktree.replica_id,
333 }
334 }
335
336 pub fn diagnostic_summaries(
337 &self,
338 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
339 match self {
340 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
341 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
342 }
343 .iter()
344 .map(|(path, summary)| (path.0.clone(), *summary))
345 }
346
347 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
348 match self {
349 Self::Local(worktree) => worktree.poll_snapshot(false, cx),
350 Self::Remote(worktree) => worktree.poll_snapshot(cx),
351 };
352 }
353}
354
355impl LocalWorktree {
356 async fn create(
357 client: Arc<Client>,
358 path: impl Into<Arc<Path>>,
359 visible: bool,
360 fs: Arc<dyn Fs>,
361 next_entry_id: Arc<AtomicUsize>,
362 cx: &mut AsyncAppContext,
363 ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
364 let abs_path = path.into();
365 let path: Arc<Path> = Arc::from(Path::new(""));
366
367 // After determining whether the root entry is a file or a directory, populate the
368 // snapshot's "root name", which will be used for the purpose of fuzzy matching.
369 let root_name = abs_path
370 .file_name()
371 .map_or(String::new(), |f| f.to_string_lossy().to_string());
372 let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
373 let metadata = fs
374 .metadata(&abs_path)
375 .await
376 .context("failed to stat worktree path")?;
377
378 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
379 let (mut last_scan_state_tx, last_scan_state_rx) =
380 watch::channel_with(ScanState::Initializing);
381 let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
382 let mut snapshot = LocalSnapshot {
383 abs_path,
384 ignores_by_parent_abs_path: Default::default(),
385 git_repositories: Default::default(),
386 removed_entry_ids: Default::default(),
387 next_entry_id,
388 snapshot: Snapshot {
389 id: WorktreeId::from_usize(cx.model_id()),
390 root_name: root_name.clone(),
391 root_char_bag,
392 entries_by_path: Default::default(),
393 entries_by_id: Default::default(),
394 scan_id: 0,
395 is_complete: true,
396 },
397 extension_counts: Default::default(),
398 };
399 if let Some(metadata) = metadata {
400 let entry = Entry::new(
401 path,
402 &metadata,
403 &snapshot.next_entry_id,
404 snapshot.root_char_bag,
405 );
406 snapshot.insert_entry(entry, fs.as_ref());
407 }
408
409 let tree = Self {
410 snapshot: snapshot.clone(),
411 background_snapshot: Arc::new(Mutex::new(snapshot)),
412 last_scan_state_rx,
413 _background_scanner_task: None,
414 share: None,
415 poll_task: None,
416 diagnostics: Default::default(),
417 diagnostic_summaries: Default::default(),
418 client,
419 fs,
420 visible,
421 };
422
423 cx.spawn_weak(|this, mut cx| async move {
424 while let Some(scan_state) = scan_states_rx.next().await {
425 if let Some(this) = this.upgrade(&cx) {
426 last_scan_state_tx.blocking_send(scan_state).ok();
427 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
428 } else {
429 break;
430 }
431 }
432 })
433 .detach();
434
435 Worktree::Local(tree)
436 });
437
438 Ok((tree, scan_states_tx))
439 }
440
441 pub fn contains_abs_path(&self, path: &Path) -> bool {
442 path.starts_with(&self.abs_path)
443 }
444
445 fn absolutize(&self, path: &Path) -> PathBuf {
446 if path.file_name().is_some() {
447 self.abs_path.join(path)
448 } else {
449 self.abs_path.to_path_buf()
450 }
451 }
452
453 pub(crate) fn load_buffer(
454 &mut self,
455 path: &Path,
456 cx: &mut ModelContext<Worktree>,
457 ) -> Task<Result<ModelHandle<Buffer>>> {
458 let path = Arc::from(path);
459 cx.spawn(move |this, mut cx| async move {
460 let (file, contents, head_text) = this
461 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
462 .await?;
463 Ok(cx.add_model(|cx| {
464 let mut buffer = Buffer::from_file(0, contents, head_text, Arc::new(file), cx);
465 buffer.update_git(cx);
466 buffer
467 }))
468 })
469 }
470
471 pub fn diagnostics_for_path(&self, path: &Path) -> Option<Vec<DiagnosticEntry<PointUtf16>>> {
472 self.diagnostics.get(path).cloned()
473 }
474
475 pub fn update_diagnostics(
476 &mut self,
477 language_server_id: usize,
478 worktree_path: Arc<Path>,
479 diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
480 _: &mut ModelContext<Worktree>,
481 ) -> Result<bool> {
482 self.diagnostics.remove(&worktree_path);
483 let old_summary = self
484 .diagnostic_summaries
485 .remove(&PathKey(worktree_path.clone()))
486 .unwrap_or_default();
487 let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
488 if !new_summary.is_empty() {
489 self.diagnostic_summaries
490 .insert(PathKey(worktree_path.clone()), new_summary);
491 self.diagnostics.insert(worktree_path.clone(), diagnostics);
492 }
493
494 let updated = !old_summary.is_empty() || !new_summary.is_empty();
495 if updated {
496 if let Some(share) = self.share.as_ref() {
497 self.client
498 .send(proto::UpdateDiagnosticSummary {
499 project_id: share.project_id,
500 worktree_id: self.id().to_proto(),
501 summary: Some(proto::DiagnosticSummary {
502 path: worktree_path.to_string_lossy().to_string(),
503 language_server_id: language_server_id as u64,
504 error_count: new_summary.error_count as u32,
505 warning_count: new_summary.warning_count as u32,
506 }),
507 })
508 .log_err();
509 }
510 }
511
512 Ok(updated)
513 }
514
515 fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
516 self.poll_task.take();
517
518 match self.scan_state() {
519 ScanState::Idle => {
520 self.snapshot = self.background_snapshot.lock().clone();
521 if let Some(share) = self.share.as_mut() {
522 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
523 }
524 cx.emit(Event::UpdatedEntries);
525 }
526
527 ScanState::Initializing => {
528 let is_fake_fs = self.fs.is_fake();
529 self.snapshot = self.background_snapshot.lock().clone();
530 self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
531 if is_fake_fs {
532 #[cfg(any(test, feature = "test-support"))]
533 cx.background().simulate_random_delay().await;
534 } else {
535 smol::Timer::after(Duration::from_millis(100)).await;
536 }
537 if let Some(this) = this.upgrade(&cx) {
538 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
539 }
540 }));
541 cx.emit(Event::UpdatedEntries);
542 }
543
544 _ => {
545 if force {
546 self.snapshot = self.background_snapshot.lock().clone();
547 }
548 }
549 }
550
551 cx.notify();
552 }
553
554 pub fn scan_complete(&self) -> impl Future<Output = ()> {
555 let mut scan_state_rx = self.last_scan_state_rx.clone();
556 async move {
557 let mut scan_state = Some(scan_state_rx.borrow().clone());
558 while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
559 scan_state = scan_state_rx.recv().await;
560 }
561 }
562 }
563
564 fn scan_state(&self) -> ScanState {
565 self.last_scan_state_rx.borrow().clone()
566 }
567
568 pub fn snapshot(&self) -> LocalSnapshot {
569 self.snapshot.clone()
570 }
571
572 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
573 proto::WorktreeMetadata {
574 id: self.id().to_proto(),
575 root_name: self.root_name().to_string(),
576 visible: self.visible,
577 }
578 }
579
580 fn load(
581 &self,
582 path: &Path,
583 cx: &mut ModelContext<Worktree>,
584 ) -> Task<Result<(File, String, Option<String>)>> {
585 let handle = cx.handle();
586 let path = Arc::from(path);
587 let abs_path = self.absolutize(&path);
588 let fs = self.fs.clone();
589
590 let files_included = cx
591 .global::<Settings>()
592 .editor_overrides
593 .git_gutter
594 .unwrap_or_default()
595 .files_included;
596
597 cx.spawn(|this, mut cx| async move {
598 let text = fs.load(&abs_path).await?;
599
600 let head_text = if matches!(
601 files_included,
602 settings::GitFilesIncluded::All | settings::GitFilesIncluded::OnlyTracked
603 ) {
604 let fs = fs.clone();
605 let abs_path = abs_path.clone();
606 let task = async move { fs.load_head_text(&abs_path).await };
607 let results = cx.background().spawn(task).await;
608
609 if files_included == settings::GitFilesIncluded::All {
610 results.or_else(|| Some(text.clone()))
611 } else {
612 results
613 }
614 } else {
615 None
616 };
617
618 // Eagerly populate the snapshot with an updated entry for the loaded file
619 let entry = this
620 .update(&mut cx, |this, cx| {
621 this.as_local()
622 .unwrap()
623 .refresh_entry(path, abs_path, None, cx)
624 })
625 .await?;
626
627 Ok((
628 File {
629 entry_id: Some(entry.id),
630 worktree: handle,
631 path: entry.path,
632 mtime: entry.mtime,
633 is_local: true,
634 },
635 text,
636 head_text,
637 ))
638 })
639 }
640
641 pub fn save_buffer_as(
642 &self,
643 buffer_handle: ModelHandle<Buffer>,
644 path: impl Into<Arc<Path>>,
645 cx: &mut ModelContext<Worktree>,
646 ) -> Task<Result<()>> {
647 let buffer = buffer_handle.read(cx);
648 let text = buffer.as_rope().clone();
649 let fingerprint = text.fingerprint();
650 let version = buffer.version();
651 let save = self.write_file(path, text, buffer.line_ending(), cx);
652 let handle = cx.handle();
653 cx.as_mut().spawn(|mut cx| async move {
654 let entry = save.await?;
655 let file = File {
656 entry_id: Some(entry.id),
657 worktree: handle,
658 path: entry.path,
659 mtime: entry.mtime,
660 is_local: true,
661 };
662
663 buffer_handle.update(&mut cx, |buffer, cx| {
664 buffer.did_save(version, fingerprint, file.mtime, Some(Arc::new(file)), cx);
665 });
666
667 Ok(())
668 })
669 }
670
671 pub fn create_entry(
672 &self,
673 path: impl Into<Arc<Path>>,
674 is_dir: bool,
675 cx: &mut ModelContext<Worktree>,
676 ) -> Task<Result<Entry>> {
677 self.write_entry_internal(
678 path,
679 if is_dir {
680 None
681 } else {
682 Some(Default::default())
683 },
684 cx,
685 )
686 }
687
688 pub fn write_file(
689 &self,
690 path: impl Into<Arc<Path>>,
691 text: Rope,
692 line_ending: LineEnding,
693 cx: &mut ModelContext<Worktree>,
694 ) -> Task<Result<Entry>> {
695 self.write_entry_internal(path, Some((text, line_ending)), cx)
696 }
697
698 pub fn delete_entry(
699 &self,
700 entry_id: ProjectEntryId,
701 cx: &mut ModelContext<Worktree>,
702 ) -> Option<Task<Result<()>>> {
703 let entry = self.entry_for_id(entry_id)?.clone();
704 let abs_path = self.absolutize(&entry.path);
705 let delete = cx.background().spawn({
706 let fs = self.fs.clone();
707 let abs_path = abs_path;
708 async move {
709 if entry.is_file() {
710 fs.remove_file(&abs_path, Default::default()).await
711 } else {
712 fs.remove_dir(
713 &abs_path,
714 RemoveOptions {
715 recursive: true,
716 ignore_if_not_exists: false,
717 },
718 )
719 .await
720 }
721 }
722 });
723
724 Some(cx.spawn(|this, mut cx| async move {
725 delete.await?;
726 this.update(&mut cx, |this, cx| {
727 let this = this.as_local_mut().unwrap();
728 {
729 let mut snapshot = this.background_snapshot.lock();
730 snapshot.delete_entry(entry_id);
731 }
732 this.poll_snapshot(true, cx);
733 });
734 Ok(())
735 }))
736 }
737
738 pub fn rename_entry(
739 &self,
740 entry_id: ProjectEntryId,
741 new_path: impl Into<Arc<Path>>,
742 cx: &mut ModelContext<Worktree>,
743 ) -> Option<Task<Result<Entry>>> {
744 let old_path = self.entry_for_id(entry_id)?.path.clone();
745 let new_path = new_path.into();
746 let abs_old_path = self.absolutize(&old_path);
747 let abs_new_path = self.absolutize(&new_path);
748 let rename = cx.background().spawn({
749 let fs = self.fs.clone();
750 let abs_new_path = abs_new_path.clone();
751 async move {
752 fs.rename(&abs_old_path, &abs_new_path, Default::default())
753 .await
754 }
755 });
756
757 Some(cx.spawn(|this, mut cx| async move {
758 rename.await?;
759 let entry = this
760 .update(&mut cx, |this, cx| {
761 this.as_local_mut().unwrap().refresh_entry(
762 new_path.clone(),
763 abs_new_path,
764 Some(old_path),
765 cx,
766 )
767 })
768 .await?;
769 Ok(entry)
770 }))
771 }
772
773 pub fn copy_entry(
774 &self,
775 entry_id: ProjectEntryId,
776 new_path: impl Into<Arc<Path>>,
777 cx: &mut ModelContext<Worktree>,
778 ) -> Option<Task<Result<Entry>>> {
779 let old_path = self.entry_for_id(entry_id)?.path.clone();
780 let new_path = new_path.into();
781 let abs_old_path = self.absolutize(&old_path);
782 let abs_new_path = self.absolutize(&new_path);
783 let copy = cx.background().spawn({
784 let fs = self.fs.clone();
785 let abs_new_path = abs_new_path.clone();
786 async move {
787 copy_recursive(
788 fs.as_ref(),
789 &abs_old_path,
790 &abs_new_path,
791 Default::default(),
792 )
793 .await
794 }
795 });
796
797 Some(cx.spawn(|this, mut cx| async move {
798 copy.await?;
799 let entry = this
800 .update(&mut cx, |this, cx| {
801 this.as_local_mut().unwrap().refresh_entry(
802 new_path.clone(),
803 abs_new_path,
804 None,
805 cx,
806 )
807 })
808 .await?;
809 Ok(entry)
810 }))
811 }
812
813 fn write_entry_internal(
814 &self,
815 path: impl Into<Arc<Path>>,
816 text_if_file: Option<(Rope, LineEnding)>,
817 cx: &mut ModelContext<Worktree>,
818 ) -> Task<Result<Entry>> {
819 let path = path.into();
820 let abs_path = self.absolutize(&path);
821 let write = cx.background().spawn({
822 let fs = self.fs.clone();
823 let abs_path = abs_path.clone();
824 async move {
825 if let Some((text, line_ending)) = text_if_file {
826 fs.save(&abs_path, &text, line_ending).await
827 } else {
828 fs.create_dir(&abs_path).await
829 }
830 }
831 });
832
833 cx.spawn(|this, mut cx| async move {
834 write.await?;
835 let entry = this
836 .update(&mut cx, |this, cx| {
837 this.as_local_mut()
838 .unwrap()
839 .refresh_entry(path, abs_path, None, cx)
840 })
841 .await?;
842 Ok(entry)
843 })
844 }
845
846 fn refresh_entry(
847 &self,
848 path: Arc<Path>,
849 abs_path: PathBuf,
850 old_path: Option<Arc<Path>>,
851 cx: &mut ModelContext<Worktree>,
852 ) -> Task<Result<Entry>> {
853 let fs = self.fs.clone();
854 let root_char_bag;
855 let next_entry_id;
856 {
857 let snapshot = self.background_snapshot.lock();
858 root_char_bag = snapshot.root_char_bag;
859 next_entry_id = snapshot.next_entry_id.clone();
860 }
861 cx.spawn_weak(|this, mut cx| async move {
862 let metadata = fs
863 .metadata(&abs_path)
864 .await?
865 .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
866 let this = this
867 .upgrade(&cx)
868 .ok_or_else(|| anyhow!("worktree was dropped"))?;
869 this.update(&mut cx, |this, cx| {
870 let this = this.as_local_mut().unwrap();
871 let inserted_entry;
872 {
873 let mut snapshot = this.background_snapshot.lock();
874 let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
875 entry.is_ignored = snapshot
876 .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
877 .is_abs_path_ignored(&abs_path, entry.is_dir());
878 if let Some(old_path) = old_path {
879 snapshot.remove_path(&old_path);
880 }
881 inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
882 snapshot.scan_id += 1;
883 }
884 this.poll_snapshot(true, cx);
885 Ok(inserted_entry)
886 })
887 })
888 }
889
890 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
891 let (share_tx, share_rx) = oneshot::channel();
892
893 if self.share.is_some() {
894 let _ = share_tx.send(Ok(()));
895 } else {
896 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
897 let rpc = self.client.clone();
898 let worktree_id = cx.model_id() as u64;
899 let maintain_remote_snapshot = cx.background().spawn({
900 let rpc = rpc;
901 let diagnostic_summaries = self.diagnostic_summaries.clone();
902 async move {
903 let mut prev_snapshot = match snapshots_rx.recv().await {
904 Some(snapshot) => {
905 let update = proto::UpdateWorktree {
906 project_id,
907 worktree_id,
908 root_name: snapshot.root_name().to_string(),
909 updated_entries: snapshot
910 .entries_by_path
911 .iter()
912 .map(Into::into)
913 .collect(),
914 removed_entries: Default::default(),
915 scan_id: snapshot.scan_id as u64,
916 is_last_update: true,
917 };
918 if let Err(error) = send_worktree_update(&rpc, update).await {
919 let _ = share_tx.send(Err(error));
920 return Err(anyhow!("failed to send initial update worktree"));
921 } else {
922 let _ = share_tx.send(Ok(()));
923 snapshot
924 }
925 }
926 None => {
927 share_tx
928 .send(Err(anyhow!("worktree dropped before share completed")))
929 .ok();
930 return Err(anyhow!("failed to send initial update worktree"));
931 }
932 };
933
934 for (path, summary) in diagnostic_summaries.iter() {
935 rpc.send(proto::UpdateDiagnosticSummary {
936 project_id,
937 worktree_id,
938 summary: Some(summary.to_proto(&path.0)),
939 })?;
940 }
941
942 while let Some(snapshot) = snapshots_rx.recv().await {
943 send_worktree_update(
944 &rpc,
945 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true),
946 )
947 .await?;
948 prev_snapshot = snapshot;
949 }
950
951 Ok::<_, anyhow::Error>(())
952 }
953 .log_err()
954 });
955 self.share = Some(ShareState {
956 project_id,
957 snapshots_tx,
958 _maintain_remote_snapshot: Some(maintain_remote_snapshot),
959 });
960 }
961
962 cx.foreground().spawn(async move {
963 share_rx
964 .await
965 .unwrap_or_else(|_| Err(anyhow!("share ended")))
966 })
967 }
968
969 pub fn unshare(&mut self) {
970 self.share.take();
971 }
972
973 pub fn is_shared(&self) -> bool {
974 self.share.is_some()
975 }
976
977 pub fn send_extension_counts(&self, project_id: u64) {
978 let mut extensions = Vec::new();
979 let mut counts = Vec::new();
980
981 for (extension, count) in self.extension_counts() {
982 extensions.push(extension.to_string_lossy().to_string());
983 counts.push(*count as u32);
984 }
985
986 self.client
987 .send(proto::UpdateWorktreeExtensions {
988 project_id,
989 worktree_id: self.id().to_proto(),
990 extensions,
991 counts,
992 })
993 .log_err();
994 }
995}
996
997impl RemoteWorktree {
998 fn snapshot(&self) -> Snapshot {
999 self.snapshot.clone()
1000 }
1001
1002 fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
1003 self.snapshot = self.background_snapshot.lock().clone();
1004 cx.emit(Event::UpdatedEntries);
1005 cx.notify();
1006 }
1007
1008 pub fn disconnected_from_host(&mut self) {
1009 self.updates_tx.take();
1010 self.snapshot_subscriptions.clear();
1011 }
1012
1013 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1014 if let Some(updates_tx) = &self.updates_tx {
1015 updates_tx
1016 .unbounded_send(update)
1017 .expect("consumer runs to completion");
1018 }
1019 }
1020
1021 fn observed_snapshot(&self, scan_id: usize) -> bool {
1022 self.scan_id > scan_id || (self.scan_id == scan_id && self.is_complete)
1023 }
1024
1025 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = ()> {
1026 let (tx, rx) = oneshot::channel();
1027 if self.observed_snapshot(scan_id) {
1028 let _ = tx.send(());
1029 } else {
1030 match self
1031 .snapshot_subscriptions
1032 .binary_search_by_key(&scan_id, |probe| probe.0)
1033 {
1034 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1035 }
1036 }
1037
1038 async move {
1039 let _ = rx.await;
1040 }
1041 }
1042
1043 pub fn update_diagnostic_summary(
1044 &mut self,
1045 path: Arc<Path>,
1046 summary: &proto::DiagnosticSummary,
1047 ) {
1048 let summary = DiagnosticSummary {
1049 language_server_id: summary.language_server_id as usize,
1050 error_count: summary.error_count as usize,
1051 warning_count: summary.warning_count as usize,
1052 };
1053 if summary.is_empty() {
1054 self.diagnostic_summaries.remove(&PathKey(path));
1055 } else {
1056 self.diagnostic_summaries.insert(PathKey(path), summary);
1057 }
1058 }
1059
1060 pub fn insert_entry(
1061 &mut self,
1062 entry: proto::Entry,
1063 scan_id: usize,
1064 cx: &mut ModelContext<Worktree>,
1065 ) -> Task<Result<Entry>> {
1066 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1067 cx.spawn(|this, mut cx| async move {
1068 wait_for_snapshot.await;
1069 this.update(&mut cx, |worktree, _| {
1070 let worktree = worktree.as_remote_mut().unwrap();
1071 let mut snapshot = worktree.background_snapshot.lock();
1072 let entry = snapshot.insert_entry(entry);
1073 worktree.snapshot = snapshot.clone();
1074 entry
1075 })
1076 })
1077 }
1078
1079 pub(crate) fn delete_entry(
1080 &mut self,
1081 id: ProjectEntryId,
1082 scan_id: usize,
1083 cx: &mut ModelContext<Worktree>,
1084 ) -> Task<Result<()>> {
1085 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1086 cx.spawn(|this, mut cx| async move {
1087 wait_for_snapshot.await;
1088 this.update(&mut cx, |worktree, _| {
1089 let worktree = worktree.as_remote_mut().unwrap();
1090 let mut snapshot = worktree.background_snapshot.lock();
1091 snapshot.delete_entry(id);
1092 worktree.snapshot = snapshot.clone();
1093 });
1094 Ok(())
1095 })
1096 }
1097}
1098
1099impl Snapshot {
1100 pub fn id(&self) -> WorktreeId {
1101 self.id
1102 }
1103
1104 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1105 self.entries_by_id.get(&entry_id, &()).is_some()
1106 }
1107
1108 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1109 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1110 let old_entry = self.entries_by_id.insert_or_replace(
1111 PathEntry {
1112 id: entry.id,
1113 path: entry.path.clone(),
1114 is_ignored: entry.is_ignored,
1115 scan_id: 0,
1116 },
1117 &(),
1118 );
1119 if let Some(old_entry) = old_entry {
1120 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1121 }
1122 self.entries_by_path.insert_or_replace(entry.clone(), &());
1123 Ok(entry)
1124 }
1125
1126 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
1127 if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
1128 self.entries_by_path = {
1129 let mut cursor = self.entries_by_path.cursor();
1130 let mut new_entries_by_path =
1131 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1132 while let Some(entry) = cursor.item() {
1133 if entry.path.starts_with(&removed_entry.path) {
1134 self.entries_by_id.remove(&entry.id, &());
1135 cursor.next(&());
1136 } else {
1137 break;
1138 }
1139 }
1140 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1141 new_entries_by_path
1142 };
1143
1144 true
1145 } else {
1146 false
1147 }
1148 }
1149
1150 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1151 let mut entries_by_path_edits = Vec::new();
1152 let mut entries_by_id_edits = Vec::new();
1153 for entry_id in update.removed_entries {
1154 let entry = self
1155 .entry_for_id(ProjectEntryId::from_proto(entry_id))
1156 .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
1157 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1158 entries_by_id_edits.push(Edit::Remove(entry.id));
1159 }
1160
1161 for entry in update.updated_entries {
1162 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1163 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1164 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1165 }
1166 entries_by_id_edits.push(Edit::Insert(PathEntry {
1167 id: entry.id,
1168 path: entry.path.clone(),
1169 is_ignored: entry.is_ignored,
1170 scan_id: 0,
1171 }));
1172 entries_by_path_edits.push(Edit::Insert(entry));
1173 }
1174
1175 self.entries_by_path.edit(entries_by_path_edits, &());
1176 self.entries_by_id.edit(entries_by_id_edits, &());
1177 self.scan_id = update.scan_id as usize;
1178 self.is_complete = update.is_last_update;
1179
1180 Ok(())
1181 }
1182
1183 pub fn file_count(&self) -> usize {
1184 self.entries_by_path.summary().file_count
1185 }
1186
1187 pub fn visible_file_count(&self) -> usize {
1188 self.entries_by_path.summary().visible_file_count
1189 }
1190
1191 fn traverse_from_offset(
1192 &self,
1193 include_dirs: bool,
1194 include_ignored: bool,
1195 start_offset: usize,
1196 ) -> Traversal {
1197 let mut cursor = self.entries_by_path.cursor();
1198 cursor.seek(
1199 &TraversalTarget::Count {
1200 count: start_offset,
1201 include_dirs,
1202 include_ignored,
1203 },
1204 Bias::Right,
1205 &(),
1206 );
1207 Traversal {
1208 cursor,
1209 include_dirs,
1210 include_ignored,
1211 }
1212 }
1213
1214 fn traverse_from_path(
1215 &self,
1216 include_dirs: bool,
1217 include_ignored: bool,
1218 path: &Path,
1219 ) -> Traversal {
1220 let mut cursor = self.entries_by_path.cursor();
1221 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1222 Traversal {
1223 cursor,
1224 include_dirs,
1225 include_ignored,
1226 }
1227 }
1228
1229 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1230 self.traverse_from_offset(false, include_ignored, start)
1231 }
1232
1233 pub fn entries(&self, include_ignored: bool) -> Traversal {
1234 self.traverse_from_offset(true, include_ignored, 0)
1235 }
1236
1237 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1238 let empty_path = Path::new("");
1239 self.entries_by_path
1240 .cursor::<()>()
1241 .filter(move |entry| entry.path.as_ref() != empty_path)
1242 .map(|entry| &entry.path)
1243 }
1244
1245 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1246 let mut cursor = self.entries_by_path.cursor();
1247 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1248 let traversal = Traversal {
1249 cursor,
1250 include_dirs: true,
1251 include_ignored: true,
1252 };
1253 ChildEntriesIter {
1254 traversal,
1255 parent_path,
1256 }
1257 }
1258
1259 pub fn root_entry(&self) -> Option<&Entry> {
1260 self.entry_for_path("")
1261 }
1262
1263 pub fn root_name(&self) -> &str {
1264 &self.root_name
1265 }
1266
1267 pub fn scan_id(&self) -> usize {
1268 self.scan_id
1269 }
1270
1271 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1272 let path = path.as_ref();
1273 self.traverse_from_path(true, true, path)
1274 .entry()
1275 .and_then(|entry| {
1276 if entry.path.as_ref() == path {
1277 Some(entry)
1278 } else {
1279 None
1280 }
1281 })
1282 }
1283
1284 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1285 let entry = self.entries_by_id.get(&id, &())?;
1286 self.entry_for_path(&entry.path)
1287 }
1288
1289 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1290 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1291 }
1292}
1293
1294impl LocalSnapshot {
1295 pub fn abs_path(&self) -> &Arc<Path> {
1296 &self.abs_path
1297 }
1298
1299 pub fn extension_counts(&self) -> &HashMap<OsString, usize> {
1300 &self.extension_counts
1301 }
1302
1303 pub(crate) fn git_repository_for_file_path(&self, path: &Path) -> Option<GitRepositoryState> {
1304 None
1305 }
1306
1307 #[cfg(test)]
1308 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1309 let root_name = self.root_name.clone();
1310 proto::UpdateWorktree {
1311 project_id,
1312 worktree_id: self.id().to_proto(),
1313 root_name,
1314 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1315 removed_entries: Default::default(),
1316 scan_id: self.scan_id as u64,
1317 is_last_update: true,
1318 }
1319 }
1320
1321 pub(crate) fn build_update(
1322 &self,
1323 other: &Self,
1324 project_id: u64,
1325 worktree_id: u64,
1326 include_ignored: bool,
1327 ) -> proto::UpdateWorktree {
1328 let mut updated_entries = Vec::new();
1329 let mut removed_entries = Vec::new();
1330 let mut self_entries = self
1331 .entries_by_id
1332 .cursor::<()>()
1333 .filter(|e| include_ignored || !e.is_ignored)
1334 .peekable();
1335 let mut other_entries = other
1336 .entries_by_id
1337 .cursor::<()>()
1338 .filter(|e| include_ignored || !e.is_ignored)
1339 .peekable();
1340 loop {
1341 match (self_entries.peek(), other_entries.peek()) {
1342 (Some(self_entry), Some(other_entry)) => {
1343 match Ord::cmp(&self_entry.id, &other_entry.id) {
1344 Ordering::Less => {
1345 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1346 updated_entries.push(entry);
1347 self_entries.next();
1348 }
1349 Ordering::Equal => {
1350 if self_entry.scan_id != other_entry.scan_id {
1351 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1352 updated_entries.push(entry);
1353 }
1354
1355 self_entries.next();
1356 other_entries.next();
1357 }
1358 Ordering::Greater => {
1359 removed_entries.push(other_entry.id.to_proto());
1360 other_entries.next();
1361 }
1362 }
1363 }
1364 (Some(self_entry), None) => {
1365 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1366 updated_entries.push(entry);
1367 self_entries.next();
1368 }
1369 (None, Some(other_entry)) => {
1370 removed_entries.push(other_entry.id.to_proto());
1371 other_entries.next();
1372 }
1373 (None, None) => break,
1374 }
1375 }
1376
1377 proto::UpdateWorktree {
1378 project_id,
1379 worktree_id,
1380 root_name: self.root_name().to_string(),
1381 updated_entries,
1382 removed_entries,
1383 scan_id: self.scan_id as u64,
1384 is_last_update: true,
1385 }
1386 }
1387
1388 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1389 if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
1390 let abs_path = self.abs_path.join(&entry.path);
1391 match smol::block_on(build_gitignore(&abs_path, fs)) {
1392 Ok(ignore) => {
1393 self.ignores_by_parent_abs_path.insert(
1394 abs_path.parent().unwrap().into(),
1395 (Arc::new(ignore), self.scan_id),
1396 );
1397 }
1398 Err(error) => {
1399 log::error!(
1400 "error loading .gitignore file {:?} - {:?}",
1401 &entry.path,
1402 error
1403 );
1404 }
1405 }
1406 }
1407
1408 self.reuse_entry_id(&mut entry);
1409
1410 if entry.kind == EntryKind::PendingDir {
1411 if let Some(existing_entry) =
1412 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1413 {
1414 entry.kind = existing_entry.kind;
1415 }
1416 }
1417
1418 self.entries_by_path.insert_or_replace(entry.clone(), &());
1419 let scan_id = self.scan_id;
1420 let removed_entry = self.entries_by_id.insert_or_replace(
1421 PathEntry {
1422 id: entry.id,
1423 path: entry.path.clone(),
1424 is_ignored: entry.is_ignored,
1425 scan_id,
1426 },
1427 &(),
1428 );
1429
1430 if let Some(removed_entry) = removed_entry {
1431 self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
1432 }
1433 self.inc_extension_count(&entry.path, entry.is_ignored);
1434
1435 entry
1436 }
1437
1438 fn populate_dir(
1439 &mut self,
1440 parent_path: Arc<Path>,
1441 entries: impl IntoIterator<Item = Entry>,
1442 ignore: Option<Arc<Gitignore>>,
1443 ) {
1444 let mut parent_entry = if let Some(parent_entry) =
1445 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1446 {
1447 parent_entry.clone()
1448 } else {
1449 log::warn!(
1450 "populating a directory {:?} that has been removed",
1451 parent_path
1452 );
1453 return;
1454 };
1455
1456 if let Some(ignore) = ignore {
1457 self.ignores_by_parent_abs_path.insert(
1458 self.abs_path.join(&parent_path).into(),
1459 (ignore, self.scan_id),
1460 );
1461 }
1462 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1463 parent_entry.kind = EntryKind::Dir;
1464 } else {
1465 unreachable!();
1466 }
1467
1468 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1469 let mut entries_by_id_edits = Vec::new();
1470
1471 for mut entry in entries {
1472 self.reuse_entry_id(&mut entry);
1473 self.inc_extension_count(&entry.path, entry.is_ignored);
1474 entries_by_id_edits.push(Edit::Insert(PathEntry {
1475 id: entry.id,
1476 path: entry.path.clone(),
1477 is_ignored: entry.is_ignored,
1478 scan_id: self.scan_id,
1479 }));
1480 entries_by_path_edits.push(Edit::Insert(entry));
1481 }
1482
1483 self.entries_by_path.edit(entries_by_path_edits, &());
1484 let removed_entries = self.entries_by_id.edit(entries_by_id_edits, &());
1485
1486 for removed_entry in removed_entries {
1487 self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
1488 }
1489 }
1490
1491 fn inc_extension_count(&mut self, path: &Path, ignored: bool) {
1492 if !ignored {
1493 if let Some(extension) = path.extension() {
1494 if let Some(count) = self.extension_counts.get_mut(extension) {
1495 *count += 1;
1496 } else {
1497 self.extension_counts.insert(extension.into(), 1);
1498 }
1499 }
1500 }
1501 }
1502
1503 fn dec_extension_count(&mut self, path: &Path, ignored: bool) {
1504 if !ignored {
1505 if let Some(extension) = path.extension() {
1506 if let Some(count) = self.extension_counts.get_mut(extension) {
1507 *count -= 1;
1508 }
1509 }
1510 }
1511 }
1512
1513 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1514 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1515 entry.id = removed_entry_id;
1516 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1517 entry.id = existing_entry.id;
1518 }
1519 }
1520
1521 fn remove_path(&mut self, path: &Path) {
1522 let mut new_entries;
1523 let removed_entries;
1524 {
1525 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1526 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1527 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1528 new_entries.push_tree(cursor.suffix(&()), &());
1529 }
1530 self.entries_by_path = new_entries;
1531
1532 let mut entries_by_id_edits = Vec::new();
1533 for entry in removed_entries.cursor::<()>() {
1534 let removed_entry_id = self
1535 .removed_entry_ids
1536 .entry(entry.inode)
1537 .or_insert(entry.id);
1538 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1539 entries_by_id_edits.push(Edit::Remove(entry.id));
1540 self.dec_extension_count(&entry.path, entry.is_ignored);
1541 }
1542 self.entries_by_id.edit(entries_by_id_edits, &());
1543
1544 if path.file_name() == Some(&GITIGNORE) {
1545 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1546 if let Some((_, scan_id)) = self
1547 .ignores_by_parent_abs_path
1548 .get_mut(abs_parent_path.as_path())
1549 {
1550 *scan_id = self.snapshot.scan_id;
1551 }
1552 }
1553 }
1554
1555 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1556 let mut inodes = TreeSet::default();
1557 for ancestor in path.ancestors().skip(1) {
1558 if let Some(entry) = self.entry_for_path(ancestor) {
1559 inodes.insert(entry.inode);
1560 }
1561 }
1562 inodes
1563 }
1564
1565 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1566 let mut new_ignores = Vec::new();
1567 for ancestor in abs_path.ancestors().skip(1) {
1568 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1569 new_ignores.push((ancestor, Some(ignore.clone())));
1570 } else {
1571 new_ignores.push((ancestor, None));
1572 }
1573 }
1574
1575 let mut ignore_stack = IgnoreStack::none();
1576 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1577 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1578 ignore_stack = IgnoreStack::all();
1579 break;
1580 } else if let Some(ignore) = ignore {
1581 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1582 }
1583 }
1584
1585 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1586 ignore_stack = IgnoreStack::all();
1587 }
1588
1589 ignore_stack
1590 }
1591}
1592
1593async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1594 let contents = fs.load(abs_path).await?;
1595 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1596 let mut builder = GitignoreBuilder::new(parent);
1597 for line in contents.lines() {
1598 builder.add_line(Some(abs_path.into()), line)?;
1599 }
1600 Ok(builder.build()?)
1601}
1602
1603impl WorktreeId {
1604 pub fn from_usize(handle_id: usize) -> Self {
1605 Self(handle_id)
1606 }
1607
1608 pub(crate) fn from_proto(id: u64) -> Self {
1609 Self(id as usize)
1610 }
1611
1612 pub fn to_proto(&self) -> u64 {
1613 self.0 as u64
1614 }
1615
1616 pub fn to_usize(&self) -> usize {
1617 self.0
1618 }
1619}
1620
1621impl fmt::Display for WorktreeId {
1622 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1623 self.0.fmt(f)
1624 }
1625}
1626
1627impl Deref for Worktree {
1628 type Target = Snapshot;
1629
1630 fn deref(&self) -> &Self::Target {
1631 match self {
1632 Worktree::Local(worktree) => &worktree.snapshot,
1633 Worktree::Remote(worktree) => &worktree.snapshot,
1634 }
1635 }
1636}
1637
1638impl Deref for LocalWorktree {
1639 type Target = LocalSnapshot;
1640
1641 fn deref(&self) -> &Self::Target {
1642 &self.snapshot
1643 }
1644}
1645
1646impl Deref for RemoteWorktree {
1647 type Target = Snapshot;
1648
1649 fn deref(&self) -> &Self::Target {
1650 &self.snapshot
1651 }
1652}
1653
1654impl fmt::Debug for LocalWorktree {
1655 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1656 self.snapshot.fmt(f)
1657 }
1658}
1659
1660impl fmt::Debug for Snapshot {
1661 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1662 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1663 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1664
1665 impl<'a> fmt::Debug for EntriesByPath<'a> {
1666 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1667 f.debug_map()
1668 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1669 .finish()
1670 }
1671 }
1672
1673 impl<'a> fmt::Debug for EntriesById<'a> {
1674 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1675 f.debug_list().entries(self.0.iter()).finish()
1676 }
1677 }
1678
1679 f.debug_struct("Snapshot")
1680 .field("id", &self.id)
1681 .field("root_name", &self.root_name)
1682 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1683 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1684 .finish()
1685 }
1686}
1687
1688#[derive(Clone, PartialEq)]
1689pub struct File {
1690 pub worktree: ModelHandle<Worktree>,
1691 pub path: Arc<Path>,
1692 pub mtime: SystemTime,
1693 pub(crate) entry_id: Option<ProjectEntryId>,
1694 pub(crate) is_local: bool,
1695}
1696
1697impl language::File for File {
1698 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1699 if self.is_local {
1700 Some(self)
1701 } else {
1702 None
1703 }
1704 }
1705
1706 fn mtime(&self) -> SystemTime {
1707 self.mtime
1708 }
1709
1710 fn path(&self) -> &Arc<Path> {
1711 &self.path
1712 }
1713
1714 fn full_path(&self, cx: &AppContext) -> PathBuf {
1715 let mut full_path = PathBuf::new();
1716 full_path.push(self.worktree.read(cx).root_name());
1717 if self.path.components().next().is_some() {
1718 full_path.push(&self.path);
1719 }
1720 full_path
1721 }
1722
1723 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1724 /// of its worktree, then this method will return the name of the worktree itself.
1725 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1726 self.path
1727 .file_name()
1728 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1729 }
1730
1731 fn is_deleted(&self) -> bool {
1732 self.entry_id.is_none()
1733 }
1734
1735 fn save(
1736 &self,
1737 buffer_id: u64,
1738 text: Rope,
1739 version: clock::Global,
1740 line_ending: LineEnding,
1741 cx: &mut MutableAppContext,
1742 ) -> Task<Result<(clock::Global, String, SystemTime)>> {
1743 self.worktree.update(cx, |worktree, cx| match worktree {
1744 Worktree::Local(worktree) => {
1745 let rpc = worktree.client.clone();
1746 let project_id = worktree.share.as_ref().map(|share| share.project_id);
1747 let fingerprint = text.fingerprint();
1748 let save = worktree.write_file(self.path.clone(), text, line_ending, cx);
1749 cx.background().spawn(async move {
1750 let entry = save.await?;
1751 if let Some(project_id) = project_id {
1752 rpc.send(proto::BufferSaved {
1753 project_id,
1754 buffer_id,
1755 version: serialize_version(&version),
1756 mtime: Some(entry.mtime.into()),
1757 fingerprint: fingerprint.clone(),
1758 })?;
1759 }
1760 Ok((version, fingerprint, entry.mtime))
1761 })
1762 }
1763 Worktree::Remote(worktree) => {
1764 let rpc = worktree.client.clone();
1765 let project_id = worktree.project_id;
1766 cx.foreground().spawn(async move {
1767 let response = rpc
1768 .request(proto::SaveBuffer {
1769 project_id,
1770 buffer_id,
1771 version: serialize_version(&version),
1772 })
1773 .await?;
1774 let version = deserialize_version(response.version);
1775 let mtime = response
1776 .mtime
1777 .ok_or_else(|| anyhow!("missing mtime"))?
1778 .into();
1779 Ok((version, response.fingerprint, mtime))
1780 })
1781 }
1782 })
1783 }
1784
1785 fn as_any(&self) -> &dyn Any {
1786 self
1787 }
1788
1789 fn to_proto(&self) -> rpc::proto::File {
1790 rpc::proto::File {
1791 worktree_id: self.worktree.id() as u64,
1792 entry_id: self.entry_id.map(|entry_id| entry_id.to_proto()),
1793 path: self.path.to_string_lossy().into(),
1794 mtime: Some(self.mtime.into()),
1795 }
1796 }
1797}
1798
1799impl language::LocalFile for File {
1800 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1801 self.worktree
1802 .read(cx)
1803 .as_local()
1804 .unwrap()
1805 .abs_path
1806 .join(&self.path)
1807 }
1808
1809 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1810 let worktree = self.worktree.read(cx).as_local().unwrap();
1811 let abs_path = worktree.absolutize(&self.path);
1812 let fs = worktree.fs.clone();
1813 cx.background()
1814 .spawn(async move { fs.load(&abs_path).await })
1815 }
1816
1817 fn buffer_reloaded(
1818 &self,
1819 buffer_id: u64,
1820 version: &clock::Global,
1821 fingerprint: String,
1822 line_ending: LineEnding,
1823 mtime: SystemTime,
1824 cx: &mut MutableAppContext,
1825 ) {
1826 let worktree = self.worktree.read(cx).as_local().unwrap();
1827 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1828 worktree
1829 .client
1830 .send(proto::BufferReloaded {
1831 project_id,
1832 buffer_id,
1833 version: serialize_version(version),
1834 mtime: Some(mtime.into()),
1835 fingerprint,
1836 line_ending: serialize_line_ending(line_ending) as i32,
1837 })
1838 .log_err();
1839 }
1840 }
1841}
1842
1843impl File {
1844 pub fn from_proto(
1845 proto: rpc::proto::File,
1846 worktree: ModelHandle<Worktree>,
1847 cx: &AppContext,
1848 ) -> Result<Self> {
1849 let worktree_id = worktree
1850 .read(cx)
1851 .as_remote()
1852 .ok_or_else(|| anyhow!("not remote"))?
1853 .id();
1854
1855 if worktree_id.to_proto() != proto.worktree_id {
1856 return Err(anyhow!("worktree id does not match file"));
1857 }
1858
1859 Ok(Self {
1860 worktree,
1861 path: Path::new(&proto.path).into(),
1862 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1863 entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
1864 is_local: false,
1865 })
1866 }
1867
1868 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
1869 file.and_then(|f| f.as_any().downcast_ref())
1870 }
1871
1872 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1873 self.worktree.read(cx).id()
1874 }
1875
1876 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
1877 self.entry_id
1878 }
1879}
1880
1881#[derive(Clone, Debug, PartialEq, Eq)]
1882pub struct Entry {
1883 pub id: ProjectEntryId,
1884 pub kind: EntryKind,
1885 pub path: Arc<Path>,
1886 pub inode: u64,
1887 pub mtime: SystemTime,
1888 pub is_symlink: bool,
1889 pub is_ignored: bool,
1890}
1891
1892#[derive(Clone, Copy, Debug, PartialEq, Eq)]
1893pub enum EntryKind {
1894 PendingDir,
1895 Dir,
1896 File(CharBag),
1897}
1898
1899impl Entry {
1900 fn new(
1901 path: Arc<Path>,
1902 metadata: &fs::Metadata,
1903 next_entry_id: &AtomicUsize,
1904 root_char_bag: CharBag,
1905 ) -> Self {
1906 Self {
1907 id: ProjectEntryId::new(next_entry_id),
1908 kind: if metadata.is_dir {
1909 EntryKind::PendingDir
1910 } else {
1911 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1912 },
1913 path,
1914 inode: metadata.inode,
1915 mtime: metadata.mtime,
1916 is_symlink: metadata.is_symlink,
1917 is_ignored: false,
1918 }
1919 }
1920
1921 pub fn is_dir(&self) -> bool {
1922 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
1923 }
1924
1925 pub fn is_file(&self) -> bool {
1926 matches!(self.kind, EntryKind::File(_))
1927 }
1928}
1929
1930impl sum_tree::Item for Entry {
1931 type Summary = EntrySummary;
1932
1933 fn summary(&self) -> Self::Summary {
1934 let visible_count = if self.is_ignored { 0 } else { 1 };
1935 let file_count;
1936 let visible_file_count;
1937 if self.is_file() {
1938 file_count = 1;
1939 visible_file_count = visible_count;
1940 } else {
1941 file_count = 0;
1942 visible_file_count = 0;
1943 }
1944
1945 EntrySummary {
1946 max_path: self.path.clone(),
1947 count: 1,
1948 visible_count,
1949 file_count,
1950 visible_file_count,
1951 }
1952 }
1953}
1954
1955impl sum_tree::KeyedItem for Entry {
1956 type Key = PathKey;
1957
1958 fn key(&self) -> Self::Key {
1959 PathKey(self.path.clone())
1960 }
1961}
1962
1963#[derive(Clone, Debug)]
1964pub struct EntrySummary {
1965 max_path: Arc<Path>,
1966 count: usize,
1967 visible_count: usize,
1968 file_count: usize,
1969 visible_file_count: usize,
1970}
1971
1972impl Default for EntrySummary {
1973 fn default() -> Self {
1974 Self {
1975 max_path: Arc::from(Path::new("")),
1976 count: 0,
1977 visible_count: 0,
1978 file_count: 0,
1979 visible_file_count: 0,
1980 }
1981 }
1982}
1983
1984impl sum_tree::Summary for EntrySummary {
1985 type Context = ();
1986
1987 fn add_summary(&mut self, rhs: &Self, _: &()) {
1988 self.max_path = rhs.max_path.clone();
1989 self.count += rhs.count;
1990 self.visible_count += rhs.visible_count;
1991 self.file_count += rhs.file_count;
1992 self.visible_file_count += rhs.visible_file_count;
1993 }
1994}
1995
1996#[derive(Clone, Debug)]
1997struct PathEntry {
1998 id: ProjectEntryId,
1999 path: Arc<Path>,
2000 is_ignored: bool,
2001 scan_id: usize,
2002}
2003
2004impl sum_tree::Item for PathEntry {
2005 type Summary = PathEntrySummary;
2006
2007 fn summary(&self) -> Self::Summary {
2008 PathEntrySummary { max_id: self.id }
2009 }
2010}
2011
2012impl sum_tree::KeyedItem for PathEntry {
2013 type Key = ProjectEntryId;
2014
2015 fn key(&self) -> Self::Key {
2016 self.id
2017 }
2018}
2019
2020#[derive(Clone, Debug, Default)]
2021struct PathEntrySummary {
2022 max_id: ProjectEntryId,
2023}
2024
2025impl sum_tree::Summary for PathEntrySummary {
2026 type Context = ();
2027
2028 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2029 self.max_id = summary.max_id;
2030 }
2031}
2032
2033impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2034 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2035 *self = summary.max_id;
2036 }
2037}
2038
2039#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2040pub struct PathKey(Arc<Path>);
2041
2042impl Default for PathKey {
2043 fn default() -> Self {
2044 Self(Path::new("").into())
2045 }
2046}
2047
2048impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2049 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2050 self.0 = summary.max_path.clone();
2051 }
2052}
2053
2054struct BackgroundScanner {
2055 fs: Arc<dyn Fs>,
2056 snapshot: Arc<Mutex<LocalSnapshot>>,
2057 notify: UnboundedSender<ScanState>,
2058 executor: Arc<executor::Background>,
2059}
2060
2061impl BackgroundScanner {
2062 fn new(
2063 snapshot: Arc<Mutex<LocalSnapshot>>,
2064 notify: UnboundedSender<ScanState>,
2065 fs: Arc<dyn Fs>,
2066 executor: Arc<executor::Background>,
2067 ) -> Self {
2068 Self {
2069 fs,
2070 snapshot,
2071 notify,
2072 executor,
2073 }
2074 }
2075
2076 fn abs_path(&self) -> Arc<Path> {
2077 self.snapshot.lock().abs_path.clone()
2078 }
2079
2080 fn snapshot(&self) -> LocalSnapshot {
2081 self.snapshot.lock().clone()
2082 }
2083
2084 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2085 if self.notify.unbounded_send(ScanState::Initializing).is_err() {
2086 return;
2087 }
2088
2089 if let Err(err) = self.scan_dirs().await {
2090 if self
2091 .notify
2092 .unbounded_send(ScanState::Err(Arc::new(err)))
2093 .is_err()
2094 {
2095 return;
2096 }
2097 }
2098
2099 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2100 return;
2101 }
2102
2103 futures::pin_mut!(events_rx);
2104
2105 while let Some(mut events) = events_rx.next().await {
2106 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
2107 events.extend(additional_events);
2108 }
2109
2110 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2111 break;
2112 }
2113
2114 if !self.process_events(events).await {
2115 break;
2116 }
2117
2118 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2119 break;
2120 }
2121 }
2122 }
2123
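    // Builds the initial snapshot: loads `.gitignore` files from ancestors of
    // the root, then scans the root directory in parallel across the
    // executor's threads using a queue of `ScanJob`s.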
2124 async fn scan_dirs(&mut self) -> Result<()> {
2125 let root_char_bag;
2126 let root_abs_path;
2127 let root_inode;
2128 let is_dir;
2129 let next_entry_id;
2130 {
2131 let snapshot = self.snapshot.lock();
2132 root_char_bag = snapshot.root_char_bag;
2133 root_abs_path = snapshot.abs_path.clone();
2134 root_inode = snapshot.root_entry().map(|e| e.inode);
2135 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
2136 next_entry_id = snapshot.next_entry_id.clone();
2137 };
2138
2139 // Populate ignores above the root.
2140 for ancestor in root_abs_path.ancestors().skip(1) {
2141 if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2142 {
2143 self.snapshot
2144 .lock()
2145 .ignores_by_parent_abs_path
2146 .insert(ancestor.into(), (ignore.into(), 0));
2147 }
2148 }
2149
2150 let ignore_stack = {
2151 let mut snapshot = self.snapshot.lock();
2152 let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2153 if ignore_stack.is_all() {
2154 if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2155 root_entry.is_ignored = true;
2156 snapshot.insert_entry(root_entry, self.fs.as_ref());
2157 }
2158 }
2159 ignore_stack
2160 };
2161
2162 if is_dir {
2163 let path: Arc<Path> = Arc::from(Path::new(""));
2164 let mut ancestor_inodes = TreeSet::default();
2165 if let Some(root_inode) = root_inode {
2166 ancestor_inodes.insert(root_inode);
2167 }
2168
2169 let (tx, rx) = channel::unbounded();
2170 self.executor
2171 .block(tx.send(ScanJob {
2172 abs_path: root_abs_path.to_path_buf(),
2173 path,
2174 ignore_stack,
2175 ancestor_inodes,
2176 scan_queue: tx.clone(),
2177 }))
2178 .unwrap();
2179 drop(tx);
2180
2181 self.executor
2182 .scoped(|scope| {
2183 for _ in 0..self.executor.num_cpus() {
2184 scope.spawn(async {
2185 while let Ok(job) = rx.recv().await {
2186 if let Err(err) = self
2187 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2188 .await
2189 {
2190 log::error!("error scanning {:?}: {}", job.abs_path, err);
2191 }
2192 }
2193 });
2194 }
2195 })
2196 .await;
2197 }
2198
2199 Ok(())
2200 }
2201
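    // Scans a single directory: creates entries for its children, queues
    // `ScanJob`s for subdirectories (skipping inodes already seen on this path,
    // which guards against symlink cycles), and applies any `.gitignore` found
    // here to the directory's other children.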
2202 async fn scan_dir(
2203 &self,
2204 root_char_bag: CharBag,
2205 next_entry_id: Arc<AtomicUsize>,
2206 job: &ScanJob,
2207 ) -> Result<()> {
2208 let mut new_entries: Vec<Entry> = Vec::new();
2209 let mut new_jobs: Vec<ScanJob> = Vec::new();
2210 let mut ignore_stack = job.ignore_stack.clone();
2211 let mut new_ignore = None;
2212
2213 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2214 while let Some(child_abs_path) = child_paths.next().await {
2215 let child_abs_path = match child_abs_path {
2216 Ok(child_abs_path) => child_abs_path,
2217 Err(error) => {
2218 log::error!("error processing entry {:?}", error);
2219 continue;
2220 }
2221 };
2222 let child_name = child_abs_path.file_name().unwrap();
2223 let child_path: Arc<Path> = job.path.join(child_name).into();
2224 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2225 Ok(Some(metadata)) => metadata,
2226 Ok(None) => continue,
2227 Err(err) => {
2228 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2229 continue;
2230 }
2231 };
2232
2233 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2234 if child_name == *GITIGNORE {
2235 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2236 Ok(ignore) => {
2237 let ignore = Arc::new(ignore);
2238 ignore_stack =
2239 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2240 new_ignore = Some(ignore);
2241 }
2242 Err(error) => {
2243 log::error!(
2244 "error loading .gitignore file {:?} - {:?}",
2245 child_name,
2246 error
2247 );
2248 }
2249 }
2250
                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, it tends to appear early in the listing, so there should
                // rarely be many entries to update. Update the ignore stack associated with
                // any new jobs as well.
2255 let mut new_jobs = new_jobs.iter_mut();
2256 for entry in &mut new_entries {
2257 let entry_abs_path = self.abs_path().join(&entry.path);
2258 entry.is_ignored =
2259 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2260 if entry.is_dir() {
2261 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2262 IgnoreStack::all()
2263 } else {
2264 ignore_stack.clone()
2265 };
2266 }
2267 }
2268 }
2269
2270 let mut child_entry = Entry::new(
2271 child_path.clone(),
2272 &child_metadata,
2273 &next_entry_id,
2274 root_char_bag,
2275 );
2276
2277 if child_entry.is_dir() {
2278 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2279 child_entry.is_ignored = is_ignored;
2280
2281 if !job.ancestor_inodes.contains(&child_entry.inode) {
2282 let mut ancestor_inodes = job.ancestor_inodes.clone();
2283 ancestor_inodes.insert(child_entry.inode);
2284 new_jobs.push(ScanJob {
2285 abs_path: child_abs_path,
2286 path: child_path,
2287 ignore_stack: if is_ignored {
2288 IgnoreStack::all()
2289 } else {
2290 ignore_stack.clone()
2291 },
2292 ancestor_inodes,
2293 scan_queue: job.scan_queue.clone(),
2294 });
2295 }
2296 } else {
2297 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2298 }
2299
2300 new_entries.push(child_entry);
2301 }
2302
2303 self.snapshot
2304 .lock()
2305 .populate_dir(job.path.clone(), new_entries, new_ignore);
2306 for new_job in new_jobs {
2307 job.scan_queue.send(new_job).await.unwrap();
2308 }
2309
2310 Ok(())
2311 }
2312
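    // Applies a batch of file-system events to the snapshot: removes affected
    // paths, re-inserts entries from fresh metadata, rescans newly discovered
    // directories in parallel, and refreshes ignore statuses. Returns `false`
    // if the worktree root can no longer be canonicalized.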
2313 async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2314 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2315 events.dedup_by(|a, b| a.path.starts_with(&b.path));
2316
2317 let root_char_bag;
2318 let root_abs_path;
2319 let next_entry_id;
2320 {
2321 let snapshot = self.snapshot.lock();
2322 root_char_bag = snapshot.root_char_bag;
2323 root_abs_path = snapshot.abs_path.clone();
2324 next_entry_id = snapshot.next_entry_id.clone();
2325 }
2326
2327 let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
2328 path
2329 } else {
2330 return false;
2331 };
2332 let metadata = futures::future::join_all(
2333 events
2334 .iter()
2335 .map(|event| self.fs.metadata(&event.path))
2336 .collect::<Vec<_>>(),
2337 )
2338 .await;
2339
2340 // Hold the snapshot lock while clearing and re-inserting the root entries
2341 // for each event. This way, the snapshot is not observable to the foreground
2342 // thread while this operation is in-progress.
2343 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2344 {
2345 let mut snapshot = self.snapshot.lock();
2346 snapshot.scan_id += 1;
2347 for event in &events {
2348 if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
2349 snapshot.remove_path(path);
2350 }
2351 }
2352
2353 for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
2354 let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
2355 Ok(path) => Arc::from(path.to_path_buf()),
2356 Err(_) => {
2357 log::error!(
2358 "unexpected event {:?} for root path {:?}",
2359 event.path,
2360 root_canonical_path
2361 );
2362 continue;
2363 }
2364 };
2365 let abs_path = root_abs_path.join(&path);
2366
2367 match metadata {
2368 Ok(Some(metadata)) => {
2369 let ignore_stack =
2370 snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2371 let mut fs_entry = Entry::new(
2372 path.clone(),
2373 &metadata,
2374 snapshot.next_entry_id.as_ref(),
2375 snapshot.root_char_bag,
2376 );
2377 fs_entry.is_ignored = ignore_stack.is_all();
2378 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2379
2380 let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2381 if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2382 ancestor_inodes.insert(metadata.inode);
2383 self.executor
2384 .block(scan_queue_tx.send(ScanJob {
2385 abs_path,
2386 path,
2387 ignore_stack,
2388 ancestor_inodes,
2389 scan_queue: scan_queue_tx.clone(),
2390 }))
2391 .unwrap();
2392 }
2393 }
2394 Ok(None) => {}
2395 Err(err) => {
2396 // TODO - create a special 'error' entry in the entries tree to mark this
2397 log::error!("error reading file on event {:?}", err);
2398 }
2399 }
2400 }
2401 drop(scan_queue_tx);
2402 }
2403
2404 // Scan any directories that were created as part of this event batch.
2405 self.executor
2406 .scoped(|scope| {
2407 for _ in 0..self.executor.num_cpus() {
2408 scope.spawn(async {
2409 while let Ok(job) = scan_queue_rx.recv().await {
2410 if let Err(err) = self
2411 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2412 .await
2413 {
2414 log::error!("error scanning {:?}: {}", job.abs_path, err);
2415 }
2416 }
2417 });
2418 }
2419 })
2420 .await;
2421
2422 // Attempt to detect renames only over a single batch of file-system events.
2423 self.snapshot.lock().removed_entry_ids.clear();
2424
2425 self.update_ignore_statuses().await;
2426 true
2427 }
2428
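    // Re-evaluates ignore status wherever a `.gitignore` changed during the
    // current scan, dropping ignore entries whose files no longer exist and
    // fanning the remaining work out as `UpdateIgnoreStatusJob`s.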
2429 async fn update_ignore_statuses(&self) {
2430 let mut snapshot = self.snapshot();
2431
2432 let mut ignores_to_update = Vec::new();
2433 let mut ignores_to_delete = Vec::new();
2434 for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2435 if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2436 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2437 ignores_to_update.push(parent_abs_path.clone());
2438 }
2439
2440 let ignore_path = parent_path.join(&*GITIGNORE);
2441 if snapshot.entry_for_path(ignore_path).is_none() {
2442 ignores_to_delete.push(parent_abs_path.clone());
2443 }
2444 }
2445 }
2446
2447 for parent_abs_path in ignores_to_delete {
2448 snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2449 self.snapshot
2450 .lock()
2451 .ignores_by_parent_abs_path
2452 .remove(&parent_abs_path);
2453 }
2454
2455 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2456 ignores_to_update.sort_unstable();
2457 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2458 while let Some(parent_abs_path) = ignores_to_update.next() {
2459 while ignores_to_update
2460 .peek()
2461 .map_or(false, |p| p.starts_with(&parent_abs_path))
2462 {
2463 ignores_to_update.next().unwrap();
2464 }
2465
2466 let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2467 ignore_queue_tx
2468 .send(UpdateIgnoreStatusJob {
2469 abs_path: parent_abs_path,
2470 ignore_stack,
2471 ignore_queue: ignore_queue_tx.clone(),
2472 })
2473 .await
2474 .unwrap();
2475 }
2476 drop(ignore_queue_tx);
2477
2478 self.executor
2479 .scoped(|scope| {
2480 for _ in 0..self.executor.num_cpus() {
2481 scope.spawn(async {
2482 while let Ok(job) = ignore_queue_rx.recv().await {
2483 self.update_ignore_status(job, &snapshot).await;
2484 }
2485 });
2486 }
2487 })
2488 .await;
2489 }
2490
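    // Recomputes the ignore status of a single directory's children, queueing
    // follow-up jobs for subdirectories and recording edits for entries whose
    // status changed.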
2491 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2492 let mut ignore_stack = job.ignore_stack;
2493 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
2494 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2495 }
2496
2497 let mut entries_by_id_edits = Vec::new();
2498 let mut entries_by_path_edits = Vec::new();
2499 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
2500 for mut entry in snapshot.child_entries(path).cloned() {
2501 let was_ignored = entry.is_ignored;
2502 let abs_path = self.abs_path().join(&entry.path);
2503 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
2504 if entry.is_dir() {
2505 let child_ignore_stack = if entry.is_ignored {
2506 IgnoreStack::all()
2507 } else {
2508 ignore_stack.clone()
2509 };
2510 job.ignore_queue
2511 .send(UpdateIgnoreStatusJob {
2512 abs_path: abs_path.into(),
2513 ignore_stack: child_ignore_stack,
2514 ignore_queue: job.ignore_queue.clone(),
2515 })
2516 .await
2517 .unwrap();
2518 }
2519
2520 if entry.is_ignored != was_ignored {
2521 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2522 path_entry.scan_id = snapshot.scan_id;
2523 path_entry.is_ignored = entry.is_ignored;
2524 entries_by_id_edits.push(Edit::Insert(path_entry));
2525 entries_by_path_edits.push(Edit::Insert(entry));
2526 }
2527 }
2528
2529 let mut snapshot = self.snapshot.lock();
2530 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2531 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2532 }
2533}
2534
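/// Extends the worktree root's character bag with the lowercased characters of
/// `path`, producing the bag used for fuzzy matching against this entry.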
2535fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2536 let mut result = root_char_bag;
2537 result.extend(
2538 path.to_string_lossy()
2539 .chars()
2540 .map(|c| c.to_ascii_lowercase()),
2541 );
2542 result
2543}
2544
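/// A unit of scanning work: one directory to read, along with the ignore stack
/// and ancestor inodes in effect at that path.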
2545struct ScanJob {
2546 abs_path: PathBuf,
2547 path: Arc<Path>,
2548 ignore_stack: Arc<IgnoreStack>,
2549 scan_queue: Sender<ScanJob>,
2550 ancestor_inodes: TreeSet<u64>,
2551}
2552
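/// A unit of work for recomputing ignore statuses beneath a single directory.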
2553struct UpdateIgnoreStatusJob {
2554 abs_path: Arc<Path>,
2555 ignore_stack: Arc<IgnoreStack>,
2556 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2557}
2558
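/// Test-support extensions for `ModelHandle<Worktree>`.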
2559pub trait WorktreeHandle {
2560 #[cfg(any(test, feature = "test-support"))]
2561 fn flush_fs_events<'a>(
2562 &self,
2563 cx: &'a gpui::TestAppContext,
2564 ) -> futures::future::LocalBoxFuture<'a, ()>;
2565}
2566
2567impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes
    // that occurred before the worktree was constructed. These events can cause the worktree
    // to perform extra directory scans and emit extra scan-state notifications.
2571 //
2572 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2573 // to ensure that all redundant FS events have already been processed.
2574 #[cfg(any(test, feature = "test-support"))]
2575 fn flush_fs_events<'a>(
2576 &self,
2577 cx: &'a gpui::TestAppContext,
2578 ) -> futures::future::LocalBoxFuture<'a, ()> {
2579 use smol::future::FutureExt;
2580
2581 let filename = "fs-event-sentinel";
2582 let tree = self.clone();
2583 let (fs, root_path) = self.read_with(cx, |tree, _| {
2584 let tree = tree.as_local().unwrap();
2585 (tree.fs.clone(), tree.abs_path().clone())
2586 });
2587
2588 async move {
2589 fs.create_file(&root_path.join(filename), Default::default())
2590 .await
2591 .unwrap();
2592 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
2593 .await;
2594
2595 fs.remove_file(&root_path.join(filename), Default::default())
2596 .await
2597 .unwrap();
2598 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
2599 .await;
2600
2601 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2602 .await;
2603 }
2604 .boxed_local()
2605 }
2606}
2607
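/// The dimension accumulated while seeking through `entries_by_path`: the
/// rightmost path seen so far plus running entry counts.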
2608#[derive(Clone, Debug)]
2609struct TraversalProgress<'a> {
2610 max_path: &'a Path,
2611 count: usize,
2612 visible_count: usize,
2613 file_count: usize,
2614 visible_file_count: usize,
2615}
2616
2617impl<'a> TraversalProgress<'a> {
2618 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2619 match (include_ignored, include_dirs) {
2620 (true, true) => self.count,
2621 (true, false) => self.file_count,
2622 (false, true) => self.visible_count,
2623 (false, false) => self.visible_file_count,
2624 }
2625 }
2626}
2627
2628impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2629 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2630 self.max_path = summary.max_path.as_ref();
2631 self.count += summary.count;
2632 self.visible_count += summary.visible_count;
2633 self.file_count += summary.file_count;
2634 self.visible_file_count += summary.visible_file_count;
2635 }
2636}
2637
2638impl<'a> Default for TraversalProgress<'a> {
2639 fn default() -> Self {
2640 Self {
2641 max_path: Path::new(""),
2642 count: 0,
2643 visible_count: 0,
2644 file_count: 0,
2645 visible_file_count: 0,
2646 }
2647 }
2648}
2649
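/// An ordered cursor over a snapshot's entries that can skip ignored entries
/// and/or directories, and that implements `Iterator` over `&Entry`.
///
/// Illustrative sketch only, mirroring the tests below; it assumes the
/// `entries(include_ignored)` accessor used there:
///
/// ```ignore
/// // Collect the paths of all non-ignored entries, in traversal order.
/// let paths: Vec<&Path> = snapshot.entries(false).map(|e| e.path.as_ref()).collect();
/// ```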
2650pub struct Traversal<'a> {
2651 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2652 include_ignored: bool,
2653 include_dirs: bool,
2654}
2655
2656impl<'a> Traversal<'a> {
2657 pub fn advance(&mut self) -> bool {
2658 self.advance_to_offset(self.offset() + 1)
2659 }
2660
2661 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2662 self.cursor.seek_forward(
2663 &TraversalTarget::Count {
2664 count: offset,
2665 include_dirs: self.include_dirs,
2666 include_ignored: self.include_ignored,
2667 },
2668 Bias::Right,
2669 &(),
2670 )
2671 }
2672
2673 pub fn advance_to_sibling(&mut self) -> bool {
2674 while let Some(entry) = self.cursor.item() {
2675 self.cursor.seek_forward(
2676 &TraversalTarget::PathSuccessor(&entry.path),
2677 Bias::Left,
2678 &(),
2679 );
2680 if let Some(entry) = self.cursor.item() {
2681 if (self.include_dirs || !entry.is_dir())
2682 && (self.include_ignored || !entry.is_ignored)
2683 {
2684 return true;
2685 }
2686 }
2687 }
2688 false
2689 }
2690
2691 pub fn entry(&self) -> Option<&'a Entry> {
2692 self.cursor.item()
2693 }
2694
2695 pub fn offset(&self) -> usize {
2696 self.cursor
2697 .start()
2698 .count(self.include_dirs, self.include_ignored)
2699 }
2700}
2701
2702impl<'a> Iterator for Traversal<'a> {
2703 type Item = &'a Entry;
2704
2705 fn next(&mut self) -> Option<Self::Item> {
2706 if let Some(item) = self.entry() {
2707 self.advance();
2708 Some(item)
2709 } else {
2710 None
2711 }
2712 }
2713}
2714
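/// Seek targets for a `Traversal`'s cursor: an exact path, the first entry that
/// is not a descendant of a path (`PathSuccessor`), or an offset into the
/// filtered sequence of entries (`Count`).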
2715#[derive(Debug)]
2716enum TraversalTarget<'a> {
2717 Path(&'a Path),
2718 PathSuccessor(&'a Path),
2719 Count {
2720 count: usize,
2721 include_ignored: bool,
2722 include_dirs: bool,
2723 },
2724}
2725
2726impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2727 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2728 match self {
2729 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2730 TraversalTarget::PathSuccessor(path) => {
2731 if !cursor_location.max_path.starts_with(path) {
2732 Ordering::Equal
2733 } else {
2734 Ordering::Greater
2735 }
2736 }
2737 TraversalTarget::Count {
2738 count,
2739 include_dirs,
2740 include_ignored,
2741 } => Ord::cmp(
2742 count,
2743 &cursor_location.count(*include_dirs, *include_ignored),
2744 ),
2745 }
2746 }
2747}
2748
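/// Iterates over the direct children of `parent_path` by advancing the
/// underlying traversal to the next sibling after yielding each entry.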
2749struct ChildEntriesIter<'a> {
2750 parent_path: &'a Path,
2751 traversal: Traversal<'a>,
2752}
2753
2754impl<'a> Iterator for ChildEntriesIter<'a> {
2755 type Item = &'a Entry;
2756
2757 fn next(&mut self) -> Option<Self::Item> {
2758 if let Some(item) = self.traversal.entry() {
2759 if item.path.starts_with(&self.parent_path) {
2760 self.traversal.advance_to_sibling();
2761 return Some(item);
2762 }
2763 }
2764 None
2765 }
2766}
2767
2768impl<'a> From<&'a Entry> for proto::Entry {
2769 fn from(entry: &'a Entry) -> Self {
2770 Self {
2771 id: entry.id.to_proto(),
2772 is_dir: entry.is_dir(),
2773 path: entry.path.as_os_str().as_bytes().to_vec(),
2774 inode: entry.inode,
2775 mtime: Some(entry.mtime.into()),
2776 is_symlink: entry.is_symlink,
2777 is_ignored: entry.is_ignored,
2778 }
2779 }
2780}
2781
2782impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2783 type Error = anyhow::Error;
2784
2785 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2786 if let Some(mtime) = entry.mtime {
2787 let kind = if entry.is_dir {
2788 EntryKind::Dir
2789 } else {
2790 let mut char_bag = *root_char_bag;
2791 char_bag.extend(
2792 String::from_utf8_lossy(&entry.path)
2793 .chars()
2794 .map(|c| c.to_ascii_lowercase()),
2795 );
2796 EntryKind::File(char_bag)
2797 };
2798 let path: Arc<Path> = PathBuf::from(OsString::from_vec(entry.path)).into();
2799 Ok(Entry {
2800 id: ProjectEntryId::from_proto(entry.id),
2801 kind,
2802 path,
2803 inode: entry.inode,
2804 mtime: mtime.into(),
2805 is_symlink: entry.is_symlink,
2806 is_ignored: entry.is_ignored,
2807 })
2808 } else {
2809 Err(anyhow!(
2810 "missing mtime in remote worktree entry {:?}",
2811 entry.path
2812 ))
2813 }
2814 }
2815}
2816
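/// Sends a worktree update over the client connection, splitting it with
/// `proto::split_worktree_update` so that large updates are delivered in
/// smaller chunks.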
2817async fn send_worktree_update(client: &Arc<Client>, update: proto::UpdateWorktree) -> Result<()> {
2818 #[cfg(any(test, feature = "test-support"))]
2819 const MAX_CHUNK_SIZE: usize = 2;
2820 #[cfg(not(any(test, feature = "test-support")))]
2821 const MAX_CHUNK_SIZE: usize = 256;
2822
2823 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
2824 client.request(update).await?;
2825 }
2826
2827 Ok(())
2828}
2829
2830#[cfg(test)]
2831mod tests {
2832 use super::*;
2833 use crate::fs::FakeFs;
2834 use anyhow::Result;
2835 use client::test::FakeHttpClient;
2836 use fs::RealFs;
2837 use gpui::{executor::Deterministic, TestAppContext};
2838 use rand::prelude::*;
2839 use serde_json::json;
2840 use std::{
2841 env,
2842 fmt::Write,
2843 time::{SystemTime, UNIX_EPOCH},
2844 };
2845 use util::test::temp_tree;
2846
2847 #[gpui::test]
2848 async fn test_traversal(cx: &mut TestAppContext) {
2849 let fs = FakeFs::new(cx.background());
2850 fs.insert_tree(
2851 "/root",
2852 json!({
2853 ".gitignore": "a/b\n",
2854 "a": {
2855 "b": "",
2856 "c": "",
2857 }
2858 }),
2859 )
2860 .await;
2861
2862 let http_client = FakeHttpClient::with_404_response();
2863 let client = cx.read(|cx| Client::new(http_client, cx));
2864
2865 let tree = Worktree::local(
2866 client,
2867 Arc::from(Path::new("/root")),
2868 true,
2869 fs,
2870 Default::default(),
2871 &mut cx.to_async(),
2872 )
2873 .await
2874 .unwrap();
2875 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2876 .await;
2877
2878 tree.read_with(cx, |tree, _| {
2879 assert_eq!(
2880 tree.entries(false)
2881 .map(|entry| entry.path.as_ref())
2882 .collect::<Vec<_>>(),
2883 vec![
2884 Path::new(""),
2885 Path::new(".gitignore"),
2886 Path::new("a"),
2887 Path::new("a/c"),
2888 ]
2889 );
2890 assert_eq!(
2891 tree.entries(true)
2892 .map(|entry| entry.path.as_ref())
2893 .collect::<Vec<_>>(),
2894 vec![
2895 Path::new(""),
2896 Path::new(".gitignore"),
2897 Path::new("a"),
2898 Path::new("a/b"),
2899 Path::new("a/c"),
2900 ]
2901 );
2902 })
2903 }
2904
2905 #[gpui::test(iterations = 10)]
2906 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
2907 let fs = FakeFs::new(cx.background());
2908 fs.insert_tree(
2909 "/root",
2910 json!({
2911 "lib": {
2912 "a": {
2913 "a.txt": ""
2914 },
2915 "b": {
2916 "b.txt": ""
2917 }
2918 }
2919 }),
2920 )
2921 .await;
2922 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
2923 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
2924
2925 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
2926 let tree = Worktree::local(
2927 client,
2928 Arc::from(Path::new("/root")),
2929 true,
2930 fs.clone(),
2931 Default::default(),
2932 &mut cx.to_async(),
2933 )
2934 .await
2935 .unwrap();
2936
2937 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2938 .await;
2939
2940 tree.read_with(cx, |tree, _| {
2941 assert_eq!(
2942 tree.entries(false)
2943 .map(|entry| entry.path.as_ref())
2944 .collect::<Vec<_>>(),
2945 vec![
2946 Path::new(""),
2947 Path::new("lib"),
2948 Path::new("lib/a"),
2949 Path::new("lib/a/a.txt"),
2950 Path::new("lib/a/lib"),
2951 Path::new("lib/b"),
2952 Path::new("lib/b/b.txt"),
2953 Path::new("lib/b/lib"),
2954 ]
2955 );
2956 });
2957
2958 fs.rename(
2959 Path::new("/root/lib/a/lib"),
2960 Path::new("/root/lib/a/lib-2"),
2961 Default::default(),
2962 )
2963 .await
2964 .unwrap();
2965 executor.run_until_parked();
2966 tree.read_with(cx, |tree, _| {
2967 assert_eq!(
2968 tree.entries(false)
2969 .map(|entry| entry.path.as_ref())
2970 .collect::<Vec<_>>(),
2971 vec![
2972 Path::new(""),
2973 Path::new("lib"),
2974 Path::new("lib/a"),
2975 Path::new("lib/a/a.txt"),
2976 Path::new("lib/a/lib-2"),
2977 Path::new("lib/b"),
2978 Path::new("lib/b/b.txt"),
2979 Path::new("lib/b/lib"),
2980 ]
2981 );
2982 });
2983 }
2984
2985 #[gpui::test]
2986 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
2987 let parent_dir = temp_tree(json!({
2988 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
2989 "tree": {
2990 ".git": {},
2991 ".gitignore": "ignored-dir\n",
2992 "tracked-dir": {
2993 "tracked-file1": "",
2994 "ancestor-ignored-file1": "",
2995 },
2996 "ignored-dir": {
2997 "ignored-file1": ""
2998 }
2999 }
3000 }));
3001 let dir = parent_dir.path().join("tree");
3002
3003 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3004
3005 let tree = Worktree::local(
3006 client,
3007 dir.as_path(),
3008 true,
3009 Arc::new(RealFs),
3010 Default::default(),
3011 &mut cx.to_async(),
3012 )
3013 .await
3014 .unwrap();
3015 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3016 .await;
3017 tree.flush_fs_events(cx).await;
3018 cx.read(|cx| {
3019 let tree = tree.read(cx);
3020 assert!(
3021 !tree
3022 .entry_for_path("tracked-dir/tracked-file1")
3023 .unwrap()
3024 .is_ignored
3025 );
3026 assert!(
3027 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3028 .unwrap()
3029 .is_ignored
3030 );
3031 assert!(
3032 tree.entry_for_path("ignored-dir/ignored-file1")
3033 .unwrap()
3034 .is_ignored
3035 );
3036 });
3037
3038 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3039 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3040 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3041 tree.flush_fs_events(cx).await;
3042 cx.read(|cx| {
3043 let tree = tree.read(cx);
3044 assert!(
3045 !tree
3046 .entry_for_path("tracked-dir/tracked-file2")
3047 .unwrap()
3048 .is_ignored
3049 );
3050 assert!(
3051 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3052 .unwrap()
3053 .is_ignored
3054 );
3055 assert!(
3056 tree.entry_for_path("ignored-dir/ignored-file2")
3057 .unwrap()
3058 .is_ignored
3059 );
3060 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3061 });
3062 }
3063
3064 #[gpui::test]
3065 async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3066 let fs = FakeFs::new(cx.background());
3067 fs.insert_tree(
3068 "/root",
3069 json!({
3070 "dir1": {
3071 ".git": {},
3072 "deps": {
3073 "dep1": {
3074 ".git": {},
3075 "src": {
3076 "a.txt": ""
3077 }
3078 }
3079 },
3080 "src": {
3081 "b.txt": ""
3082 }
3083 },
3084 "c.txt": ""
3085 }),
3086 )
3087 .await;
3088
3089 let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
3091 let tree = Worktree::local(
3092 client,
3093 Arc::from(Path::new("/root")),
3094 true,
3095 fs.clone(),
3096 Default::default(),
3097 &mut cx.to_async(),
3098 )
3099 .await
3100 .unwrap();
3101
3102 cx.foreground().run_until_parked();
3103
3104 tree.read_with(cx, |tree, cx| {
3105 let tree = tree.as_local().unwrap();
3106
            assert!(tree
                .git_repository_for_file_path("c.txt".as_ref())
                .is_none());
3108
            let repo1 = tree
                .git_repository_for_file_path("dir1/src/b.txt".as_ref())
                .unwrap()
                .lock();
3110 assert_eq!(repo1.content_path.as_ref(), Path::new("dir1"));
3111 assert_eq!(repo1.git_dir_path.as_ref(), Path::new("dir1/.git"));
3112
            let repo2 = tree
                .git_repository_for_file_path("dir1/deps/dep1/src/a.txt".as_ref())
                .unwrap()
                .lock();
3114 assert_eq!(repo2.content_path.as_ref(), Path::new("dir1/deps/dep1"));
3115 assert_eq!(repo2.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"));
3116 });
3117 }
3118
3119 #[gpui::test]
3120 async fn test_write_file(cx: &mut TestAppContext) {
3121 let dir = temp_tree(json!({
3122 ".git": {},
3123 ".gitignore": "ignored-dir\n",
3124 "tracked-dir": {},
3125 "ignored-dir": {}
3126 }));
3127
3128 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3129
3130 let tree = Worktree::local(
3131 client,
3132 dir.path(),
3133 true,
3134 Arc::new(RealFs),
3135 Default::default(),
3136 &mut cx.to_async(),
3137 )
3138 .await
3139 .unwrap();
3140 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3141 .await;
3142 tree.flush_fs_events(cx).await;
3143
3144 tree.update(cx, |tree, cx| {
3145 tree.as_local().unwrap().write_file(
3146 Path::new("tracked-dir/file.txt"),
3147 "hello".into(),
3148 Default::default(),
3149 cx,
3150 )
3151 })
3152 .await
3153 .unwrap();
3154 tree.update(cx, |tree, cx| {
3155 tree.as_local().unwrap().write_file(
3156 Path::new("ignored-dir/file.txt"),
3157 "world".into(),
3158 Default::default(),
3159 cx,
3160 )
3161 })
3162 .await
3163 .unwrap();
3164
3165 tree.read_with(cx, |tree, _| {
3166 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3167 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3168 assert!(!tracked.is_ignored);
3169 assert!(ignored.is_ignored);
3170 });
3171 }
3172
3173 #[gpui::test(iterations = 30)]
3174 async fn test_create_directory(cx: &mut TestAppContext) {
3175 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3176
3177 let fs = FakeFs::new(cx.background());
3178 fs.insert_tree(
3179 "/a",
3180 json!({
3181 "b": {},
3182 "c": {},
3183 "d": {},
3184 }),
3185 )
3186 .await;
3187
3188 let tree = Worktree::local(
3189 client,
3190 "/a".as_ref(),
3191 true,
3192 fs,
3193 Default::default(),
3194 &mut cx.to_async(),
3195 )
3196 .await
3197 .unwrap();
3198
3199 let entry = tree
3200 .update(cx, |tree, cx| {
3201 tree.as_local_mut()
3202 .unwrap()
3203 .create_entry("a/e".as_ref(), true, cx)
3204 })
3205 .await
3206 .unwrap();
3207 assert!(entry.is_dir());
3208
3209 cx.foreground().run_until_parked();
3210 tree.read_with(cx, |tree, _| {
3211 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3212 });
3213 }
3214
3215 #[gpui::test(iterations = 100)]
3216 fn test_random(mut rng: StdRng) {
3217 let operations = env::var("OPERATIONS")
3218 .map(|o| o.parse().unwrap())
3219 .unwrap_or(40);
3220 let initial_entries = env::var("INITIAL_ENTRIES")
3221 .map(|o| o.parse().unwrap())
3222 .unwrap_or(20);
3223
3224 let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
3225 for _ in 0..initial_entries {
3226 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
3227 }
3228 log::info!("Generated initial tree");
3229
3230 let (notify_tx, _notify_rx) = mpsc::unbounded();
3231 let fs = Arc::new(RealFs);
3232 let next_entry_id = Arc::new(AtomicUsize::new(0));
3233 let mut initial_snapshot = LocalSnapshot {
3234 abs_path: root_dir.path().into(),
3235 removed_entry_ids: Default::default(),
3236 ignores_by_parent_abs_path: Default::default(),
3237 git_repositories: Default::default(),
3238 next_entry_id: next_entry_id.clone(),
3239 snapshot: Snapshot {
3240 id: WorktreeId::from_usize(0),
3241 entries_by_path: Default::default(),
3242 entries_by_id: Default::default(),
3243 root_name: Default::default(),
3244 root_char_bag: Default::default(),
3245 scan_id: 0,
3246 is_complete: true,
3247 },
3248 extension_counts: Default::default(),
3249 };
3250 initial_snapshot.insert_entry(
3251 Entry::new(
3252 Path::new("").into(),
3253 &smol::block_on(fs.metadata(root_dir.path()))
3254 .unwrap()
3255 .unwrap(),
3256 &next_entry_id,
3257 Default::default(),
3258 ),
3259 fs.as_ref(),
3260 );
3261 let mut scanner = BackgroundScanner::new(
3262 Arc::new(Mutex::new(initial_snapshot.clone())),
3263 notify_tx,
3264 fs.clone(),
3265 Arc::new(gpui::executor::Background::new()),
3266 );
3267 smol::block_on(scanner.scan_dirs()).unwrap();
3268 scanner.snapshot().check_invariants();
3269
3270 let mut events = Vec::new();
3271 let mut snapshots = Vec::new();
3272 let mut mutations_len = operations;
3273 while mutations_len > 1 {
3274 if !events.is_empty() && rng.gen_bool(0.4) {
3275 let len = rng.gen_range(0..=events.len());
3276 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
3277 log::info!("Delivering events: {:#?}", to_deliver);
3278 smol::block_on(scanner.process_events(to_deliver));
3279 scanner.snapshot().check_invariants();
3280 } else {
3281 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
3282 mutations_len -= 1;
3283 }
3284
3285 if rng.gen_bool(0.2) {
3286 snapshots.push(scanner.snapshot());
3287 }
3288 }
3289 log::info!("Quiescing: {:#?}", events);
3290 smol::block_on(scanner.process_events(events));
3291 scanner.snapshot().check_invariants();
3292
3293 let (notify_tx, _notify_rx) = mpsc::unbounded();
3294 let mut new_scanner = BackgroundScanner::new(
3295 Arc::new(Mutex::new(initial_snapshot)),
3296 notify_tx,
3297 scanner.fs.clone(),
3298 scanner.executor.clone(),
3299 );
3300 smol::block_on(new_scanner.scan_dirs()).unwrap();
3301 assert_eq!(
3302 scanner.snapshot().to_vec(true),
3303 new_scanner.snapshot().to_vec(true)
3304 );
3305
3306 for mut prev_snapshot in snapshots {
3307 let include_ignored = rng.gen::<bool>();
3308 if !include_ignored {
3309 let mut entries_by_path_edits = Vec::new();
3310 let mut entries_by_id_edits = Vec::new();
3311 for entry in prev_snapshot
3312 .entries_by_id
3313 .cursor::<()>()
3314 .filter(|e| e.is_ignored)
3315 {
3316 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3317 entries_by_id_edits.push(Edit::Remove(entry.id));
3318 }
3319
3320 prev_snapshot
3321 .entries_by_path
3322 .edit(entries_by_path_edits, &());
3323 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3324 }
3325
3326 let update = scanner
3327 .snapshot()
3328 .build_update(&prev_snapshot, 0, 0, include_ignored);
3329 prev_snapshot.apply_remote_update(update).unwrap();
3330 assert_eq!(
3331 prev_snapshot.to_vec(true),
3332 scanner.snapshot().to_vec(include_ignored)
3333 );
3334 }
3335 }
3336
3337 fn randomly_mutate_tree(
3338 root_path: &Path,
3339 insertion_probability: f64,
3340 rng: &mut impl Rng,
3341 ) -> Result<Vec<fsevent::Event>> {
3342 let root_path = root_path.canonicalize().unwrap();
3343 let (dirs, files) = read_dir_recursive(root_path.clone());
3344
3345 let mut events = Vec::new();
3346 let mut record_event = |path: PathBuf| {
3347 events.push(fsevent::Event {
3348 event_id: SystemTime::now()
3349 .duration_since(UNIX_EPOCH)
3350 .unwrap()
3351 .as_secs(),
3352 flags: fsevent::StreamFlags::empty(),
3353 path,
3354 });
3355 };
3356
3357 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
3358 let path = dirs.choose(rng).unwrap();
3359 let new_path = path.join(gen_name(rng));
3360
3361 if rng.gen() {
3362 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
3363 std::fs::create_dir(&new_path)?;
3364 } else {
3365 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
3366 std::fs::write(&new_path, "")?;
3367 }
3368 record_event(new_path);
3369 } else if rng.gen_bool(0.05) {
3370 let ignore_dir_path = dirs.choose(rng).unwrap();
3371 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
3372
3373 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
3374 let files_to_ignore = {
3375 let len = rng.gen_range(0..=subfiles.len());
3376 subfiles.choose_multiple(rng, len)
3377 };
3378 let dirs_to_ignore = {
3379 let len = rng.gen_range(0..subdirs.len());
3380 subdirs.choose_multiple(rng, len)
3381 };
3382
3383 let mut ignore_contents = String::new();
3384 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
3385 writeln!(
3386 ignore_contents,
3387 "{}",
3388 path_to_ignore
3389 .strip_prefix(&ignore_dir_path)?
3390 .to_str()
3391 .unwrap()
3392 )
3393 .unwrap();
3394 }
3395 log::info!(
3396 "Creating {:?} with contents:\n{}",
3397 ignore_path.strip_prefix(&root_path)?,
3398 ignore_contents
3399 );
3400 std::fs::write(&ignore_path, ignore_contents).unwrap();
3401 record_event(ignore_path);
3402 } else {
3403 let old_path = {
3404 let file_path = files.choose(rng);
3405 let dir_path = dirs[1..].choose(rng);
3406 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
3407 };
3408
3409 let is_rename = rng.gen();
3410 if is_rename {
3411 let new_path_parent = dirs
3412 .iter()
3413 .filter(|d| !d.starts_with(old_path))
3414 .choose(rng)
3415 .unwrap();
3416
3417 let overwrite_existing_dir =
3418 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
3419 let new_path = if overwrite_existing_dir {
3420 std::fs::remove_dir_all(&new_path_parent).ok();
3421 new_path_parent.to_path_buf()
3422 } else {
3423 new_path_parent.join(gen_name(rng))
3424 };
3425
3426 log::info!(
3427 "Renaming {:?} to {}{:?}",
3428 old_path.strip_prefix(&root_path)?,
3429 if overwrite_existing_dir {
3430 "overwrite "
3431 } else {
3432 ""
3433 },
3434 new_path.strip_prefix(&root_path)?
3435 );
3436 std::fs::rename(&old_path, &new_path)?;
3437 record_event(old_path.clone());
3438 record_event(new_path);
3439 } else if old_path.is_dir() {
3440 let (dirs, files) = read_dir_recursive(old_path.clone());
3441
3442 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
3443 std::fs::remove_dir_all(&old_path).unwrap();
3444 for file in files {
3445 record_event(file);
3446 }
3447 for dir in dirs {
3448 record_event(dir);
3449 }
3450 } else {
3451 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
3452 std::fs::remove_file(old_path).unwrap();
3453 record_event(old_path.clone());
3454 }
3455 }
3456
3457 Ok(events)
3458 }
3459
3460 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3461 let child_entries = std::fs::read_dir(&path).unwrap();
3462 let mut dirs = vec![path];
3463 let mut files = Vec::new();
3464 for child_entry in child_entries {
3465 let child_path = child_entry.unwrap().path();
3466 if child_path.is_dir() {
3467 let (child_dirs, child_files) = read_dir_recursive(child_path);
3468 dirs.extend(child_dirs);
3469 files.extend(child_files);
3470 } else {
3471 files.push(child_path);
3472 }
3473 }
3474 (dirs, files)
3475 }
3476
3477 fn gen_name(rng: &mut impl Rng) -> String {
3478 (0..6)
3479 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3480 .map(char::from)
3481 .collect()
3482 }
3483
3484 impl LocalSnapshot {
3485 fn check_invariants(&self) {
3486 let mut files = self.files(true, 0);
3487 let mut visible_files = self.files(false, 0);
3488 for entry in self.entries_by_path.cursor::<()>() {
3489 if entry.is_file() {
3490 assert_eq!(files.next().unwrap().inode, entry.inode);
3491 if !entry.is_ignored {
3492 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3493 }
3494 }
3495 }
3496 assert!(files.next().is_none());
3497 assert!(visible_files.next().is_none());
3498
3499 let mut bfs_paths = Vec::new();
3500 let mut stack = vec![Path::new("")];
3501 while let Some(path) = stack.pop() {
3502 bfs_paths.push(path);
3503 let ix = stack.len();
3504 for child_entry in self.child_entries(path) {
3505 stack.insert(ix, &child_entry.path);
3506 }
3507 }
3508
3509 let dfs_paths_via_iter = self
3510 .entries_by_path
3511 .cursor::<()>()
3512 .map(|e| e.path.as_ref())
3513 .collect::<Vec<_>>();
3514 assert_eq!(bfs_paths, dfs_paths_via_iter);
3515
3516 let dfs_paths_via_traversal = self
3517 .entries(true)
3518 .map(|e| e.path.as_ref())
3519 .collect::<Vec<_>>();
3520 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3521
3522 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3523 let ignore_parent_path =
3524 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3525 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3526 assert!(self
3527 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3528 .is_some());
3529 }
3530
3531 // Ensure extension counts are correct.
3532 let mut expected_extension_counts = HashMap::default();
3533 for extension in self.entries(false).filter_map(|e| e.path.extension()) {
3534 *expected_extension_counts
3535 .entry(extension.into())
3536 .or_insert(0) += 1;
3537 }
3538 assert_eq!(self.extension_counts, expected_extension_counts);
3539 }
3540
3541 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3542 let mut paths = Vec::new();
3543 for entry in self.entries_by_path.cursor::<()>() {
3544 if include_ignored || !entry.is_ignored {
3545 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3546 }
3547 }
3548 paths.sort_by(|a, b| a.0.cmp(b.0));
3549 paths
3550 }
3551 }
3552}