1use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
2
3use super::{
4 fs::{self, Fs},
5 ignore::IgnoreStack,
6 DiagnosticSummary,
7};
8use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
9use anyhow::{anyhow, Context, Result};
10use client::{proto, Client};
11use clock::ReplicaId;
12use collections::{HashMap, VecDeque};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 Stream, StreamExt,
19};
20use fuzzy::CharBag;
21use gpui::{
22 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
23 Task,
24};
25use language::{
26 proto::{deserialize_version, serialize_line_ending, serialize_version},
27 Buffer, DiagnosticEntry, LineEnding, PointUtf16, Rope,
28};
29use lazy_static::lazy_static;
30use parking_lot::Mutex;
31use postage::{
32 prelude::{Sink as _, Stream as _},
33 watch,
34};
35use smol::channel::{self, Sender};
36use std::{
37 any::Any,
38 cmp::{self, Ordering},
39 convert::TryFrom,
40 ffi::{OsStr, OsString},
41 fmt,
42 future::Future,
43 ops::{Deref, DerefMut},
44 os::unix::prelude::{OsStrExt, OsStringExt},
45 path::{Path, PathBuf},
46 sync::{atomic::AtomicUsize, Arc},
47 task::Poll,
48 time::{Duration, SystemTime},
49};
50use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
51use util::{ResultExt, TryFutureExt};
52
53lazy_static! {
54 static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
55}
56
57#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
58pub struct WorktreeId(usize);
59
60#[allow(clippy::large_enum_variant)]
61pub enum Worktree {
62 Local(LocalWorktree),
63 Remote(RemoteWorktree),
64}
65
66pub struct LocalWorktree {
67 snapshot: LocalSnapshot,
68 background_snapshot: Arc<Mutex<LocalSnapshot>>,
69 last_scan_state_rx: watch::Receiver<ScanState>,
70 _background_scanner_task: Option<Task<()>>,
71 poll_task: Option<Task<()>>,
72 share: Option<ShareState>,
73 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
74 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
75 client: Arc<Client>,
76 fs: Arc<dyn Fs>,
77 visible: bool,
78}
79
80pub struct RemoteWorktree {
81 pub snapshot: Snapshot,
82 pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
83 project_id: u64,
84 client: Arc<Client>,
85 updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
86 snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
87 replica_id: ReplicaId,
88 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
89 visible: bool,
90}
91
92#[derive(Clone)]
93pub struct Snapshot {
94 id: WorktreeId,
95 root_name: String,
96 root_char_bag: CharBag,
97 entries_by_path: SumTree<Entry>,
98 entries_by_id: SumTree<PathEntry>,
99 scan_id: usize,
100 is_complete: bool,
101}
102
103#[derive(Clone)]
104pub struct LocalSnapshot {
105 abs_path: Arc<Path>,
106 ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
107 removed_entry_ids: HashMap<u64, ProjectEntryId>,
108 next_entry_id: Arc<AtomicUsize>,
109 snapshot: Snapshot,
110 extension_counts: HashMap<OsString, usize>,
111}
112
113impl Deref for LocalSnapshot {
114 type Target = Snapshot;
115
116 fn deref(&self) -> &Self::Target {
117 &self.snapshot
118 }
119}
120
121impl DerefMut for LocalSnapshot {
122 fn deref_mut(&mut self) -> &mut Self::Target {
123 &mut self.snapshot
124 }
125}
126
127#[derive(Clone, Debug)]
128enum ScanState {
129 Idle,
130 /// The worktree is performing its initial scan of the filesystem.
131 Initializing,
132 /// The worktree is updating in response to filesystem events.
133 Updating,
134 Err(Arc<anyhow::Error>),
135}
136
137struct ShareState {
138 project_id: u64,
139 snapshots_tx: watch::Sender<LocalSnapshot>,
140 _maintain_remote_snapshot: Option<Task<Option<()>>>,
141}
142
143pub enum Event {
144 UpdatedEntries,
145}
146
147impl Entity for Worktree {
148 type Event = Event;
149}
150
151impl Worktree {
152 pub async fn local(
153 client: Arc<Client>,
154 path: impl Into<Arc<Path>>,
155 visible: bool,
156 fs: Arc<dyn Fs>,
157 next_entry_id: Arc<AtomicUsize>,
158 cx: &mut AsyncAppContext,
159 ) -> Result<ModelHandle<Self>> {
160 let (tree, scan_states_tx) =
161 LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
162 tree.update(cx, |tree, cx| {
163 let tree = tree.as_local_mut().unwrap();
164 let abs_path = tree.abs_path().clone();
165 let background_snapshot = tree.background_snapshot.clone();
166 let background = cx.background().clone();
167 tree._background_scanner_task = Some(cx.background().spawn(async move {
168 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
169 let scanner =
170 BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
171 scanner.run(events).await;
172 }));
173 });
174 Ok(tree)
175 }
176
177 pub fn remote(
178 project_remote_id: u64,
179 replica_id: ReplicaId,
180 worktree: proto::WorktreeMetadata,
181 client: Arc<Client>,
182 cx: &mut MutableAppContext,
183 ) -> ModelHandle<Self> {
184 let remote_id = worktree.id;
185 let root_char_bag: CharBag = worktree
186 .root_name
187 .chars()
188 .map(|c| c.to_ascii_lowercase())
189 .collect();
190 let root_name = worktree.root_name.clone();
191 let visible = worktree.visible;
192 let snapshot = Snapshot {
193 id: WorktreeId(remote_id as usize),
194 root_name,
195 root_char_bag,
196 entries_by_path: Default::default(),
197 entries_by_id: Default::default(),
198 scan_id: 0,
199 is_complete: false,
200 };
201
202 let (updates_tx, mut updates_rx) = mpsc::unbounded();
203 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
204 let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
205 let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
206 Worktree::Remote(RemoteWorktree {
207 project_id: project_remote_id,
208 replica_id,
209 snapshot: snapshot.clone(),
210 background_snapshot: background_snapshot.clone(),
211 updates_tx: Some(updates_tx),
212 snapshot_subscriptions: Default::default(),
213 client: client.clone(),
214 diagnostic_summaries: Default::default(),
215 visible,
216 })
217 });
218
219 cx.background()
220 .spawn(async move {
221 while let Some(update) = updates_rx.next().await {
222 if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
223 log::error!("error applying worktree update: {}", error);
224 }
225 snapshot_updated_tx.send(()).await.ok();
226 }
227 })
228 .detach();
229
230 cx.spawn(|mut cx| {
231 let this = worktree_handle.downgrade();
232 async move {
233 while (snapshot_updated_rx.recv().await).is_some() {
234 if let Some(this) = this.upgrade(&cx) {
235 this.update(&mut cx, |this, cx| {
236 this.poll_snapshot(cx);
237 let this = this.as_remote_mut().unwrap();
238 while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
239 if this.observed_snapshot(*scan_id) {
240 let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
241 let _ = tx.send(());
242 } else {
243 break;
244 }
245 }
246 });
247 } else {
248 break;
249 }
250 }
251 }
252 })
253 .detach();
254
255 worktree_handle
256 }
257
258 pub fn as_local(&self) -> Option<&LocalWorktree> {
259 if let Worktree::Local(worktree) = self {
260 Some(worktree)
261 } else {
262 None
263 }
264 }
265
266 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
267 if let Worktree::Remote(worktree) = self {
268 Some(worktree)
269 } else {
270 None
271 }
272 }
273
274 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
275 if let Worktree::Local(worktree) = self {
276 Some(worktree)
277 } else {
278 None
279 }
280 }
281
282 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
283 if let Worktree::Remote(worktree) = self {
284 Some(worktree)
285 } else {
286 None
287 }
288 }
289
290 pub fn is_local(&self) -> bool {
291 matches!(self, Worktree::Local(_))
292 }
293
294 pub fn is_remote(&self) -> bool {
295 !self.is_local()
296 }
297
298 pub fn snapshot(&self) -> Snapshot {
299 match self {
300 Worktree::Local(worktree) => worktree.snapshot().snapshot,
301 Worktree::Remote(worktree) => worktree.snapshot(),
302 }
303 }
304
305 pub fn scan_id(&self) -> usize {
306 match self {
307 Worktree::Local(worktree) => worktree.snapshot.scan_id,
308 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
309 }
310 }
311
312 pub fn is_visible(&self) -> bool {
313 match self {
314 Worktree::Local(worktree) => worktree.visible,
315 Worktree::Remote(worktree) => worktree.visible,
316 }
317 }
318
319 pub fn replica_id(&self) -> ReplicaId {
320 match self {
321 Worktree::Local(_) => 0,
322 Worktree::Remote(worktree) => worktree.replica_id,
323 }
324 }
325
326 pub fn diagnostic_summaries(
327 &self,
328 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
329 match self {
330 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
331 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
332 }
333 .iter()
334 .map(|(path, summary)| (path.0.clone(), *summary))
335 }
336
337 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
338 match self {
339 Self::Local(worktree) => worktree.poll_snapshot(false, cx),
340 Self::Remote(worktree) => worktree.poll_snapshot(cx),
341 };
342 }
343}
344
345impl LocalWorktree {
346 async fn create(
347 client: Arc<Client>,
348 path: impl Into<Arc<Path>>,
349 visible: bool,
350 fs: Arc<dyn Fs>,
351 next_entry_id: Arc<AtomicUsize>,
352 cx: &mut AsyncAppContext,
353 ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
354 let abs_path = path.into();
355 let path: Arc<Path> = Arc::from(Path::new(""));
356
357 // After determining whether the root entry is a file or a directory, populate the
358 // snapshot's "root name", which will be used for the purpose of fuzzy matching.
359 let root_name = abs_path
360 .file_name()
361 .map_or(String::new(), |f| f.to_string_lossy().to_string());
362 let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
363 let metadata = fs
364 .metadata(&abs_path)
365 .await
366 .context("failed to stat worktree path")?;
367
368 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
369 let (mut last_scan_state_tx, last_scan_state_rx) =
370 watch::channel_with(ScanState::Initializing);
371 let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
372 let mut snapshot = LocalSnapshot {
373 abs_path,
374 ignores_by_parent_abs_path: Default::default(),
375 removed_entry_ids: Default::default(),
376 next_entry_id,
377 snapshot: Snapshot {
378 id: WorktreeId::from_usize(cx.model_id()),
379 root_name: root_name.clone(),
380 root_char_bag,
381 entries_by_path: Default::default(),
382 entries_by_id: Default::default(),
383 scan_id: 0,
384 is_complete: true,
385 },
386 extension_counts: Default::default(),
387 };
388 if let Some(metadata) = metadata {
389 let entry = Entry::new(
390 path,
391 &metadata,
392 &snapshot.next_entry_id,
393 snapshot.root_char_bag,
394 );
395 snapshot.insert_entry(entry, fs.as_ref());
396 }
397
398 let tree = Self {
399 snapshot: snapshot.clone(),
400 background_snapshot: Arc::new(Mutex::new(snapshot)),
401 last_scan_state_rx,
402 _background_scanner_task: None,
403 share: None,
404 poll_task: None,
405 diagnostics: Default::default(),
406 diagnostic_summaries: Default::default(),
407 client,
408 fs,
409 visible,
410 };
411
412 cx.spawn_weak(|this, mut cx| async move {
413 while let Some(scan_state) = scan_states_rx.next().await {
414 if let Some(this) = this.upgrade(&cx) {
415 last_scan_state_tx.blocking_send(scan_state).ok();
416 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
417 } else {
418 break;
419 }
420 }
421 })
422 .detach();
423
424 Worktree::Local(tree)
425 });
426
427 Ok((tree, scan_states_tx))
428 }
429
430 pub fn contains_abs_path(&self, path: &Path) -> bool {
431 path.starts_with(&self.abs_path)
432 }
433
434 fn absolutize(&self, path: &Path) -> PathBuf {
435 if path.file_name().is_some() {
436 self.abs_path.join(path)
437 } else {
438 self.abs_path.to_path_buf()
439 }
440 }
441
442 pub(crate) fn load_buffer(
443 &mut self,
444 path: &Path,
445 cx: &mut ModelContext<Worktree>,
446 ) -> Task<Result<ModelHandle<Buffer>>> {
447 let path = Arc::from(path);
448 cx.spawn(move |this, mut cx| async move {
449 let (file, contents, head_text) = this
450 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
451 .await?;
452 Ok(cx.add_model(|cx| {
453 let mut buffer = Buffer::from_file(0, contents, head_text, Arc::new(file), cx);
454 buffer.update_git(cx);
455 buffer
456 }))
457 })
458 }
459
460 pub fn diagnostics_for_path(&self, path: &Path) -> Option<Vec<DiagnosticEntry<PointUtf16>>> {
461 self.diagnostics.get(path).cloned()
462 }
463
464 pub fn update_diagnostics(
465 &mut self,
466 language_server_id: usize,
467 worktree_path: Arc<Path>,
468 diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
469 _: &mut ModelContext<Worktree>,
470 ) -> Result<bool> {
471 self.diagnostics.remove(&worktree_path);
472 let old_summary = self
473 .diagnostic_summaries
474 .remove(&PathKey(worktree_path.clone()))
475 .unwrap_or_default();
476 let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
477 if !new_summary.is_empty() {
478 self.diagnostic_summaries
479 .insert(PathKey(worktree_path.clone()), new_summary);
480 self.diagnostics.insert(worktree_path.clone(), diagnostics);
481 }
482
483 let updated = !old_summary.is_empty() || !new_summary.is_empty();
484 if updated {
485 if let Some(share) = self.share.as_ref() {
486 self.client
487 .send(proto::UpdateDiagnosticSummary {
488 project_id: share.project_id,
489 worktree_id: self.id().to_proto(),
490 summary: Some(proto::DiagnosticSummary {
491 path: worktree_path.to_string_lossy().to_string(),
492 language_server_id: language_server_id as u64,
493 error_count: new_summary.error_count as u32,
494 warning_count: new_summary.warning_count as u32,
495 }),
496 })
497 .log_err();
498 }
499 }
500
501 Ok(updated)
502 }
503
504 fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
505 self.poll_task.take();
506 match self.scan_state() {
507 ScanState::Idle => {
508 self.snapshot = self.background_snapshot.lock().clone();
509 if let Some(share) = self.share.as_mut() {
510 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
511 }
512 cx.emit(Event::UpdatedEntries);
513 }
514 ScanState::Initializing => {
515 let is_fake_fs = self.fs.is_fake();
516 self.snapshot = self.background_snapshot.lock().clone();
517 self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
518 if is_fake_fs {
519 #[cfg(any(test, feature = "test-support"))]
520 cx.background().simulate_random_delay().await;
521 } else {
522 smol::Timer::after(Duration::from_millis(100)).await;
523 }
524 if let Some(this) = this.upgrade(&cx) {
525 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
526 }
527 }));
528 cx.emit(Event::UpdatedEntries);
529 }
530 _ => {
531 if force {
532 self.snapshot = self.background_snapshot.lock().clone();
533 }
534 }
535 }
536 cx.notify();
537 }
538
539 pub fn scan_complete(&self) -> impl Future<Output = ()> {
540 let mut scan_state_rx = self.last_scan_state_rx.clone();
541 async move {
542 let mut scan_state = Some(scan_state_rx.borrow().clone());
543 while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
544 scan_state = scan_state_rx.recv().await;
545 }
546 }
547 }
548
549 fn scan_state(&self) -> ScanState {
550 self.last_scan_state_rx.borrow().clone()
551 }
552
553 pub fn snapshot(&self) -> LocalSnapshot {
554 self.snapshot.clone()
555 }
556
557 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
558 proto::WorktreeMetadata {
559 id: self.id().to_proto(),
560 root_name: self.root_name().to_string(),
561 visible: self.visible,
562 }
563 }
564
565 fn load(
566 &self,
567 path: &Path,
568 cx: &mut ModelContext<Worktree>,
569 ) -> Task<Result<(File, String, Option<String>)>> {
570 let handle = cx.handle();
571 let path = Arc::from(path);
572 let abs_path = self.absolutize(&path);
573 let fs = self.fs.clone();
574 cx.spawn(|this, mut cx| async move {
575 let text = fs.load(&abs_path).await?;
576 let head_text = fs.load_head_text(&abs_path).await;
577
578 // Eagerly populate the snapshot with an updated entry for the loaded file
579 let entry = this
580 .update(&mut cx, |this, cx| {
581 this.as_local()
582 .unwrap()
583 .refresh_entry(path, abs_path, None, cx)
584 })
585 .await?;
586
587 Ok((
588 File {
589 entry_id: Some(entry.id),
590 worktree: handle,
591 path: entry.path,
592 mtime: entry.mtime,
593 is_local: true,
594 },
595 text,
596 head_text,
597 ))
598 })
599 }
600
601 pub fn save_buffer_as(
602 &self,
603 buffer_handle: ModelHandle<Buffer>,
604 path: impl Into<Arc<Path>>,
605 cx: &mut ModelContext<Worktree>,
606 ) -> Task<Result<()>> {
607 let buffer = buffer_handle.read(cx);
608 let text = buffer.as_rope().clone();
609 let fingerprint = text.fingerprint();
610 let version = buffer.version();
611 let save = self.write_file(path, text, buffer.line_ending(), cx);
612 let handle = cx.handle();
613 cx.as_mut().spawn(|mut cx| async move {
614 let entry = save.await?;
615 let file = File {
616 entry_id: Some(entry.id),
617 worktree: handle,
618 path: entry.path,
619 mtime: entry.mtime,
620 is_local: true,
621 };
622
623 buffer_handle.update(&mut cx, |buffer, cx| {
624 buffer.did_save(version, fingerprint, file.mtime, Some(Arc::new(file)), cx);
625 });
626
627 Ok(())
628 })
629 }
630
631 pub fn create_entry(
632 &self,
633 path: impl Into<Arc<Path>>,
634 is_dir: bool,
635 cx: &mut ModelContext<Worktree>,
636 ) -> Task<Result<Entry>> {
637 self.write_entry_internal(
638 path,
639 if is_dir {
640 None
641 } else {
642 Some(Default::default())
643 },
644 cx,
645 )
646 }
647
648 pub fn write_file(
649 &self,
650 path: impl Into<Arc<Path>>,
651 text: Rope,
652 line_ending: LineEnding,
653 cx: &mut ModelContext<Worktree>,
654 ) -> Task<Result<Entry>> {
655 self.write_entry_internal(path, Some((text, line_ending)), cx)
656 }
657
658 pub fn delete_entry(
659 &self,
660 entry_id: ProjectEntryId,
661 cx: &mut ModelContext<Worktree>,
662 ) -> Option<Task<Result<()>>> {
663 let entry = self.entry_for_id(entry_id)?.clone();
664 let abs_path = self.absolutize(&entry.path);
665 let delete = cx.background().spawn({
666 let fs = self.fs.clone();
667 let abs_path = abs_path;
668 async move {
669 if entry.is_file() {
670 fs.remove_file(&abs_path, Default::default()).await
671 } else {
672 fs.remove_dir(
673 &abs_path,
674 RemoveOptions {
675 recursive: true,
676 ignore_if_not_exists: false,
677 },
678 )
679 .await
680 }
681 }
682 });
683
684 Some(cx.spawn(|this, mut cx| async move {
685 delete.await?;
686 this.update(&mut cx, |this, cx| {
687 let this = this.as_local_mut().unwrap();
688 {
689 let mut snapshot = this.background_snapshot.lock();
690 snapshot.delete_entry(entry_id);
691 }
692 this.poll_snapshot(true, cx);
693 });
694 Ok(())
695 }))
696 }
697
698 pub fn rename_entry(
699 &self,
700 entry_id: ProjectEntryId,
701 new_path: impl Into<Arc<Path>>,
702 cx: &mut ModelContext<Worktree>,
703 ) -> Option<Task<Result<Entry>>> {
704 let old_path = self.entry_for_id(entry_id)?.path.clone();
705 let new_path = new_path.into();
706 let abs_old_path = self.absolutize(&old_path);
707 let abs_new_path = self.absolutize(&new_path);
708 let rename = cx.background().spawn({
709 let fs = self.fs.clone();
710 let abs_new_path = abs_new_path.clone();
711 async move {
712 fs.rename(&abs_old_path, &abs_new_path, Default::default())
713 .await
714 }
715 });
716
717 Some(cx.spawn(|this, mut cx| async move {
718 rename.await?;
719 let entry = this
720 .update(&mut cx, |this, cx| {
721 this.as_local_mut().unwrap().refresh_entry(
722 new_path.clone(),
723 abs_new_path,
724 Some(old_path),
725 cx,
726 )
727 })
728 .await?;
729 Ok(entry)
730 }))
731 }
732
733 pub fn copy_entry(
734 &self,
735 entry_id: ProjectEntryId,
736 new_path: impl Into<Arc<Path>>,
737 cx: &mut ModelContext<Worktree>,
738 ) -> Option<Task<Result<Entry>>> {
739 let old_path = self.entry_for_id(entry_id)?.path.clone();
740 let new_path = new_path.into();
741 let abs_old_path = self.absolutize(&old_path);
742 let abs_new_path = self.absolutize(&new_path);
743 let copy = cx.background().spawn({
744 let fs = self.fs.clone();
745 let abs_new_path = abs_new_path.clone();
746 async move {
747 copy_recursive(
748 fs.as_ref(),
749 &abs_old_path,
750 &abs_new_path,
751 Default::default(),
752 )
753 .await
754 }
755 });
756
757 Some(cx.spawn(|this, mut cx| async move {
758 copy.await?;
759 let entry = this
760 .update(&mut cx, |this, cx| {
761 this.as_local_mut().unwrap().refresh_entry(
762 new_path.clone(),
763 abs_new_path,
764 None,
765 cx,
766 )
767 })
768 .await?;
769 Ok(entry)
770 }))
771 }
772
773 fn write_entry_internal(
774 &self,
775 path: impl Into<Arc<Path>>,
776 text_if_file: Option<(Rope, LineEnding)>,
777 cx: &mut ModelContext<Worktree>,
778 ) -> Task<Result<Entry>> {
779 let path = path.into();
780 let abs_path = self.absolutize(&path);
781 let write = cx.background().spawn({
782 let fs = self.fs.clone();
783 let abs_path = abs_path.clone();
784 async move {
785 if let Some((text, line_ending)) = text_if_file {
786 fs.save(&abs_path, &text, line_ending).await
787 } else {
788 fs.create_dir(&abs_path).await
789 }
790 }
791 });
792
793 cx.spawn(|this, mut cx| async move {
794 write.await?;
795 let entry = this
796 .update(&mut cx, |this, cx| {
797 this.as_local_mut()
798 .unwrap()
799 .refresh_entry(path, abs_path, None, cx)
800 })
801 .await?;
802 Ok(entry)
803 })
804 }
805
806 fn refresh_entry(
807 &self,
808 path: Arc<Path>,
809 abs_path: PathBuf,
810 old_path: Option<Arc<Path>>,
811 cx: &mut ModelContext<Worktree>,
812 ) -> Task<Result<Entry>> {
813 let fs = self.fs.clone();
814 let root_char_bag;
815 let next_entry_id;
816 {
817 let snapshot = self.background_snapshot.lock();
818 root_char_bag = snapshot.root_char_bag;
819 next_entry_id = snapshot.next_entry_id.clone();
820 }
821 cx.spawn_weak(|this, mut cx| async move {
822 let metadata = fs
823 .metadata(&abs_path)
824 .await?
825 .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
826 let this = this
827 .upgrade(&cx)
828 .ok_or_else(|| anyhow!("worktree was dropped"))?;
829 this.update(&mut cx, |this, cx| {
830 let this = this.as_local_mut().unwrap();
831 let inserted_entry;
832 {
833 let mut snapshot = this.background_snapshot.lock();
834 let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
835 entry.is_ignored = snapshot
836 .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
837 .is_abs_path_ignored(&abs_path, entry.is_dir());
838 if let Some(old_path) = old_path {
839 snapshot.remove_path(&old_path);
840 }
841 inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
842 snapshot.scan_id += 1;
843 }
844 this.poll_snapshot(true, cx);
845 Ok(inserted_entry)
846 })
847 })
848 }
849
850 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
851 let (share_tx, share_rx) = oneshot::channel();
852
853 if self.share.is_some() {
854 let _ = share_tx.send(Ok(()));
855 } else {
856 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
857 let rpc = self.client.clone();
858 let worktree_id = cx.model_id() as u64;
859 let maintain_remote_snapshot = cx.background().spawn({
860 let rpc = rpc;
861 let diagnostic_summaries = self.diagnostic_summaries.clone();
862 async move {
863 let mut prev_snapshot = match snapshots_rx.recv().await {
864 Some(snapshot) => {
865 let update = proto::UpdateWorktree {
866 project_id,
867 worktree_id,
868 root_name: snapshot.root_name().to_string(),
869 updated_entries: snapshot
870 .entries_by_path
871 .iter()
872 .map(Into::into)
873 .collect(),
874 removed_entries: Default::default(),
875 scan_id: snapshot.scan_id as u64,
876 is_last_update: true,
877 };
878 if let Err(error) = send_worktree_update(&rpc, update).await {
879 let _ = share_tx.send(Err(error));
880 return Err(anyhow!("failed to send initial update worktree"));
881 } else {
882 let _ = share_tx.send(Ok(()));
883 snapshot
884 }
885 }
886 None => {
887 share_tx
888 .send(Err(anyhow!("worktree dropped before share completed")))
889 .ok();
890 return Err(anyhow!("failed to send initial update worktree"));
891 }
892 };
893
894 for (path, summary) in diagnostic_summaries.iter() {
895 rpc.send(proto::UpdateDiagnosticSummary {
896 project_id,
897 worktree_id,
898 summary: Some(summary.to_proto(&path.0)),
899 })?;
900 }
901
902 while let Some(snapshot) = snapshots_rx.recv().await {
903 send_worktree_update(
904 &rpc,
905 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true),
906 )
907 .await?;
908 prev_snapshot = snapshot;
909 }
910
911 Ok::<_, anyhow::Error>(())
912 }
913 .log_err()
914 });
915 self.share = Some(ShareState {
916 project_id,
917 snapshots_tx,
918 _maintain_remote_snapshot: Some(maintain_remote_snapshot),
919 });
920 }
921
922 cx.foreground().spawn(async move {
923 share_rx
924 .await
925 .unwrap_or_else(|_| Err(anyhow!("share ended")))
926 })
927 }
928
929 pub fn unshare(&mut self) {
930 self.share.take();
931 }
932
933 pub fn is_shared(&self) -> bool {
934 self.share.is_some()
935 }
936
937 pub fn send_extension_counts(&self, project_id: u64) {
938 let mut extensions = Vec::new();
939 let mut counts = Vec::new();
940
941 for (extension, count) in self.extension_counts() {
942 extensions.push(extension.to_string_lossy().to_string());
943 counts.push(*count as u32);
944 }
945
946 self.client
947 .send(proto::UpdateWorktreeExtensions {
948 project_id,
949 worktree_id: self.id().to_proto(),
950 extensions,
951 counts,
952 })
953 .log_err();
954 }
955}
956
957impl RemoteWorktree {
958 fn snapshot(&self) -> Snapshot {
959 self.snapshot.clone()
960 }
961
962 fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
963 self.snapshot = self.background_snapshot.lock().clone();
964 cx.emit(Event::UpdatedEntries);
965 cx.notify();
966 }
967
968 pub fn disconnected_from_host(&mut self) {
969 self.updates_tx.take();
970 self.snapshot_subscriptions.clear();
971 }
972
973 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
974 if let Some(updates_tx) = &self.updates_tx {
975 updates_tx
976 .unbounded_send(update)
977 .expect("consumer runs to completion");
978 }
979 }
980
981 fn observed_snapshot(&self, scan_id: usize) -> bool {
982 self.scan_id > scan_id || (self.scan_id == scan_id && self.is_complete)
983 }
984
985 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = ()> {
986 let (tx, rx) = oneshot::channel();
987 if self.observed_snapshot(scan_id) {
988 let _ = tx.send(());
989 } else {
990 match self
991 .snapshot_subscriptions
992 .binary_search_by_key(&scan_id, |probe| probe.0)
993 {
994 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
995 }
996 }
997
998 async move {
999 let _ = rx.await;
1000 }
1001 }
1002
1003 pub fn update_diagnostic_summary(
1004 &mut self,
1005 path: Arc<Path>,
1006 summary: &proto::DiagnosticSummary,
1007 ) {
1008 let summary = DiagnosticSummary {
1009 language_server_id: summary.language_server_id as usize,
1010 error_count: summary.error_count as usize,
1011 warning_count: summary.warning_count as usize,
1012 };
1013 if summary.is_empty() {
1014 self.diagnostic_summaries.remove(&PathKey(path));
1015 } else {
1016 self.diagnostic_summaries.insert(PathKey(path), summary);
1017 }
1018 }
1019
1020 pub fn insert_entry(
1021 &mut self,
1022 entry: proto::Entry,
1023 scan_id: usize,
1024 cx: &mut ModelContext<Worktree>,
1025 ) -> Task<Result<Entry>> {
1026 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1027 cx.spawn(|this, mut cx| async move {
1028 wait_for_snapshot.await;
1029 this.update(&mut cx, |worktree, _| {
1030 let worktree = worktree.as_remote_mut().unwrap();
1031 let mut snapshot = worktree.background_snapshot.lock();
1032 let entry = snapshot.insert_entry(entry);
1033 worktree.snapshot = snapshot.clone();
1034 entry
1035 })
1036 })
1037 }
1038
1039 pub(crate) fn delete_entry(
1040 &mut self,
1041 id: ProjectEntryId,
1042 scan_id: usize,
1043 cx: &mut ModelContext<Worktree>,
1044 ) -> Task<Result<()>> {
1045 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1046 cx.spawn(|this, mut cx| async move {
1047 wait_for_snapshot.await;
1048 this.update(&mut cx, |worktree, _| {
1049 let worktree = worktree.as_remote_mut().unwrap();
1050 let mut snapshot = worktree.background_snapshot.lock();
1051 snapshot.delete_entry(id);
1052 worktree.snapshot = snapshot.clone();
1053 });
1054 Ok(())
1055 })
1056 }
1057}
1058
1059impl Snapshot {
1060 pub fn id(&self) -> WorktreeId {
1061 self.id
1062 }
1063
1064 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1065 self.entries_by_id.get(&entry_id, &()).is_some()
1066 }
1067
1068 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1069 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1070 let old_entry = self.entries_by_id.insert_or_replace(
1071 PathEntry {
1072 id: entry.id,
1073 path: entry.path.clone(),
1074 is_ignored: entry.is_ignored,
1075 scan_id: 0,
1076 },
1077 &(),
1078 );
1079 if let Some(old_entry) = old_entry {
1080 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1081 }
1082 self.entries_by_path.insert_or_replace(entry.clone(), &());
1083 Ok(entry)
1084 }
1085
1086 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
1087 if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
1088 self.entries_by_path = {
1089 let mut cursor = self.entries_by_path.cursor();
1090 let mut new_entries_by_path =
1091 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1092 while let Some(entry) = cursor.item() {
1093 if entry.path.starts_with(&removed_entry.path) {
1094 self.entries_by_id.remove(&entry.id, &());
1095 cursor.next(&());
1096 } else {
1097 break;
1098 }
1099 }
1100 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1101 new_entries_by_path
1102 };
1103
1104 true
1105 } else {
1106 false
1107 }
1108 }
1109
1110 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1111 let mut entries_by_path_edits = Vec::new();
1112 let mut entries_by_id_edits = Vec::new();
1113 for entry_id in update.removed_entries {
1114 let entry = self
1115 .entry_for_id(ProjectEntryId::from_proto(entry_id))
1116 .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
1117 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1118 entries_by_id_edits.push(Edit::Remove(entry.id));
1119 }
1120
1121 for entry in update.updated_entries {
1122 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1123 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1124 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1125 }
1126 entries_by_id_edits.push(Edit::Insert(PathEntry {
1127 id: entry.id,
1128 path: entry.path.clone(),
1129 is_ignored: entry.is_ignored,
1130 scan_id: 0,
1131 }));
1132 entries_by_path_edits.push(Edit::Insert(entry));
1133 }
1134
1135 self.entries_by_path.edit(entries_by_path_edits, &());
1136 self.entries_by_id.edit(entries_by_id_edits, &());
1137 self.scan_id = update.scan_id as usize;
1138 self.is_complete = update.is_last_update;
1139
1140 Ok(())
1141 }
1142
1143 pub fn file_count(&self) -> usize {
1144 self.entries_by_path.summary().file_count
1145 }
1146
1147 pub fn visible_file_count(&self) -> usize {
1148 self.entries_by_path.summary().visible_file_count
1149 }
1150
1151 fn traverse_from_offset(
1152 &self,
1153 include_dirs: bool,
1154 include_ignored: bool,
1155 start_offset: usize,
1156 ) -> Traversal {
1157 let mut cursor = self.entries_by_path.cursor();
1158 cursor.seek(
1159 &TraversalTarget::Count {
1160 count: start_offset,
1161 include_dirs,
1162 include_ignored,
1163 },
1164 Bias::Right,
1165 &(),
1166 );
1167 Traversal {
1168 cursor,
1169 include_dirs,
1170 include_ignored,
1171 }
1172 }
1173
1174 fn traverse_from_path(
1175 &self,
1176 include_dirs: bool,
1177 include_ignored: bool,
1178 path: &Path,
1179 ) -> Traversal {
1180 let mut cursor = self.entries_by_path.cursor();
1181 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1182 Traversal {
1183 cursor,
1184 include_dirs,
1185 include_ignored,
1186 }
1187 }
1188
1189 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1190 self.traverse_from_offset(false, include_ignored, start)
1191 }
1192
1193 pub fn entries(&self, include_ignored: bool) -> Traversal {
1194 self.traverse_from_offset(true, include_ignored, 0)
1195 }
1196
1197 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1198 let empty_path = Path::new("");
1199 self.entries_by_path
1200 .cursor::<()>()
1201 .filter(move |entry| entry.path.as_ref() != empty_path)
1202 .map(|entry| &entry.path)
1203 }
1204
1205 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1206 let mut cursor = self.entries_by_path.cursor();
1207 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1208 let traversal = Traversal {
1209 cursor,
1210 include_dirs: true,
1211 include_ignored: true,
1212 };
1213 ChildEntriesIter {
1214 traversal,
1215 parent_path,
1216 }
1217 }
1218
1219 pub fn root_entry(&self) -> Option<&Entry> {
1220 self.entry_for_path("")
1221 }
1222
1223 pub fn root_name(&self) -> &str {
1224 &self.root_name
1225 }
1226
1227 pub fn scan_id(&self) -> usize {
1228 self.scan_id
1229 }
1230
1231 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1232 let path = path.as_ref();
1233 self.traverse_from_path(true, true, path)
1234 .entry()
1235 .and_then(|entry| {
1236 if entry.path.as_ref() == path {
1237 Some(entry)
1238 } else {
1239 None
1240 }
1241 })
1242 }
1243
1244 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1245 let entry = self.entries_by_id.get(&id, &())?;
1246 self.entry_for_path(&entry.path)
1247 }
1248
1249 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1250 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1251 }
1252}
1253
1254impl LocalSnapshot {
1255 pub fn abs_path(&self) -> &Arc<Path> {
1256 &self.abs_path
1257 }
1258
1259 pub fn extension_counts(&self) -> &HashMap<OsString, usize> {
1260 &self.extension_counts
1261 }
1262
1263 #[cfg(test)]
1264 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1265 let root_name = self.root_name.clone();
1266 proto::UpdateWorktree {
1267 project_id,
1268 worktree_id: self.id().to_proto(),
1269 root_name,
1270 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1271 removed_entries: Default::default(),
1272 scan_id: self.scan_id as u64,
1273 is_last_update: true,
1274 }
1275 }
1276
1277 pub(crate) fn build_update(
1278 &self,
1279 other: &Self,
1280 project_id: u64,
1281 worktree_id: u64,
1282 include_ignored: bool,
1283 ) -> proto::UpdateWorktree {
1284 let mut updated_entries = Vec::new();
1285 let mut removed_entries = Vec::new();
1286 let mut self_entries = self
1287 .entries_by_id
1288 .cursor::<()>()
1289 .filter(|e| include_ignored || !e.is_ignored)
1290 .peekable();
1291 let mut other_entries = other
1292 .entries_by_id
1293 .cursor::<()>()
1294 .filter(|e| include_ignored || !e.is_ignored)
1295 .peekable();
1296 loop {
1297 match (self_entries.peek(), other_entries.peek()) {
1298 (Some(self_entry), Some(other_entry)) => {
1299 match Ord::cmp(&self_entry.id, &other_entry.id) {
1300 Ordering::Less => {
1301 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1302 updated_entries.push(entry);
1303 self_entries.next();
1304 }
1305 Ordering::Equal => {
1306 if self_entry.scan_id != other_entry.scan_id {
1307 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1308 updated_entries.push(entry);
1309 }
1310
1311 self_entries.next();
1312 other_entries.next();
1313 }
1314 Ordering::Greater => {
1315 removed_entries.push(other_entry.id.to_proto());
1316 other_entries.next();
1317 }
1318 }
1319 }
1320 (Some(self_entry), None) => {
1321 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1322 updated_entries.push(entry);
1323 self_entries.next();
1324 }
1325 (None, Some(other_entry)) => {
1326 removed_entries.push(other_entry.id.to_proto());
1327 other_entries.next();
1328 }
1329 (None, None) => break,
1330 }
1331 }
1332
1333 proto::UpdateWorktree {
1334 project_id,
1335 worktree_id,
1336 root_name: self.root_name().to_string(),
1337 updated_entries,
1338 removed_entries,
1339 scan_id: self.scan_id as u64,
1340 is_last_update: true,
1341 }
1342 }
1343
1344 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1345 if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
1346 let abs_path = self.abs_path.join(&entry.path);
1347 match smol::block_on(build_gitignore(&abs_path, fs)) {
1348 Ok(ignore) => {
1349 self.ignores_by_parent_abs_path.insert(
1350 abs_path.parent().unwrap().into(),
1351 (Arc::new(ignore), self.scan_id),
1352 );
1353 }
1354 Err(error) => {
1355 log::error!(
1356 "error loading .gitignore file {:?} - {:?}",
1357 &entry.path,
1358 error
1359 );
1360 }
1361 }
1362 }
1363
1364 self.reuse_entry_id(&mut entry);
1365
1366 if entry.kind == EntryKind::PendingDir {
1367 if let Some(existing_entry) =
1368 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1369 {
1370 entry.kind = existing_entry.kind;
1371 }
1372 }
1373
1374 self.entries_by_path.insert_or_replace(entry.clone(), &());
1375 let scan_id = self.scan_id;
1376 let removed_entry = self.entries_by_id.insert_or_replace(
1377 PathEntry {
1378 id: entry.id,
1379 path: entry.path.clone(),
1380 is_ignored: entry.is_ignored,
1381 scan_id,
1382 },
1383 &(),
1384 );
1385
1386 if let Some(removed_entry) = removed_entry {
1387 self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
1388 }
1389 self.inc_extension_count(&entry.path, entry.is_ignored);
1390
1391 entry
1392 }
1393
1394 fn populate_dir(
1395 &mut self,
1396 parent_path: Arc<Path>,
1397 entries: impl IntoIterator<Item = Entry>,
1398 ignore: Option<Arc<Gitignore>>,
1399 ) {
1400 let mut parent_entry = if let Some(parent_entry) =
1401 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1402 {
1403 parent_entry.clone()
1404 } else {
1405 log::warn!(
1406 "populating a directory {:?} that has been removed",
1407 parent_path
1408 );
1409 return;
1410 };
1411
1412 if let Some(ignore) = ignore {
1413 self.ignores_by_parent_abs_path.insert(
1414 self.abs_path.join(&parent_path).into(),
1415 (ignore, self.scan_id),
1416 );
1417 }
1418 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1419 parent_entry.kind = EntryKind::Dir;
1420 } else {
1421 unreachable!();
1422 }
1423
1424 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1425 let mut entries_by_id_edits = Vec::new();
1426
1427 for mut entry in entries {
1428 self.reuse_entry_id(&mut entry);
1429 self.inc_extension_count(&entry.path, entry.is_ignored);
1430 entries_by_id_edits.push(Edit::Insert(PathEntry {
1431 id: entry.id,
1432 path: entry.path.clone(),
1433 is_ignored: entry.is_ignored,
1434 scan_id: self.scan_id,
1435 }));
1436 entries_by_path_edits.push(Edit::Insert(entry));
1437 }
1438
1439 self.entries_by_path.edit(entries_by_path_edits, &());
1440 let removed_entries = self.entries_by_id.edit(entries_by_id_edits, &());
1441
1442 for removed_entry in removed_entries {
1443 self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
1444 }
1445 }
1446
1447 fn inc_extension_count(&mut self, path: &Path, ignored: bool) {
1448 if !ignored {
1449 if let Some(extension) = path.extension() {
1450 if let Some(count) = self.extension_counts.get_mut(extension) {
1451 *count += 1;
1452 } else {
1453 self.extension_counts.insert(extension.into(), 1);
1454 }
1455 }
1456 }
1457 }
1458
1459 fn dec_extension_count(&mut self, path: &Path, ignored: bool) {
1460 if !ignored {
1461 if let Some(extension) = path.extension() {
1462 if let Some(count) = self.extension_counts.get_mut(extension) {
1463 *count -= 1;
1464 }
1465 }
1466 }
1467 }
1468
1469 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1470 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1471 entry.id = removed_entry_id;
1472 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1473 entry.id = existing_entry.id;
1474 }
1475 }
1476
1477 fn remove_path(&mut self, path: &Path) {
1478 let mut new_entries;
1479 let removed_entries;
1480 {
1481 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1482 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1483 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1484 new_entries.push_tree(cursor.suffix(&()), &());
1485 }
1486 self.entries_by_path = new_entries;
1487
1488 let mut entries_by_id_edits = Vec::new();
1489 for entry in removed_entries.cursor::<()>() {
1490 let removed_entry_id = self
1491 .removed_entry_ids
1492 .entry(entry.inode)
1493 .or_insert(entry.id);
1494 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1495 entries_by_id_edits.push(Edit::Remove(entry.id));
1496 self.dec_extension_count(&entry.path, entry.is_ignored);
1497 }
1498 self.entries_by_id.edit(entries_by_id_edits, &());
1499
1500 if path.file_name() == Some(&GITIGNORE) {
1501 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1502 if let Some((_, scan_id)) = self
1503 .ignores_by_parent_abs_path
1504 .get_mut(abs_parent_path.as_path())
1505 {
1506 *scan_id = self.snapshot.scan_id;
1507 }
1508 }
1509 }
1510
1511 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1512 let mut inodes = TreeSet::default();
1513 for ancestor in path.ancestors().skip(1) {
1514 if let Some(entry) = self.entry_for_path(ancestor) {
1515 inodes.insert(entry.inode);
1516 }
1517 }
1518 inodes
1519 }
1520
1521 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1522 let mut new_ignores = Vec::new();
1523 for ancestor in abs_path.ancestors().skip(1) {
1524 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1525 new_ignores.push((ancestor, Some(ignore.clone())));
1526 } else {
1527 new_ignores.push((ancestor, None));
1528 }
1529 }
1530
1531 let mut ignore_stack = IgnoreStack::none();
1532 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1533 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1534 ignore_stack = IgnoreStack::all();
1535 break;
1536 } else if let Some(ignore) = ignore {
1537 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1538 }
1539 }
1540
1541 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1542 ignore_stack = IgnoreStack::all();
1543 }
1544
1545 ignore_stack
1546 }
1547}
1548
1549async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1550 let contents = fs.load(abs_path).await?;
1551 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1552 let mut builder = GitignoreBuilder::new(parent);
1553 for line in contents.lines() {
1554 builder.add_line(Some(abs_path.into()), line)?;
1555 }
1556 Ok(builder.build()?)
1557}
1558
1559impl WorktreeId {
1560 pub fn from_usize(handle_id: usize) -> Self {
1561 Self(handle_id)
1562 }
1563
1564 pub(crate) fn from_proto(id: u64) -> Self {
1565 Self(id as usize)
1566 }
1567
1568 pub fn to_proto(&self) -> u64 {
1569 self.0 as u64
1570 }
1571
1572 pub fn to_usize(&self) -> usize {
1573 self.0
1574 }
1575}
1576
1577impl fmt::Display for WorktreeId {
1578 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1579 self.0.fmt(f)
1580 }
1581}
1582
1583impl Deref for Worktree {
1584 type Target = Snapshot;
1585
1586 fn deref(&self) -> &Self::Target {
1587 match self {
1588 Worktree::Local(worktree) => &worktree.snapshot,
1589 Worktree::Remote(worktree) => &worktree.snapshot,
1590 }
1591 }
1592}
1593
1594impl Deref for LocalWorktree {
1595 type Target = LocalSnapshot;
1596
1597 fn deref(&self) -> &Self::Target {
1598 &self.snapshot
1599 }
1600}
1601
1602impl Deref for RemoteWorktree {
1603 type Target = Snapshot;
1604
1605 fn deref(&self) -> &Self::Target {
1606 &self.snapshot
1607 }
1608}
1609
1610impl fmt::Debug for LocalWorktree {
1611 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1612 self.snapshot.fmt(f)
1613 }
1614}
1615
1616impl fmt::Debug for Snapshot {
1617 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1618 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1619 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1620
1621 impl<'a> fmt::Debug for EntriesByPath<'a> {
1622 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1623 f.debug_map()
1624 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1625 .finish()
1626 }
1627 }
1628
1629 impl<'a> fmt::Debug for EntriesById<'a> {
1630 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1631 f.debug_list().entries(self.0.iter()).finish()
1632 }
1633 }
1634
1635 f.debug_struct("Snapshot")
1636 .field("id", &self.id)
1637 .field("root_name", &self.root_name)
1638 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1639 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1640 .finish()
1641 }
1642}
1643
1644#[derive(Clone, PartialEq)]
1645pub struct File {
1646 pub worktree: ModelHandle<Worktree>,
1647 pub path: Arc<Path>,
1648 pub mtime: SystemTime,
1649 pub(crate) entry_id: Option<ProjectEntryId>,
1650 pub(crate) is_local: bool,
1651}
1652
1653impl language::File for File {
1654 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1655 if self.is_local {
1656 Some(self)
1657 } else {
1658 None
1659 }
1660 }
1661
1662 fn mtime(&self) -> SystemTime {
1663 self.mtime
1664 }
1665
1666 fn path(&self) -> &Arc<Path> {
1667 &self.path
1668 }
1669
1670 fn full_path(&self, cx: &AppContext) -> PathBuf {
1671 let mut full_path = PathBuf::new();
1672 full_path.push(self.worktree.read(cx).root_name());
1673 if self.path.components().next().is_some() {
1674 full_path.push(&self.path);
1675 }
1676 full_path
1677 }
1678
1679 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1680 /// of its worktree, then this method will return the name of the worktree itself.
1681 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1682 self.path
1683 .file_name()
1684 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1685 }
1686
1687 fn is_deleted(&self) -> bool {
1688 self.entry_id.is_none()
1689 }
1690
1691 fn save(
1692 &self,
1693 buffer_id: u64,
1694 text: Rope,
1695 version: clock::Global,
1696 line_ending: LineEnding,
1697 cx: &mut MutableAppContext,
1698 ) -> Task<Result<(clock::Global, String, SystemTime)>> {
1699 self.worktree.update(cx, |worktree, cx| match worktree {
1700 Worktree::Local(worktree) => {
1701 let rpc = worktree.client.clone();
1702 let project_id = worktree.share.as_ref().map(|share| share.project_id);
1703 let fingerprint = text.fingerprint();
1704 let save = worktree.write_file(self.path.clone(), text, line_ending, cx);
1705 cx.background().spawn(async move {
1706 let entry = save.await?;
1707 if let Some(project_id) = project_id {
1708 rpc.send(proto::BufferSaved {
1709 project_id,
1710 buffer_id,
1711 version: serialize_version(&version),
1712 mtime: Some(entry.mtime.into()),
1713 fingerprint: fingerprint.clone(),
1714 })?;
1715 }
1716 Ok((version, fingerprint, entry.mtime))
1717 })
1718 }
1719 Worktree::Remote(worktree) => {
1720 let rpc = worktree.client.clone();
1721 let project_id = worktree.project_id;
1722 cx.foreground().spawn(async move {
1723 let response = rpc
1724 .request(proto::SaveBuffer {
1725 project_id,
1726 buffer_id,
1727 version: serialize_version(&version),
1728 })
1729 .await?;
1730 let version = deserialize_version(response.version);
1731 let mtime = response
1732 .mtime
1733 .ok_or_else(|| anyhow!("missing mtime"))?
1734 .into();
1735 Ok((version, response.fingerprint, mtime))
1736 })
1737 }
1738 })
1739 }
1740
1741 fn as_any(&self) -> &dyn Any {
1742 self
1743 }
1744
1745 fn to_proto(&self) -> rpc::proto::File {
1746 rpc::proto::File {
1747 worktree_id: self.worktree.id() as u64,
1748 entry_id: self.entry_id.map(|entry_id| entry_id.to_proto()),
1749 path: self.path.to_string_lossy().into(),
1750 mtime: Some(self.mtime.into()),
1751 }
1752 }
1753}
1754
1755impl language::LocalFile for File {
1756 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1757 self.worktree
1758 .read(cx)
1759 .as_local()
1760 .unwrap()
1761 .abs_path
1762 .join(&self.path)
1763 }
1764
1765 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1766 let worktree = self.worktree.read(cx).as_local().unwrap();
1767 let abs_path = worktree.absolutize(&self.path);
1768 let fs = worktree.fs.clone();
1769 cx.background()
1770 .spawn(async move { fs.load(&abs_path).await })
1771 }
1772
1773 fn buffer_reloaded(
1774 &self,
1775 buffer_id: u64,
1776 version: &clock::Global,
1777 fingerprint: String,
1778 line_ending: LineEnding,
1779 mtime: SystemTime,
1780 cx: &mut MutableAppContext,
1781 ) {
1782 let worktree = self.worktree.read(cx).as_local().unwrap();
1783 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1784 worktree
1785 .client
1786 .send(proto::BufferReloaded {
1787 project_id,
1788 buffer_id,
1789 version: serialize_version(version),
1790 mtime: Some(mtime.into()),
1791 fingerprint,
1792 line_ending: serialize_line_ending(line_ending) as i32,
1793 })
1794 .log_err();
1795 }
1796 }
1797}
1798
1799impl File {
1800 pub fn from_proto(
1801 proto: rpc::proto::File,
1802 worktree: ModelHandle<Worktree>,
1803 cx: &AppContext,
1804 ) -> Result<Self> {
1805 let worktree_id = worktree
1806 .read(cx)
1807 .as_remote()
1808 .ok_or_else(|| anyhow!("not remote"))?
1809 .id();
1810
1811 if worktree_id.to_proto() != proto.worktree_id {
1812 return Err(anyhow!("worktree id does not match file"));
1813 }
1814
1815 Ok(Self {
1816 worktree,
1817 path: Path::new(&proto.path).into(),
1818 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1819 entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
1820 is_local: false,
1821 })
1822 }
1823
1824 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
1825 file.and_then(|f| f.as_any().downcast_ref())
1826 }
1827
1828 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1829 self.worktree.read(cx).id()
1830 }
1831
1832 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
1833 self.entry_id
1834 }
1835}
1836
1837#[derive(Clone, Debug, PartialEq, Eq)]
1838pub struct Entry {
1839 pub id: ProjectEntryId,
1840 pub kind: EntryKind,
1841 pub path: Arc<Path>,
1842 pub inode: u64,
1843 pub mtime: SystemTime,
1844 pub is_symlink: bool,
1845 pub is_ignored: bool,
1846}
1847
1848#[derive(Clone, Copy, Debug, PartialEq, Eq)]
1849pub enum EntryKind {
1850 PendingDir,
1851 Dir,
1852 File(CharBag),
1853}
1854
1855impl Entry {
1856 fn new(
1857 path: Arc<Path>,
1858 metadata: &fs::Metadata,
1859 next_entry_id: &AtomicUsize,
1860 root_char_bag: CharBag,
1861 ) -> Self {
1862 Self {
1863 id: ProjectEntryId::new(next_entry_id),
1864 kind: if metadata.is_dir {
1865 EntryKind::PendingDir
1866 } else {
1867 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1868 },
1869 path,
1870 inode: metadata.inode,
1871 mtime: metadata.mtime,
1872 is_symlink: metadata.is_symlink,
1873 is_ignored: false,
1874 }
1875 }
1876
1877 pub fn is_dir(&self) -> bool {
1878 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
1879 }
1880
1881 pub fn is_file(&self) -> bool {
1882 matches!(self.kind, EntryKind::File(_))
1883 }
1884}
1885
1886impl sum_tree::Item for Entry {
1887 type Summary = EntrySummary;
1888
1889 fn summary(&self) -> Self::Summary {
1890 let visible_count = if self.is_ignored { 0 } else { 1 };
1891 let file_count;
1892 let visible_file_count;
1893 if self.is_file() {
1894 file_count = 1;
1895 visible_file_count = visible_count;
1896 } else {
1897 file_count = 0;
1898 visible_file_count = 0;
1899 }
1900
1901 EntrySummary {
1902 max_path: self.path.clone(),
1903 count: 1,
1904 visible_count,
1905 file_count,
1906 visible_file_count,
1907 }
1908 }
1909}
1910
1911impl sum_tree::KeyedItem for Entry {
1912 type Key = PathKey;
1913
1914 fn key(&self) -> Self::Key {
1915 PathKey(self.path.clone())
1916 }
1917}
1918
1919#[derive(Clone, Debug)]
1920pub struct EntrySummary {
1921 max_path: Arc<Path>,
1922 count: usize,
1923 visible_count: usize,
1924 file_count: usize,
1925 visible_file_count: usize,
1926}
1927
1928impl Default for EntrySummary {
1929 fn default() -> Self {
1930 Self {
1931 max_path: Arc::from(Path::new("")),
1932 count: 0,
1933 visible_count: 0,
1934 file_count: 0,
1935 visible_file_count: 0,
1936 }
1937 }
1938}
1939
1940impl sum_tree::Summary for EntrySummary {
1941 type Context = ();
1942
1943 fn add_summary(&mut self, rhs: &Self, _: &()) {
1944 self.max_path = rhs.max_path.clone();
1945 self.count += rhs.count;
1946 self.visible_count += rhs.visible_count;
1947 self.file_count += rhs.file_count;
1948 self.visible_file_count += rhs.visible_file_count;
1949 }
1950}
1951
1952#[derive(Clone, Debug)]
1953struct PathEntry {
1954 id: ProjectEntryId,
1955 path: Arc<Path>,
1956 is_ignored: bool,
1957 scan_id: usize,
1958}
1959
1960impl sum_tree::Item for PathEntry {
1961 type Summary = PathEntrySummary;
1962
1963 fn summary(&self) -> Self::Summary {
1964 PathEntrySummary { max_id: self.id }
1965 }
1966}
1967
1968impl sum_tree::KeyedItem for PathEntry {
1969 type Key = ProjectEntryId;
1970
1971 fn key(&self) -> Self::Key {
1972 self.id
1973 }
1974}
1975
1976#[derive(Clone, Debug, Default)]
1977struct PathEntrySummary {
1978 max_id: ProjectEntryId,
1979}
1980
1981impl sum_tree::Summary for PathEntrySummary {
1982 type Context = ();
1983
1984 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
1985 self.max_id = summary.max_id;
1986 }
1987}
1988
1989impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
1990 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
1991 *self = summary.max_id;
1992 }
1993}
1994
1995#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1996pub struct PathKey(Arc<Path>);
1997
1998impl Default for PathKey {
1999 fn default() -> Self {
2000 Self(Path::new("").into())
2001 }
2002}
2003
2004impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2005 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2006 self.0 = summary.max_path.clone();
2007 }
2008}
2009
2010struct BackgroundScanner {
2011 fs: Arc<dyn Fs>,
2012 snapshot: Arc<Mutex<LocalSnapshot>>,
2013 notify: UnboundedSender<ScanState>,
2014 executor: Arc<executor::Background>,
2015}
2016
2017impl BackgroundScanner {
2018 fn new(
2019 snapshot: Arc<Mutex<LocalSnapshot>>,
2020 notify: UnboundedSender<ScanState>,
2021 fs: Arc<dyn Fs>,
2022 executor: Arc<executor::Background>,
2023 ) -> Self {
2024 Self {
2025 fs,
2026 snapshot,
2027 notify,
2028 executor,
2029 }
2030 }
2031
2032 fn abs_path(&self) -> Arc<Path> {
2033 self.snapshot.lock().abs_path.clone()
2034 }
2035
2036 fn snapshot(&self) -> LocalSnapshot {
2037 self.snapshot.lock().clone()
2038 }
2039
2040 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2041 if self.notify.unbounded_send(ScanState::Initializing).is_err() {
2042 return;
2043 }
2044
2045 if let Err(err) = self.scan_dirs().await {
2046 if self
2047 .notify
2048 .unbounded_send(ScanState::Err(Arc::new(err)))
2049 .is_err()
2050 {
2051 return;
2052 }
2053 }
2054
2055 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2056 return;
2057 }
2058
2059 futures::pin_mut!(events_rx);
2060
2061 while let Some(mut events) = events_rx.next().await {
2062 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
2063 events.extend(additional_events);
2064 }
2065
2066 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2067 break;
2068 }
2069
2070 if !self.process_events(events).await {
2071 break;
2072 }
2073
2074 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2075 break;
2076 }
2077 }
2078 }
2079
2080 async fn scan_dirs(&mut self) -> Result<()> {
2081 let root_char_bag;
2082 let root_abs_path;
2083 let root_inode;
2084 let is_dir;
2085 let next_entry_id;
2086 {
2087 let snapshot = self.snapshot.lock();
2088 root_char_bag = snapshot.root_char_bag;
2089 root_abs_path = snapshot.abs_path.clone();
2090 root_inode = snapshot.root_entry().map(|e| e.inode);
2091 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
2092 next_entry_id = snapshot.next_entry_id.clone();
2093 };
2094
2095 // Populate ignores above the root.
2096 for ancestor in root_abs_path.ancestors().skip(1) {
2097 if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2098 {
2099 self.snapshot
2100 .lock()
2101 .ignores_by_parent_abs_path
2102 .insert(ancestor.into(), (ignore.into(), 0));
2103 }
2104 }
2105
2106 let ignore_stack = {
2107 let mut snapshot = self.snapshot.lock();
2108 let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2109 if ignore_stack.is_all() {
2110 if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2111 root_entry.is_ignored = true;
2112 snapshot.insert_entry(root_entry, self.fs.as_ref());
2113 }
2114 }
2115 ignore_stack
2116 };
2117
2118 if is_dir {
2119 let path: Arc<Path> = Arc::from(Path::new(""));
2120 let mut ancestor_inodes = TreeSet::default();
2121 if let Some(root_inode) = root_inode {
2122 ancestor_inodes.insert(root_inode);
2123 }
2124
2125 let (tx, rx) = channel::unbounded();
2126 self.executor
2127 .block(tx.send(ScanJob {
2128 abs_path: root_abs_path.to_path_buf(),
2129 path,
2130 ignore_stack,
2131 ancestor_inodes,
2132 scan_queue: tx.clone(),
2133 }))
2134 .unwrap();
2135 drop(tx);
2136
2137 self.executor
2138 .scoped(|scope| {
2139 for _ in 0..self.executor.num_cpus() {
2140 scope.spawn(async {
2141 while let Ok(job) = rx.recv().await {
2142 if let Err(err) = self
2143 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2144 .await
2145 {
2146 log::error!("error scanning {:?}: {}", job.abs_path, err);
2147 }
2148 }
2149 });
2150 }
2151 })
2152 .await;
2153 }
2154
2155 Ok(())
2156 }
2157
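    /// Scans a single directory: reads its children into the snapshot, applies any
    /// `.gitignore` found there, and queues jobs for subdirectories that don't form a
    /// symlink cycle.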
2158 async fn scan_dir(
2159 &self,
2160 root_char_bag: CharBag,
2161 next_entry_id: Arc<AtomicUsize>,
2162 job: &ScanJob,
2163 ) -> Result<()> {
2164 let mut new_entries: Vec<Entry> = Vec::new();
2165 let mut new_jobs: Vec<ScanJob> = Vec::new();
2166 let mut ignore_stack = job.ignore_stack.clone();
2167 let mut new_ignore = None;
2168
2169 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2170 while let Some(child_abs_path) = child_paths.next().await {
2171 let child_abs_path = match child_abs_path {
2172 Ok(child_abs_path) => child_abs_path,
2173 Err(error) => {
2174 log::error!("error processing entry {:?}", error);
2175 continue;
2176 }
2177 };
2178 let child_name = child_abs_path.file_name().unwrap();
2179 let child_path: Arc<Path> = job.path.join(child_name).into();
2180 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2181 Ok(Some(metadata)) => metadata,
2182 Ok(None) => continue,
2183 Err(err) => {
2184 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2185 continue;
2186 }
2187 };
2188
2189 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2190 if child_name == *GITIGNORE {
2191 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2192 Ok(ignore) => {
2193 let ignore = Arc::new(ignore);
2194 ignore_stack =
2195 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2196 new_ignore = Some(ignore);
2197 }
2198 Err(error) => {
2199 log::error!(
2200 "error loading .gitignore file {:?} - {:?}",
2201 child_name,
2202 error
2203 );
2204 }
2205 }
2206
                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, it sorts near the front of the directory listing, so
                // there should rarely be many such entries. Also update the ignore stack
                // associated with any jobs we've already queued for child directories.
2211 let mut new_jobs = new_jobs.iter_mut();
2212 for entry in &mut new_entries {
2213 let entry_abs_path = self.abs_path().join(&entry.path);
2214 entry.is_ignored =
2215 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2216 if entry.is_dir() {
2217 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2218 IgnoreStack::all()
2219 } else {
2220 ignore_stack.clone()
2221 };
2222 }
2223 }
2224 }
2225
2226 let mut child_entry = Entry::new(
2227 child_path.clone(),
2228 &child_metadata,
2229 &next_entry_id,
2230 root_char_bag,
2231 );
2232
2233 if child_entry.is_dir() {
2234 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2235 child_entry.is_ignored = is_ignored;
2236
2237 if !job.ancestor_inodes.contains(&child_entry.inode) {
2238 let mut ancestor_inodes = job.ancestor_inodes.clone();
2239 ancestor_inodes.insert(child_entry.inode);
2240 new_jobs.push(ScanJob {
2241 abs_path: child_abs_path,
2242 path: child_path,
2243 ignore_stack: if is_ignored {
2244 IgnoreStack::all()
2245 } else {
2246 ignore_stack.clone()
2247 },
2248 ancestor_inodes,
2249 scan_queue: job.scan_queue.clone(),
2250 });
2251 }
2252 } else {
2253 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2254 }
2255
2256 new_entries.push(child_entry);
2257 }
2258
2259 self.snapshot
2260 .lock()
2261 .populate_dir(job.path.clone(), new_entries, new_ignore);
2262 for new_job in new_jobs {
2263 job.scan_queue.send(new_job).await.unwrap();
2264 }
2265
2266 Ok(())
2267 }
2268
2269 async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
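        // Sort the events by path so that an event for a directory is immediately followed
        // by events for its descendants, then drop those descendant events: rescanning the
        // containing directory will pick up its contents anyway.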
2270 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2271 events.dedup_by(|a, b| a.path.starts_with(&b.path));
2272
2273 let root_char_bag;
2274 let root_abs_path;
2275 let next_entry_id;
2276 {
2277 let snapshot = self.snapshot.lock();
2278 root_char_bag = snapshot.root_char_bag;
2279 root_abs_path = snapshot.abs_path.clone();
2280 next_entry_id = snapshot.next_entry_id.clone();
2281 }
2282
2283 let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
2284 path
2285 } else {
2286 return false;
2287 };
2288 let metadata = futures::future::join_all(
2289 events
2290 .iter()
2291 .map(|event| self.fs.metadata(&event.path))
2292 .collect::<Vec<_>>(),
2293 )
2294 .await;
2295
        // Hold the snapshot lock while removing and re-inserting the entries for each
        // event's path. This way, the partially-updated snapshot is not observable by the
        // foreground thread while this operation is in progress.
2299 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2300 {
2301 let mut snapshot = self.snapshot.lock();
2302 snapshot.scan_id += 1;
2303 for event in &events {
2304 if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
2305 snapshot.remove_path(path);
2306 }
2307 }
2308
2309 for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
2310 let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
2311 Ok(path) => Arc::from(path.to_path_buf()),
2312 Err(_) => {
2313 log::error!(
2314 "unexpected event {:?} for root path {:?}",
2315 event.path,
2316 root_canonical_path
2317 );
2318 continue;
2319 }
2320 };
2321 let abs_path = root_abs_path.join(&path);
2322
2323 match metadata {
2324 Ok(Some(metadata)) => {
2325 let ignore_stack =
2326 snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2327 let mut fs_entry = Entry::new(
2328 path.clone(),
2329 &metadata,
2330 snapshot.next_entry_id.as_ref(),
2331 snapshot.root_char_bag,
2332 );
2333 fs_entry.is_ignored = ignore_stack.is_all();
2334 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2335
2336 let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2337 if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2338 ancestor_inodes.insert(metadata.inode);
2339 self.executor
2340 .block(scan_queue_tx.send(ScanJob {
2341 abs_path,
2342 path,
2343 ignore_stack,
2344 ancestor_inodes,
2345 scan_queue: scan_queue_tx.clone(),
2346 }))
2347 .unwrap();
2348 }
2349 }
2350 Ok(None) => {}
2351 Err(err) => {
2352 // TODO - create a special 'error' entry in the entries tree to mark this
2353 log::error!("error reading file on event {:?}", err);
2354 }
2355 }
2356 }
2357 drop(scan_queue_tx);
2358 }
2359
2360 // Scan any directories that were created as part of this event batch.
2361 self.executor
2362 .scoped(|scope| {
2363 for _ in 0..self.executor.num_cpus() {
2364 scope.spawn(async {
2365 while let Ok(job) = scan_queue_rx.recv().await {
2366 if let Err(err) = self
2367 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2368 .await
2369 {
2370 log::error!("error scanning {:?}: {}", job.abs_path, err);
2371 }
2372 }
2373 });
2374 }
2375 })
2376 .await;
2377
2378 // Attempt to detect renames only over a single batch of file-system events.
2379 self.snapshot.lock().removed_entry_ids.clear();
2380
2381 self.update_ignore_statuses().await;
2382 true
2383 }
2384
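    /// Recomputes ignore statuses after a batch of events: drops ignores whose `.gitignore`
    /// file was deleted, and re-applies any `.gitignore` that changed during this scan to
    /// the entries beneath it.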
2385 async fn update_ignore_statuses(&self) {
2386 let mut snapshot = self.snapshot();
2387
2388 let mut ignores_to_update = Vec::new();
2389 let mut ignores_to_delete = Vec::new();
2390 for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2391 if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2392 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2393 ignores_to_update.push(parent_abs_path.clone());
2394 }
2395
2396 let ignore_path = parent_path.join(&*GITIGNORE);
2397 if snapshot.entry_for_path(ignore_path).is_none() {
2398 ignores_to_delete.push(parent_abs_path.clone());
2399 }
2400 }
2401 }
2402
2403 for parent_abs_path in ignores_to_delete {
2404 snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2405 self.snapshot
2406 .lock()
2407 .ignores_by_parent_abs_path
2408 .remove(&parent_abs_path);
2409 }
2410
2411 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2412 ignores_to_update.sort_unstable();
2413 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2414 while let Some(parent_abs_path) = ignores_to_update.next() {
2415 while ignores_to_update
2416 .peek()
2417 .map_or(false, |p| p.starts_with(&parent_abs_path))
2418 {
2419 ignores_to_update.next().unwrap();
2420 }
2421
2422 let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2423 ignore_queue_tx
2424 .send(UpdateIgnoreStatusJob {
2425 abs_path: parent_abs_path,
2426 ignore_stack,
2427 ignore_queue: ignore_queue_tx.clone(),
2428 })
2429 .await
2430 .unwrap();
2431 }
2432 drop(ignore_queue_tx);
2433
2434 self.executor
2435 .scoped(|scope| {
2436 for _ in 0..self.executor.num_cpus() {
2437 scope.spawn(async {
2438 while let Ok(job) = ignore_queue_rx.recv().await {
2439 self.update_ignore_status(job, &snapshot).await;
2440 }
2441 });
2442 }
2443 })
2444 .await;
2445 }
2446
2447 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2448 let mut ignore_stack = job.ignore_stack;
2449 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
2450 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2451 }
2452
2453 let mut entries_by_id_edits = Vec::new();
2454 let mut entries_by_path_edits = Vec::new();
2455 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
2456 for mut entry in snapshot.child_entries(path).cloned() {
2457 let was_ignored = entry.is_ignored;
2458 let abs_path = self.abs_path().join(&entry.path);
2459 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
2460 if entry.is_dir() {
2461 let child_ignore_stack = if entry.is_ignored {
2462 IgnoreStack::all()
2463 } else {
2464 ignore_stack.clone()
2465 };
2466 job.ignore_queue
2467 .send(UpdateIgnoreStatusJob {
2468 abs_path: abs_path.into(),
2469 ignore_stack: child_ignore_stack,
2470 ignore_queue: job.ignore_queue.clone(),
2471 })
2472 .await
2473 .unwrap();
2474 }
2475
2476 if entry.is_ignored != was_ignored {
2477 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2478 path_entry.scan_id = snapshot.scan_id;
2479 path_entry.is_ignored = entry.is_ignored;
2480 entries_by_id_edits.push(Edit::Insert(path_entry));
2481 entries_by_path_edits.push(Edit::Insert(entry));
2482 }
2483 }
2484
2485 let mut snapshot = self.snapshot.lock();
2486 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2487 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2488 }
2489}
2490
2491fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2492 let mut result = root_char_bag;
2493 result.extend(
2494 path.to_string_lossy()
2495 .chars()
2496 .map(|c| c.to_ascii_lowercase()),
2497 );
2498 result
2499}
2500
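/// A unit of work for the background scanner: scan the directory at `abs_path`, pushing
/// jobs for its subdirectories onto `scan_queue`. `ancestor_inodes` holds the inodes of
/// ancestor directories, guarding against symlink cycles.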
2501struct ScanJob {
2502 abs_path: PathBuf,
2503 path: Arc<Path>,
2504 ignore_stack: Arc<IgnoreStack>,
2505 scan_queue: Sender<ScanJob>,
2506 ancestor_inodes: TreeSet<u64>,
2507}
2508
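/// A unit of work for ignore-status updates: re-evaluate the entries beneath `abs_path`
/// against `ignore_stack`, queueing child directories onto `ignore_queue`.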
2509struct UpdateIgnoreStatusJob {
2510 abs_path: Arc<Path>,
2511 ignore_stack: Arc<IgnoreStack>,
2512 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2513}
2514
2515pub trait WorktreeHandle {
2516 #[cfg(any(test, feature = "test-support"))]
2517 fn flush_fs_events<'a>(
2518 &self,
2519 cx: &'a gpui::TestAppContext,
2520 ) -> futures::future::LocalBoxFuture<'a, ()>;
2521}
2522
2523impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes
    // that occurred before the worktree was constructed. These events can cause the worktree
    // to perform extra directory scans and emit extra scan-state notifications.
2527 //
2528 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2529 // to ensure that all redundant FS events have already been processed.
2530 #[cfg(any(test, feature = "test-support"))]
2531 fn flush_fs_events<'a>(
2532 &self,
2533 cx: &'a gpui::TestAppContext,
2534 ) -> futures::future::LocalBoxFuture<'a, ()> {
2535 use smol::future::FutureExt;
2536
2537 let filename = "fs-event-sentinel";
2538 let tree = self.clone();
2539 let (fs, root_path) = self.read_with(cx, |tree, _| {
2540 let tree = tree.as_local().unwrap();
2541 (tree.fs.clone(), tree.abs_path().clone())
2542 });
2543
2544 async move {
2545 fs.create_file(&root_path.join(filename), Default::default())
2546 .await
2547 .unwrap();
2548 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
2549 .await;
2550
2551 fs.remove_file(&root_path.join(filename), Default::default())
2552 .await
2553 .unwrap();
2554 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
2555 .await;
2556
2557 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2558 .await;
2559 }
2560 .boxed_local()
2561 }
2562}
2563
2564#[derive(Clone, Debug)]
2565struct TraversalProgress<'a> {
2566 max_path: &'a Path,
2567 count: usize,
2568 visible_count: usize,
2569 file_count: usize,
2570 visible_file_count: usize,
2571}
2572
2573impl<'a> TraversalProgress<'a> {
2574 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2575 match (include_ignored, include_dirs) {
2576 (true, true) => self.count,
2577 (true, false) => self.file_count,
2578 (false, true) => self.visible_count,
2579 (false, false) => self.visible_file_count,
2580 }
2581 }
2582}
2583
2584impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2585 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2586 self.max_path = summary.max_path.as_ref();
2587 self.count += summary.count;
2588 self.visible_count += summary.visible_count;
2589 self.file_count += summary.file_count;
2590 self.visible_file_count += summary.visible_file_count;
2591 }
2592}
2593
2594impl<'a> Default for TraversalProgress<'a> {
2595 fn default() -> Self {
2596 Self {
2597 max_path: Path::new(""),
2598 count: 0,
2599 visible_count: 0,
2600 file_count: 0,
2601 visible_file_count: 0,
2602 }
2603 }
2604}
2605
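/// A cursor over a snapshot's entries in path order, optionally skipping directories
/// and/or ignored entries.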
2606pub struct Traversal<'a> {
2607 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2608 include_ignored: bool,
2609 include_dirs: bool,
2610}
2611
2612impl<'a> Traversal<'a> {
2613 pub fn advance(&mut self) -> bool {
2614 self.advance_to_offset(self.offset() + 1)
2615 }
2616
2617 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2618 self.cursor.seek_forward(
2619 &TraversalTarget::Count {
2620 count: offset,
2621 include_dirs: self.include_dirs,
2622 include_ignored: self.include_ignored,
2623 },
2624 Bias::Right,
2625 &(),
2626 )
2627 }
2628
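    /// Advances to the next entry that lies outside the current entry's subtree and matches
    /// the traversal's filters, skipping whole subtrees along the way. Returns false if no
    /// such entry exists.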
2629 pub fn advance_to_sibling(&mut self) -> bool {
2630 while let Some(entry) = self.cursor.item() {
2631 self.cursor.seek_forward(
2632 &TraversalTarget::PathSuccessor(&entry.path),
2633 Bias::Left,
2634 &(),
2635 );
2636 if let Some(entry) = self.cursor.item() {
2637 if (self.include_dirs || !entry.is_dir())
2638 && (self.include_ignored || !entry.is_ignored)
2639 {
2640 return true;
2641 }
2642 }
2643 }
2644 false
2645 }
2646
2647 pub fn entry(&self) -> Option<&'a Entry> {
2648 self.cursor.item()
2649 }
2650
2651 pub fn offset(&self) -> usize {
2652 self.cursor
2653 .start()
2654 .count(self.include_dirs, self.include_ignored)
2655 }
2656}
2657
2658impl<'a> Iterator for Traversal<'a> {
2659 type Item = &'a Entry;
2660
2661 fn next(&mut self) -> Option<Self::Item> {
2662 if let Some(item) = self.entry() {
2663 self.advance();
2664 Some(item)
2665 } else {
2666 None
2667 }
2668 }
2669}
2670
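/// Seek targets for a `Traversal`: an exact path, the first path outside a given path's
/// subtree, or a position expressed as an entry count under the traversal's filters.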
2671#[derive(Debug)]
2672enum TraversalTarget<'a> {
2673 Path(&'a Path),
2674 PathSuccessor(&'a Path),
2675 Count {
2676 count: usize,
2677 include_ignored: bool,
2678 include_dirs: bool,
2679 },
2680}
2681
2682impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2683 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2684 match self {
2685 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2686 TraversalTarget::PathSuccessor(path) => {
2687 if !cursor_location.max_path.starts_with(path) {
2688 Ordering::Equal
2689 } else {
2690 Ordering::Greater
2691 }
2692 }
2693 TraversalTarget::Count {
2694 count,
2695 include_dirs,
2696 include_ignored,
2697 } => Ord::cmp(
2698 count,
2699 &cursor_location.count(*include_dirs, *include_ignored),
2700 ),
2701 }
2702 }
2703}
2704
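/// Iterates over the entries directly inside `parent_path`, using
/// `Traversal::advance_to_sibling` to skip each child's own subtree.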
2705struct ChildEntriesIter<'a> {
2706 parent_path: &'a Path,
2707 traversal: Traversal<'a>,
2708}
2709
2710impl<'a> Iterator for ChildEntriesIter<'a> {
2711 type Item = &'a Entry;
2712
2713 fn next(&mut self) -> Option<Self::Item> {
2714 if let Some(item) = self.traversal.entry() {
2715 if item.path.starts_with(&self.parent_path) {
2716 self.traversal.advance_to_sibling();
2717 return Some(item);
2718 }
2719 }
2720 None
2721 }
2722}
2723
2724impl<'a> From<&'a Entry> for proto::Entry {
2725 fn from(entry: &'a Entry) -> Self {
2726 Self {
2727 id: entry.id.to_proto(),
2728 is_dir: entry.is_dir(),
2729 path: entry.path.as_os_str().as_bytes().to_vec(),
2730 inode: entry.inode,
2731 mtime: Some(entry.mtime.into()),
2732 is_symlink: entry.is_symlink,
2733 is_ignored: entry.is_ignored,
2734 }
2735 }
2736}
2737
2738impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2739 type Error = anyhow::Error;
2740
2741 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2742 if let Some(mtime) = entry.mtime {
2743 let kind = if entry.is_dir {
2744 EntryKind::Dir
2745 } else {
2746 let mut char_bag = *root_char_bag;
2747 char_bag.extend(
2748 String::from_utf8_lossy(&entry.path)
2749 .chars()
2750 .map(|c| c.to_ascii_lowercase()),
2751 );
2752 EntryKind::File(char_bag)
2753 };
2754 let path: Arc<Path> = PathBuf::from(OsString::from_vec(entry.path)).into();
2755 Ok(Entry {
2756 id: ProjectEntryId::from_proto(entry.id),
2757 kind,
2758 path,
2759 inode: entry.inode,
2760 mtime: mtime.into(),
2761 is_symlink: entry.is_symlink,
2762 is_ignored: entry.is_ignored,
2763 })
2764 } else {
2765 Err(anyhow!(
2766 "missing mtime in remote worktree entry {:?}",
2767 entry.path
2768 ))
2769 }
2770 }
2771}
2772
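/// Sends a worktree update to the server, splitting it into chunks of at most
/// `MAX_CHUNK_SIZE` entries per request.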
2773async fn send_worktree_update(client: &Arc<Client>, update: proto::UpdateWorktree) -> Result<()> {
2774 #[cfg(any(test, feature = "test-support"))]
2775 const MAX_CHUNK_SIZE: usize = 2;
2776 #[cfg(not(any(test, feature = "test-support")))]
2777 const MAX_CHUNK_SIZE: usize = 256;
2778
2779 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
2780 client.request(update).await?;
2781 }
2782
2783 Ok(())
2784}
2785
2786#[cfg(test)]
2787mod tests {
2788 use super::*;
2789 use crate::fs::FakeFs;
2790 use anyhow::Result;
2791 use client::test::FakeHttpClient;
2792 use fs::RealFs;
2793 use gpui::{executor::Deterministic, TestAppContext};
2794 use rand::prelude::*;
2795 use serde_json::json;
2796 use std::{
2797 env,
2798 fmt::Write,
2799 time::{SystemTime, UNIX_EPOCH},
2800 };
2801 use util::test::temp_tree;
2802
2803 #[gpui::test]
2804 async fn test_traversal(cx: &mut TestAppContext) {
2805 let fs = FakeFs::new(cx.background());
2806 fs.insert_tree(
2807 "/root",
2808 json!({
2809 ".gitignore": "a/b\n",
2810 "a": {
2811 "b": "",
2812 "c": "",
2813 }
2814 }),
2815 )
2816 .await;
2817
2818 let http_client = FakeHttpClient::with_404_response();
2819 let client = cx.read(|cx| Client::new(http_client, cx));
2820
2821 let tree = Worktree::local(
2822 client,
2823 Arc::from(Path::new("/root")),
2824 true,
2825 fs,
2826 Default::default(),
2827 &mut cx.to_async(),
2828 )
2829 .await
2830 .unwrap();
2831 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2832 .await;
2833
2834 tree.read_with(cx, |tree, _| {
2835 assert_eq!(
2836 tree.entries(false)
2837 .map(|entry| entry.path.as_ref())
2838 .collect::<Vec<_>>(),
2839 vec![
2840 Path::new(""),
2841 Path::new(".gitignore"),
2842 Path::new("a"),
2843 Path::new("a/c"),
2844 ]
2845 );
2846 assert_eq!(
2847 tree.entries(true)
2848 .map(|entry| entry.path.as_ref())
2849 .collect::<Vec<_>>(),
2850 vec![
2851 Path::new(""),
2852 Path::new(".gitignore"),
2853 Path::new("a"),
2854 Path::new("a/b"),
2855 Path::new("a/c"),
2856 ]
2857 );
2858 })
2859 }
2860
2861 #[gpui::test(iterations = 10)]
2862 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
2863 let fs = FakeFs::new(cx.background());
2864 fs.insert_tree(
2865 "/root",
2866 json!({
2867 "lib": {
2868 "a": {
2869 "a.txt": ""
2870 },
2871 "b": {
2872 "b.txt": ""
2873 }
2874 }
2875 }),
2876 )
2877 .await;
2878 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
2879 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
2880
2881 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
2882 let tree = Worktree::local(
2883 client,
2884 Arc::from(Path::new("/root")),
2885 true,
2886 fs.clone(),
2887 Default::default(),
2888 &mut cx.to_async(),
2889 )
2890 .await
2891 .unwrap();
2892
2893 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2894 .await;
2895
2896 tree.read_with(cx, |tree, _| {
2897 assert_eq!(
2898 tree.entries(false)
2899 .map(|entry| entry.path.as_ref())
2900 .collect::<Vec<_>>(),
2901 vec![
2902 Path::new(""),
2903 Path::new("lib"),
2904 Path::new("lib/a"),
2905 Path::new("lib/a/a.txt"),
2906 Path::new("lib/a/lib"),
2907 Path::new("lib/b"),
2908 Path::new("lib/b/b.txt"),
2909 Path::new("lib/b/lib"),
2910 ]
2911 );
2912 });
2913
2914 fs.rename(
2915 Path::new("/root/lib/a/lib"),
2916 Path::new("/root/lib/a/lib-2"),
2917 Default::default(),
2918 )
2919 .await
2920 .unwrap();
2921 executor.run_until_parked();
2922 tree.read_with(cx, |tree, _| {
2923 assert_eq!(
2924 tree.entries(false)
2925 .map(|entry| entry.path.as_ref())
2926 .collect::<Vec<_>>(),
2927 vec![
2928 Path::new(""),
2929 Path::new("lib"),
2930 Path::new("lib/a"),
2931 Path::new("lib/a/a.txt"),
2932 Path::new("lib/a/lib-2"),
2933 Path::new("lib/b"),
2934 Path::new("lib/b/b.txt"),
2935 Path::new("lib/b/lib"),
2936 ]
2937 );
2938 });
2939 }
2940
2941 #[gpui::test]
2942 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
2943 let parent_dir = temp_tree(json!({
2944 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
2945 "tree": {
2946 ".git": {},
2947 ".gitignore": "ignored-dir\n",
2948 "tracked-dir": {
2949 "tracked-file1": "",
2950 "ancestor-ignored-file1": "",
2951 },
2952 "ignored-dir": {
2953 "ignored-file1": ""
2954 }
2955 }
2956 }));
2957 let dir = parent_dir.path().join("tree");
2958
2959 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
2960
2961 let tree = Worktree::local(
2962 client,
2963 dir.as_path(),
2964 true,
2965 Arc::new(RealFs),
2966 Default::default(),
2967 &mut cx.to_async(),
2968 )
2969 .await
2970 .unwrap();
2971 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2972 .await;
2973 tree.flush_fs_events(cx).await;
2974 cx.read(|cx| {
2975 let tree = tree.read(cx);
2976 assert!(
2977 !tree
2978 .entry_for_path("tracked-dir/tracked-file1")
2979 .unwrap()
2980 .is_ignored
2981 );
2982 assert!(
2983 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
2984 .unwrap()
2985 .is_ignored
2986 );
2987 assert!(
2988 tree.entry_for_path("ignored-dir/ignored-file1")
2989 .unwrap()
2990 .is_ignored
2991 );
2992 });
2993
2994 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
2995 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
2996 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
2997 tree.flush_fs_events(cx).await;
2998 cx.read(|cx| {
2999 let tree = tree.read(cx);
3000 assert!(
3001 !tree
3002 .entry_for_path("tracked-dir/tracked-file2")
3003 .unwrap()
3004 .is_ignored
3005 );
3006 assert!(
3007 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3008 .unwrap()
3009 .is_ignored
3010 );
3011 assert!(
3012 tree.entry_for_path("ignored-dir/ignored-file2")
3013 .unwrap()
3014 .is_ignored
3015 );
3016 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3017 });
3018 }
3019
3020 #[gpui::test]
3021 async fn test_write_file(cx: &mut TestAppContext) {
3022 let dir = temp_tree(json!({
3023 ".git": {},
3024 ".gitignore": "ignored-dir\n",
3025 "tracked-dir": {},
3026 "ignored-dir": {}
3027 }));
3028
3029 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3030
3031 let tree = Worktree::local(
3032 client,
3033 dir.path(),
3034 true,
3035 Arc::new(RealFs),
3036 Default::default(),
3037 &mut cx.to_async(),
3038 )
3039 .await
3040 .unwrap();
3041 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3042 .await;
3043 tree.flush_fs_events(cx).await;
3044
3045 tree.update(cx, |tree, cx| {
3046 tree.as_local().unwrap().write_file(
3047 Path::new("tracked-dir/file.txt"),
3048 "hello".into(),
3049 Default::default(),
3050 cx,
3051 )
3052 })
3053 .await
3054 .unwrap();
3055 tree.update(cx, |tree, cx| {
3056 tree.as_local().unwrap().write_file(
3057 Path::new("ignored-dir/file.txt"),
3058 "world".into(),
3059 Default::default(),
3060 cx,
3061 )
3062 })
3063 .await
3064 .unwrap();
3065
3066 tree.read_with(cx, |tree, _| {
3067 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3068 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3069 assert!(!tracked.is_ignored);
3070 assert!(ignored.is_ignored);
3071 });
3072 }
3073
3074 #[gpui::test(iterations = 30)]
3075 async fn test_create_directory(cx: &mut TestAppContext) {
3076 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3077
3078 let fs = FakeFs::new(cx.background());
3079 fs.insert_tree(
3080 "/a",
3081 json!({
3082 "b": {},
3083 "c": {},
3084 "d": {},
3085 }),
3086 )
3087 .await;
3088
3089 let tree = Worktree::local(
3090 client,
3091 "/a".as_ref(),
3092 true,
3093 fs,
3094 Default::default(),
3095 &mut cx.to_async(),
3096 )
3097 .await
3098 .unwrap();
3099
3100 let entry = tree
3101 .update(cx, |tree, cx| {
3102 tree.as_local_mut()
3103 .unwrap()
3104 .create_entry("a/e".as_ref(), true, cx)
3105 })
3106 .await
3107 .unwrap();
3108 assert!(entry.is_dir());
3109
3110 cx.foreground().run_until_parked();
3111 tree.read_with(cx, |tree, _| {
3112 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3113 });
3114 }
3115
3116 #[gpui::test(iterations = 100)]
3117 fn test_random(mut rng: StdRng) {
3118 let operations = env::var("OPERATIONS")
3119 .map(|o| o.parse().unwrap())
3120 .unwrap_or(40);
3121 let initial_entries = env::var("INITIAL_ENTRIES")
3122 .map(|o| o.parse().unwrap())
3123 .unwrap_or(20);
3124
3125 let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
3126 for _ in 0..initial_entries {
3127 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
3128 }
3129 log::info!("Generated initial tree");
3130
3131 let (notify_tx, _notify_rx) = mpsc::unbounded();
3132 let fs = Arc::new(RealFs);
3133 let next_entry_id = Arc::new(AtomicUsize::new(0));
3134 let mut initial_snapshot = LocalSnapshot {
3135 abs_path: root_dir.path().into(),
3136 removed_entry_ids: Default::default(),
3137 ignores_by_parent_abs_path: Default::default(),
3138 next_entry_id: next_entry_id.clone(),
3139 snapshot: Snapshot {
3140 id: WorktreeId::from_usize(0),
3141 entries_by_path: Default::default(),
3142 entries_by_id: Default::default(),
3143 root_name: Default::default(),
3144 root_char_bag: Default::default(),
3145 scan_id: 0,
3146 is_complete: true,
3147 },
3148 extension_counts: Default::default(),
3149 };
3150 initial_snapshot.insert_entry(
3151 Entry::new(
3152 Path::new("").into(),
3153 &smol::block_on(fs.metadata(root_dir.path()))
3154 .unwrap()
3155 .unwrap(),
3156 &next_entry_id,
3157 Default::default(),
3158 ),
3159 fs.as_ref(),
3160 );
3161 let mut scanner = BackgroundScanner::new(
3162 Arc::new(Mutex::new(initial_snapshot.clone())),
3163 notify_tx,
3164 fs.clone(),
3165 Arc::new(gpui::executor::Background::new()),
3166 );
3167 smol::block_on(scanner.scan_dirs()).unwrap();
3168 scanner.snapshot().check_invariants();
3169
3170 let mut events = Vec::new();
3171 let mut snapshots = Vec::new();
3172 let mut mutations_len = operations;
3173 while mutations_len > 1 {
3174 if !events.is_empty() && rng.gen_bool(0.4) {
3175 let len = rng.gen_range(0..=events.len());
3176 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
3177 log::info!("Delivering events: {:#?}", to_deliver);
3178 smol::block_on(scanner.process_events(to_deliver));
3179 scanner.snapshot().check_invariants();
3180 } else {
3181 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
3182 mutations_len -= 1;
3183 }
3184
3185 if rng.gen_bool(0.2) {
3186 snapshots.push(scanner.snapshot());
3187 }
3188 }
3189 log::info!("Quiescing: {:#?}", events);
3190 smol::block_on(scanner.process_events(events));
3191 scanner.snapshot().check_invariants();
3192
3193 let (notify_tx, _notify_rx) = mpsc::unbounded();
3194 let mut new_scanner = BackgroundScanner::new(
3195 Arc::new(Mutex::new(initial_snapshot)),
3196 notify_tx,
3197 scanner.fs.clone(),
3198 scanner.executor.clone(),
3199 );
3200 smol::block_on(new_scanner.scan_dirs()).unwrap();
3201 assert_eq!(
3202 scanner.snapshot().to_vec(true),
3203 new_scanner.snapshot().to_vec(true)
3204 );
3205
3206 for mut prev_snapshot in snapshots {
3207 let include_ignored = rng.gen::<bool>();
3208 if !include_ignored {
3209 let mut entries_by_path_edits = Vec::new();
3210 let mut entries_by_id_edits = Vec::new();
3211 for entry in prev_snapshot
3212 .entries_by_id
3213 .cursor::<()>()
3214 .filter(|e| e.is_ignored)
3215 {
3216 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3217 entries_by_id_edits.push(Edit::Remove(entry.id));
3218 }
3219
3220 prev_snapshot
3221 .entries_by_path
3222 .edit(entries_by_path_edits, &());
3223 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3224 }
3225
3226 let update = scanner
3227 .snapshot()
3228 .build_update(&prev_snapshot, 0, 0, include_ignored);
3229 prev_snapshot.apply_remote_update(update).unwrap();
3230 assert_eq!(
3231 prev_snapshot.to_vec(true),
3232 scanner.snapshot().to_vec(include_ignored)
3233 );
3234 }
3235 }
3236
3237 fn randomly_mutate_tree(
3238 root_path: &Path,
3239 insertion_probability: f64,
3240 rng: &mut impl Rng,
3241 ) -> Result<Vec<fsevent::Event>> {
3242 let root_path = root_path.canonicalize().unwrap();
3243 let (dirs, files) = read_dir_recursive(root_path.clone());
3244
3245 let mut events = Vec::new();
3246 let mut record_event = |path: PathBuf| {
3247 events.push(fsevent::Event {
3248 event_id: SystemTime::now()
3249 .duration_since(UNIX_EPOCH)
3250 .unwrap()
3251 .as_secs(),
3252 flags: fsevent::StreamFlags::empty(),
3253 path,
3254 });
3255 };
3256
3257 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
3258 let path = dirs.choose(rng).unwrap();
3259 let new_path = path.join(gen_name(rng));
3260
3261 if rng.gen() {
3262 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
3263 std::fs::create_dir(&new_path)?;
3264 } else {
3265 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
3266 std::fs::write(&new_path, "")?;
3267 }
3268 record_event(new_path);
3269 } else if rng.gen_bool(0.05) {
3270 let ignore_dir_path = dirs.choose(rng).unwrap();
3271 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
3272
3273 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
3274 let files_to_ignore = {
3275 let len = rng.gen_range(0..=subfiles.len());
3276 subfiles.choose_multiple(rng, len)
3277 };
3278 let dirs_to_ignore = {
3279 let len = rng.gen_range(0..subdirs.len());
3280 subdirs.choose_multiple(rng, len)
3281 };
3282
3283 let mut ignore_contents = String::new();
3284 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
3285 writeln!(
3286 ignore_contents,
3287 "{}",
3288 path_to_ignore
3289 .strip_prefix(&ignore_dir_path)?
3290 .to_str()
3291 .unwrap()
3292 )
3293 .unwrap();
3294 }
3295 log::info!(
3296 "Creating {:?} with contents:\n{}",
3297 ignore_path.strip_prefix(&root_path)?,
3298 ignore_contents
3299 );
3300 std::fs::write(&ignore_path, ignore_contents).unwrap();
3301 record_event(ignore_path);
3302 } else {
3303 let old_path = {
3304 let file_path = files.choose(rng);
3305 let dir_path = dirs[1..].choose(rng);
3306 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
3307 };
3308
3309 let is_rename = rng.gen();
3310 if is_rename {
3311 let new_path_parent = dirs
3312 .iter()
3313 .filter(|d| !d.starts_with(old_path))
3314 .choose(rng)
3315 .unwrap();
3316
3317 let overwrite_existing_dir =
3318 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
3319 let new_path = if overwrite_existing_dir {
3320 std::fs::remove_dir_all(&new_path_parent).ok();
3321 new_path_parent.to_path_buf()
3322 } else {
3323 new_path_parent.join(gen_name(rng))
3324 };
3325
3326 log::info!(
3327 "Renaming {:?} to {}{:?}",
3328 old_path.strip_prefix(&root_path)?,
3329 if overwrite_existing_dir {
3330 "overwrite "
3331 } else {
3332 ""
3333 },
3334 new_path.strip_prefix(&root_path)?
3335 );
3336 std::fs::rename(&old_path, &new_path)?;
3337 record_event(old_path.clone());
3338 record_event(new_path);
3339 } else if old_path.is_dir() {
3340 let (dirs, files) = read_dir_recursive(old_path.clone());
3341
3342 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
3343 std::fs::remove_dir_all(&old_path).unwrap();
3344 for file in files {
3345 record_event(file);
3346 }
3347 for dir in dirs {
3348 record_event(dir);
3349 }
3350 } else {
3351 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
3352 std::fs::remove_file(old_path).unwrap();
3353 record_event(old_path.clone());
3354 }
3355 }
3356
3357 Ok(events)
3358 }
3359
3360 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3361 let child_entries = std::fs::read_dir(&path).unwrap();
3362 let mut dirs = vec![path];
3363 let mut files = Vec::new();
3364 for child_entry in child_entries {
3365 let child_path = child_entry.unwrap().path();
3366 if child_path.is_dir() {
3367 let (child_dirs, child_files) = read_dir_recursive(child_path);
3368 dirs.extend(child_dirs);
3369 files.extend(child_files);
3370 } else {
3371 files.push(child_path);
3372 }
3373 }
3374 (dirs, files)
3375 }
3376
3377 fn gen_name(rng: &mut impl Rng) -> String {
3378 (0..6)
3379 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3380 .map(char::from)
3381 .collect()
3382 }
3383
3384 impl LocalSnapshot {
3385 fn check_invariants(&self) {
3386 let mut files = self.files(true, 0);
3387 let mut visible_files = self.files(false, 0);
3388 for entry in self.entries_by_path.cursor::<()>() {
3389 if entry.is_file() {
3390 assert_eq!(files.next().unwrap().inode, entry.inode);
3391 if !entry.is_ignored {
3392 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3393 }
3394 }
3395 }
3396 assert!(files.next().is_none());
3397 assert!(visible_files.next().is_none());
3398
3399 let mut bfs_paths = Vec::new();
3400 let mut stack = vec![Path::new("")];
3401 while let Some(path) = stack.pop() {
3402 bfs_paths.push(path);
3403 let ix = stack.len();
3404 for child_entry in self.child_entries(path) {
3405 stack.insert(ix, &child_entry.path);
3406 }
3407 }
3408
3409 let dfs_paths_via_iter = self
3410 .entries_by_path
3411 .cursor::<()>()
3412 .map(|e| e.path.as_ref())
3413 .collect::<Vec<_>>();
3414 assert_eq!(bfs_paths, dfs_paths_via_iter);
3415
3416 let dfs_paths_via_traversal = self
3417 .entries(true)
3418 .map(|e| e.path.as_ref())
3419 .collect::<Vec<_>>();
3420 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3421
3422 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3423 let ignore_parent_path =
3424 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3425 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3426 assert!(self
3427 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3428 .is_some());
3429 }
3430
3431 // Ensure extension counts are correct.
3432 let mut expected_extension_counts = HashMap::default();
3433 for extension in self.entries(false).filter_map(|e| e.path.extension()) {
3434 *expected_extension_counts
3435 .entry(extension.into())
3436 .or_insert(0) += 1;
3437 }
3438 assert_eq!(self.extension_counts, expected_extension_counts);
3439 }
3440
3441 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3442 let mut paths = Vec::new();
3443 for entry in self.entries_by_path.cursor::<()>() {
3444 if include_ignored || !entry.is_ignored {
3445 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3446 }
3447 }
3448 paths.sort_by(|a, b| a.0.cmp(b.0));
3449 paths
3450 }
3451 }
3452}