1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Result};
8use client::{proto, Client, TypedEnvelope};
9use clock::ReplicaId;
10use collections::{HashMap, VecDeque};
11use futures::{
12 channel::mpsc::{self, UnboundedSender},
13 Stream, StreamExt,
14};
15use fuzzy::CharBag;
16use gpui::{
17 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
18 Task,
19};
20use language::{Buffer, DiagnosticEntry, Operation, PointUtf16, Rope};
21use lazy_static::lazy_static;
22use parking_lot::Mutex;
23use postage::{
24 oneshot,
25 prelude::{Sink as _, Stream as _},
26 watch,
27};
28use serde::Deserialize;
29use smol::channel::{self, Sender};
30use std::{
31 any::Any,
32 cmp::{self, Ordering},
33 convert::{TryFrom, TryInto},
34 ffi::{OsStr, OsString},
35 fmt,
36 future::Future,
37 ops::{Deref, DerefMut},
38 path::{Path, PathBuf},
39 sync::{
40 atomic::{AtomicUsize, Ordering::SeqCst},
41 Arc,
42 },
43 time::{Duration, SystemTime},
44};
45use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap};
46use util::ResultExt;
47
48lazy_static! {
49 static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
50}
51
52#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
53pub struct WorktreeId(usize);
54
55pub enum Worktree {
56 Local(LocalWorktree),
57 Remote(RemoteWorktree),
58}
59
60pub struct LocalWorktree {
61 snapshot: LocalSnapshot,
62 config: WorktreeConfig,
63 background_snapshot: Arc<Mutex<LocalSnapshot>>,
64 last_scan_state_rx: watch::Receiver<ScanState>,
65 _background_scanner_task: Option<Task<()>>,
66 poll_task: Option<Task<()>>,
67 registration: Registration,
68 share: Option<ShareState>,
69 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
70 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
71 queued_operations: Vec<(u64, Operation)>,
72 client: Arc<Client>,
73 fs: Arc<dyn Fs>,
74 weak: bool,
75}
76
77pub struct RemoteWorktree {
78 pub(crate) snapshot: Snapshot,
79 project_id: u64,
80 snapshot_rx: watch::Receiver<Snapshot>,
81 client: Arc<Client>,
82 updates_tx: UnboundedSender<proto::UpdateWorktree>,
83 replica_id: ReplicaId,
84 queued_operations: Vec<(u64, Operation)>,
85 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
86 weak: bool,
87 next_update_id: u64,
88 pending_updates: VecDeque<proto::UpdateWorktree>,
89}
90
91#[derive(Clone)]
92pub struct Snapshot {
93 id: WorktreeId,
94 root_name: String,
95 root_char_bag: CharBag,
96 entries_by_path: SumTree<Entry>,
97 entries_by_id: SumTree<PathEntry>,
98}
99
100#[derive(Clone)]
101pub struct LocalSnapshot {
102 abs_path: Arc<Path>,
103 scan_id: usize,
104 ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
105 removed_entry_ids: HashMap<u64, usize>,
106 next_entry_id: Arc<AtomicUsize>,
107 snapshot: Snapshot,
108}
109
110impl Deref for LocalSnapshot {
111 type Target = Snapshot;
112
113 fn deref(&self) -> &Self::Target {
114 &self.snapshot
115 }
116}
117
118impl DerefMut for LocalSnapshot {
119 fn deref_mut(&mut self) -> &mut Self::Target {
120 &mut self.snapshot
121 }
122}
123
124#[derive(Clone, Debug)]
125enum ScanState {
126 Idle,
127 Scanning,
128 Err(Arc<anyhow::Error>),
129}
130
131#[derive(Debug, Eq, PartialEq)]
132enum Registration {
133 None,
134 Pending,
135 Done { project_id: u64 },
136}
137
138struct ShareState {
139 project_id: u64,
140 snapshots_tx: Sender<LocalSnapshot>,
141 _maintain_remote_snapshot: Option<Task<()>>,
142}
143
144#[derive(Default, Deserialize)]
145struct WorktreeConfig {
146 collaborators: Vec<String>,
147}
148
149pub enum Event {
150 UpdatedEntries,
151}
152
153impl Entity for Worktree {
154 type Event = Event;
155
156 fn release(&mut self, _: &mut MutableAppContext) {
157 if let Some(worktree) = self.as_local_mut() {
158 if let Registration::Done { project_id } = worktree.registration {
159 let client = worktree.client.clone();
160 let unregister_message = proto::UnregisterWorktree {
161 project_id,
162 worktree_id: worktree.id().to_proto(),
163 };
164 client.send(unregister_message).log_err();
165 }
166 }
167 }
168}
169
170impl Worktree {
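    /// Creates a worktree backed by the local file system, rooted at `path`, and kicks off a
    /// background scan of its contents.
    ///
    /// A rough usage sketch (hypothetical caller; `client`, `fs`, and `cx` are assumed to come
    /// from the surrounding application or test harness):
    ///
    /// ```ignore
    /// let worktree = Worktree::local(
    ///     client.clone(),
    ///     Path::new("/path/to/project"),
    ///     false, // not a weak worktree
    ///     fs.clone(),
    ///     &mut cx,
    /// )
    /// .await?;
    /// ```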
171 pub async fn local(
172 client: Arc<Client>,
173 path: impl Into<Arc<Path>>,
174 weak: bool,
175 fs: Arc<dyn Fs>,
176 cx: &mut AsyncAppContext,
177 ) -> Result<ModelHandle<Self>> {
178 let (tree, scan_states_tx) = LocalWorktree::new(client, path, weak, fs.clone(), cx).await?;
179 tree.update(cx, |tree, cx| {
180 let tree = tree.as_local_mut().unwrap();
181 let abs_path = tree.abs_path().clone();
182 let background_snapshot = tree.background_snapshot.clone();
183 let background = cx.background().clone();
184 tree._background_scanner_task = Some(cx.background().spawn(async move {
185 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
186 let scanner =
187 BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
188 scanner.run(events).await;
189 }));
190 });
191 Ok(tree)
192 }
193
194 pub fn remote(
195 project_remote_id: u64,
196 replica_id: ReplicaId,
197 worktree: proto::Worktree,
198 client: Arc<Client>,
199 cx: &mut MutableAppContext,
200 ) -> (ModelHandle<Self>, Task<()>) {
201 let remote_id = worktree.id;
202 let root_char_bag: CharBag = worktree
203 .root_name
204 .chars()
205 .map(|c| c.to_ascii_lowercase())
206 .collect();
207 let root_name = worktree.root_name.clone();
208 let weak = worktree.weak;
209 let snapshot = Snapshot {
210 id: WorktreeId(remote_id as usize),
211 root_name,
212 root_char_bag,
213 entries_by_path: Default::default(),
214 entries_by_id: Default::default(),
215 };
216
217 let (updates_tx, mut updates_rx) = mpsc::unbounded();
218 let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());
219 let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
220 Worktree::Remote(RemoteWorktree {
221 project_id: project_remote_id,
222 replica_id,
223 snapshot: snapshot.clone(),
224 snapshot_rx: snapshot_rx.clone(),
225 updates_tx,
226 client: client.clone(),
227 queued_operations: Default::default(),
228 diagnostic_summaries: TreeMap::from_ordered_entries(
229 worktree.diagnostic_summaries.into_iter().map(|summary| {
230 (
231 PathKey(PathBuf::from(summary.path).into()),
232 DiagnosticSummary {
233 error_count: summary.error_count as usize,
234 warning_count: summary.warning_count as usize,
235 info_count: summary.info_count as usize,
236 hint_count: summary.hint_count as usize,
237 },
238 )
239 }),
240 ),
241 weak,
242 next_update_id: worktree.next_update_id,
243 pending_updates: Default::default(),
244 })
245 });
246
247 let deserialize_task = cx.spawn({
248 let worktree_handle = worktree_handle.clone();
249 |cx| async move {
250 let (entries_by_path, entries_by_id) = cx
251 .background()
252 .spawn(async move {
253 let mut entries_by_path_edits = Vec::new();
254 let mut entries_by_id_edits = Vec::new();
255 for entry in worktree.entries {
256 match Entry::try_from((&root_char_bag, entry)) {
257 Ok(entry) => {
258 entries_by_id_edits.push(Edit::Insert(PathEntry {
259 id: entry.id,
260 path: entry.path.clone(),
261 is_ignored: entry.is_ignored,
262 scan_id: 0,
263 }));
264 entries_by_path_edits.push(Edit::Insert(entry));
265 }
266 Err(err) => log::warn!("error for remote worktree entry {:?}", err),
267 }
268 }
269
270 let mut entries_by_path = SumTree::new();
271 let mut entries_by_id = SumTree::new();
272 entries_by_path.edit(entries_by_path_edits, &());
273 entries_by_id.edit(entries_by_id_edits, &());
274
275 (entries_by_path, entries_by_id)
276 })
277 .await;
278
279 {
280 let mut snapshot = snapshot_tx.borrow_mut();
281 snapshot.entries_by_path = entries_by_path;
282 snapshot.entries_by_id = entries_by_id;
283 }
284
285 cx.background()
286 .spawn(async move {
287 while let Some(update) = updates_rx.next().await {
288 let mut snapshot = snapshot_tx.borrow().clone();
289 if let Err(error) = snapshot.apply_remote_update(update) {
290 log::error!("error applying worktree update: {}", error);
291 }
292 *snapshot_tx.borrow_mut() = snapshot;
293 }
294 })
295 .detach();
296
297 {
298 let mut snapshot_rx = snapshot_rx.clone();
299 let this = worktree_handle.downgrade();
300 cx.spawn(|mut cx| async move {
301 while let Some(_) = snapshot_rx.recv().await {
302 if let Some(this) = this.upgrade(&cx) {
303 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
304 } else {
305 break;
306 }
307 }
308 })
309 .detach();
310 }
311 }
312 });
313 (worktree_handle, deserialize_task)
314 }
315
316 pub fn as_local(&self) -> Option<&LocalWorktree> {
317 if let Worktree::Local(worktree) = self {
318 Some(worktree)
319 } else {
320 None
321 }
322 }
323
324 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
325 if let Worktree::Remote(worktree) = self {
326 Some(worktree)
327 } else {
328 None
329 }
330 }
331
332 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
333 if let Worktree::Local(worktree) = self {
334 Some(worktree)
335 } else {
336 None
337 }
338 }
339
340 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
341 if let Worktree::Remote(worktree) = self {
342 Some(worktree)
343 } else {
344 None
345 }
346 }
347
348 pub fn is_local(&self) -> bool {
349 matches!(self, Worktree::Local(_))
350 }
351
352 pub fn snapshot(&self) -> Snapshot {
353 match self {
354 Worktree::Local(worktree) => worktree.snapshot().snapshot,
355 Worktree::Remote(worktree) => worktree.snapshot(),
356 }
357 }
358
359 pub fn is_weak(&self) -> bool {
360 match self {
361 Worktree::Local(worktree) => worktree.weak,
362 Worktree::Remote(worktree) => worktree.weak,
363 }
364 }
365
366 pub fn replica_id(&self) -> ReplicaId {
367 match self {
368 Worktree::Local(_) => 0,
369 Worktree::Remote(worktree) => worktree.replica_id,
370 }
371 }
372
373 pub fn diagnostic_summaries<'a>(
374 &'a self,
375 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
376 match self {
377 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
378 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
379 }
380 .iter()
381 .map(|(path, summary)| (path.0.clone(), summary.clone()))
382 }
383
384 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
385 match self {
386 Self::Local(worktree) => {
387 let is_fake_fs = worktree.fs.is_fake();
388 worktree.snapshot = worktree.background_snapshot.lock().clone();
389 if worktree.is_scanning() {
390 if worktree.poll_task.is_none() {
391 worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
392 if is_fake_fs {
393 smol::future::yield_now().await;
394 } else {
395 smol::Timer::after(Duration::from_millis(100)).await;
396 }
397 this.update(&mut cx, |this, cx| {
398 this.as_local_mut().unwrap().poll_task = None;
399 this.poll_snapshot(cx);
400 })
401 }));
402 }
403 } else {
404 worktree.poll_task.take();
405 cx.emit(Event::UpdatedEntries);
406 }
407 }
408 Self::Remote(worktree) => {
409 worktree.snapshot = worktree.snapshot_rx.borrow().clone();
410 cx.emit(Event::UpdatedEntries);
411 }
412 };
413
414 cx.notify();
415 }
416
417 fn send_buffer_update(
418 &mut self,
419 buffer_id: u64,
420 operation: Operation,
421 cx: &mut ModelContext<Self>,
422 ) {
423 if let Some((project_id, rpc)) = match self {
424 Worktree::Local(worktree) => worktree
425 .share
426 .as_ref()
427 .map(|share| (share.project_id, worktree.client.clone())),
428 Worktree::Remote(worktree) => Some((worktree.project_id, worktree.client.clone())),
429 } {
430 cx.spawn(|worktree, mut cx| async move {
431 if let Err(error) = rpc
432 .request(proto::UpdateBuffer {
433 project_id,
434 buffer_id,
435 operations: vec![language::proto::serialize_operation(&operation)],
436 })
437 .await
438 {
439 worktree.update(&mut cx, |worktree, _| {
440 log::error!("error sending buffer operation: {}", error);
441 match worktree {
442 Worktree::Local(t) => &mut t.queued_operations,
443 Worktree::Remote(t) => &mut t.queued_operations,
444 }
445 .push((buffer_id, operation));
446 });
447 }
448 })
449 .detach();
450 }
451 }
452}
453
454impl LocalWorktree {
455 async fn new(
456 client: Arc<Client>,
457 path: impl Into<Arc<Path>>,
458 weak: bool,
459 fs: Arc<dyn Fs>,
460 cx: &mut AsyncAppContext,
461 ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
462 let abs_path = path.into();
463 let path: Arc<Path> = Arc::from(Path::new(""));
464 let next_entry_id = AtomicUsize::new(0);
465
        // Populate the snapshot's "root name", which is used for fuzzy matching.
        // The root entry itself is inserted below, once its metadata has been read.
468 let root_name = abs_path
469 .file_name()
470 .map_or(String::new(), |f| f.to_string_lossy().to_string());
471 let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
472 let metadata = fs.metadata(&abs_path).await?;
473
474 let mut config = WorktreeConfig::default();
475 if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
476 if let Ok(parsed) = toml::from_str(&zed_toml) {
477 config = parsed;
478 }
479 }
480
481 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
482 let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
483 let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
484 let mut snapshot = LocalSnapshot {
485 abs_path,
486 scan_id: 0,
487 ignores: Default::default(),
488 removed_entry_ids: Default::default(),
489 next_entry_id: Arc::new(next_entry_id),
490 snapshot: Snapshot {
491 id: WorktreeId::from_usize(cx.model_id()),
492 root_name: root_name.clone(),
493 root_char_bag,
494 entries_by_path: Default::default(),
495 entries_by_id: Default::default(),
496 },
497 };
498 if let Some(metadata) = metadata {
499 let entry = Entry::new(
500 path.into(),
501 &metadata,
502 &snapshot.next_entry_id,
503 snapshot.root_char_bag,
504 );
505 snapshot.insert_entry(entry, fs.as_ref());
506 }
507
508 let tree = Self {
509 snapshot: snapshot.clone(),
510 config,
511 background_snapshot: Arc::new(Mutex::new(snapshot)),
512 last_scan_state_rx,
513 _background_scanner_task: None,
514 registration: Registration::None,
515 share: None,
516 poll_task: None,
517 diagnostics: Default::default(),
518 diagnostic_summaries: Default::default(),
519 queued_operations: Default::default(),
520 client,
521 fs,
522 weak,
523 };
524
525 cx.spawn_weak(|this, mut cx| async move {
526 while let Some(scan_state) = scan_states_rx.next().await {
527 if let Some(handle) = this.upgrade(&cx) {
528 let to_send = handle.update(&mut cx, |this, cx| {
529 last_scan_state_tx.blocking_send(scan_state).ok();
530 this.poll_snapshot(cx);
531 let tree = this.as_local_mut().unwrap();
532 if !tree.is_scanning() {
533 if let Some(share) = tree.share.as_ref() {
534 return Some((tree.snapshot(), share.snapshots_tx.clone()));
535 }
536 }
537 None
538 });
539
540 if let Some((snapshot, snapshots_to_send_tx)) = to_send {
541 if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
542 log::error!("error submitting snapshot to send {}", err);
543 }
544 }
545 } else {
546 break;
547 }
548 }
549 })
550 .detach();
551
552 Worktree::Local(tree)
553 });
554
555 Ok((tree, scan_states_tx))
556 }
557
558 pub fn abs_path(&self) -> &Arc<Path> {
559 &self.abs_path
560 }
561
562 pub fn contains_abs_path(&self, path: &Path) -> bool {
563 path.starts_with(&self.abs_path)
564 }
565
566 fn absolutize(&self, path: &Path) -> PathBuf {
567 if path.file_name().is_some() {
568 self.abs_path.join(path)
569 } else {
570 self.abs_path.to_path_buf()
571 }
572 }
573
574 pub fn authorized_logins(&self) -> Vec<String> {
575 self.config.collaborators.clone()
576 }
577
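    /// Loads the file at `path` (relative to the worktree root) and wraps it in a `Buffer` model.
    ///
    /// Hypothetical sketch of a call site, assuming a `ModelHandle<Worktree>` named `worktree`
    /// and a surrounding gpui context `cx`:
    ///
    /// ```ignore
    /// let open_buffer = worktree.update(cx, |worktree, cx| {
    ///     worktree.as_local_mut().unwrap().load_buffer(Path::new("src/main.rs"), cx)
    /// });
    /// let buffer = open_buffer.await?;
    /// ```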
578 pub(crate) fn load_buffer(
579 &mut self,
580 path: &Path,
581 cx: &mut ModelContext<Worktree>,
582 ) -> Task<Result<ModelHandle<Buffer>>> {
583 let path = Arc::from(path);
584 cx.spawn(move |this, mut cx| async move {
585 let (file, contents) = this
586 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
587 .await?;
588 Ok(cx.add_model(|cx| Buffer::from_file(0, contents, Box::new(file), cx)))
589 })
590 }
591
592 pub fn diagnostics_for_path(&self, path: &Path) -> Option<Vec<DiagnosticEntry<PointUtf16>>> {
593 self.diagnostics.get(path).cloned()
594 }
595
596 pub fn update_diagnostics(
597 &mut self,
598 worktree_path: Arc<Path>,
599 diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
600 _: &mut ModelContext<Worktree>,
601 ) -> Result<()> {
602 let summary = DiagnosticSummary::new(&diagnostics);
603 self.diagnostic_summaries
604 .insert(PathKey(worktree_path.clone()), summary.clone());
605 self.diagnostics.insert(worktree_path.clone(), diagnostics);
606
607 if let Some(share) = self.share.as_ref() {
608 self.client
609 .send(proto::UpdateDiagnosticSummary {
610 project_id: share.project_id,
611 worktree_id: self.id().to_proto(),
612 summary: Some(proto::DiagnosticSummary {
613 path: worktree_path.to_string_lossy().to_string(),
614 error_count: summary.error_count as u32,
615 warning_count: summary.warning_count as u32,
616 info_count: summary.info_count as u32,
617 hint_count: summary.hint_count as u32,
618 }),
619 })
620 .log_err();
621 }
622
623 Ok(())
624 }
625
626 pub fn scan_complete(&self) -> impl Future<Output = ()> {
627 let mut scan_state_rx = self.last_scan_state_rx.clone();
628 async move {
629 let mut scan_state = Some(scan_state_rx.borrow().clone());
630 while let Some(ScanState::Scanning) = scan_state {
631 scan_state = scan_state_rx.recv().await;
632 }
633 }
634 }
635
636 fn is_scanning(&self) -> bool {
        matches!(*self.last_scan_state_rx.borrow(), ScanState::Scanning)
642 }
643
644 pub fn snapshot(&self) -> LocalSnapshot {
645 self.snapshot.clone()
646 }
647
648 fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
649 let handle = cx.handle();
650 let path = Arc::from(path);
651 let abs_path = self.absolutize(&path);
652 let background_snapshot = self.background_snapshot.clone();
653 let fs = self.fs.clone();
654 cx.spawn(|this, mut cx| async move {
655 let text = fs.load(&abs_path).await?;
656 // Eagerly populate the snapshot with an updated entry for the loaded file
657 let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
658 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
659 Ok((
660 File {
661 entry_id: Some(entry.id),
662 worktree: handle,
663 path: entry.path,
664 mtime: entry.mtime,
665 is_local: true,
666 },
667 text,
668 ))
669 })
670 }
671
672 pub fn save_buffer_as(
673 &self,
674 buffer_handle: ModelHandle<Buffer>,
675 path: impl Into<Arc<Path>>,
676 cx: &mut ModelContext<Worktree>,
677 ) -> Task<Result<()>> {
678 let buffer = buffer_handle.read(cx);
679 let text = buffer.as_rope().clone();
680 let version = buffer.version();
681 let save = self.save(path, text, cx);
682 let handle = cx.handle();
683 cx.as_mut().spawn(|mut cx| async move {
684 let entry = save.await?;
685 let file = File {
686 entry_id: Some(entry.id),
687 worktree: handle,
688 path: entry.path,
689 mtime: entry.mtime,
690 is_local: true,
691 };
692
693 buffer_handle.update(&mut cx, |buffer, cx| {
694 buffer.did_save(version, file.mtime, Some(Box::new(file)), cx);
695 });
696
697 Ok(())
698 })
699 }
700
701 fn save(
702 &self,
703 path: impl Into<Arc<Path>>,
704 text: Rope,
705 cx: &mut ModelContext<Worktree>,
706 ) -> Task<Result<Entry>> {
707 let path = path.into();
708 let abs_path = self.absolutize(&path);
709 let background_snapshot = self.background_snapshot.clone();
710 let fs = self.fs.clone();
711 let save = cx.background().spawn(async move {
712 fs.save(&abs_path, &text).await?;
713 refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
714 });
715
716 cx.spawn(|this, mut cx| async move {
717 let entry = save.await?;
718 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
719 Ok(entry)
720 })
721 }
722
723 pub fn register(
724 &mut self,
725 project_id: u64,
726 cx: &mut ModelContext<Worktree>,
727 ) -> Task<anyhow::Result<()>> {
728 if self.registration != Registration::None {
729 return Task::ready(Ok(()));
730 }
731
732 self.registration = Registration::Pending;
733 let client = self.client.clone();
734 let register_message = proto::RegisterWorktree {
735 project_id,
736 worktree_id: self.id().to_proto(),
737 root_name: self.root_name().to_string(),
738 authorized_logins: self.authorized_logins(),
739 };
740 cx.spawn(|this, mut cx| async move {
741 let response = client.request(register_message).await;
742 this.update(&mut cx, |this, _| {
743 let worktree = this.as_local_mut().unwrap();
744 match response {
745 Ok(_) => {
746 worktree.registration = Registration::Done { project_id };
747 Ok(())
748 }
749 Err(error) => {
750 worktree.registration = Registration::None;
751 Err(error)
752 }
753 }
754 })
755 })
756 }
757
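    /// Shares this worktree with the remote collaboration server under the given project id,
    /// sending the current snapshot and then streaming incremental updates.
    ///
    /// A hedged sketch of how a caller might sequence registration and sharing (the surrounding
    /// handles and context are hypothetical):
    ///
    /// ```ignore
    /// worktree
    ///     .update(cx, |tree, cx| tree.as_local_mut().unwrap().register(project_id, cx))
    ///     .await?;
    /// worktree
    ///     .update(cx, |tree, cx| tree.as_local_mut().unwrap().share(project_id, cx))
    ///     .await?;
    /// ```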
758 pub fn share(
759 &mut self,
760 project_id: u64,
761 cx: &mut ModelContext<Worktree>,
762 ) -> Task<anyhow::Result<()>> {
763 if self.share.is_some() {
764 return Task::ready(Ok(()));
765 }
766
767 let snapshot = self.snapshot();
768 let rpc = self.client.clone();
769 let worktree_id = cx.model_id() as u64;
770 let (snapshots_to_send_tx, snapshots_to_send_rx) =
771 smol::channel::unbounded::<LocalSnapshot>();
772 let (mut share_tx, mut share_rx) = oneshot::channel();
773 let maintain_remote_snapshot = cx.background().spawn({
774 let rpc = rpc.clone();
775 let snapshot = snapshot.clone();
776 let diagnostic_summaries = self.diagnostic_summaries.clone();
777 let weak = self.weak;
778 async move {
779 if let Err(error) = rpc
780 .request(proto::ShareWorktree {
781 project_id,
782 worktree: Some(snapshot.to_proto(&diagnostic_summaries, weak)),
783 })
784 .await
785 {
786 let _ = share_tx.try_send(Err(error));
787 return;
788 } else {
789 let _ = share_tx.try_send(Ok(()));
790 }
791
792 let mut update_id = 0;
793 let mut prev_snapshot = snapshot;
794 while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
795 let message = snapshot.build_update(
796 &prev_snapshot,
797 project_id,
798 worktree_id,
799 update_id,
800 false,
801 );
802 match rpc.request(message).await {
803 Ok(_) => {
804 prev_snapshot = snapshot;
805 update_id += 1;
806 }
807 Err(err) => log::error!("error sending snapshot diff {}", err),
808 }
809 }
810 }
811 });
812 self.share = Some(ShareState {
813 project_id,
814 snapshots_tx: snapshots_to_send_tx,
815 _maintain_remote_snapshot: Some(maintain_remote_snapshot),
816 });
817
818 cx.foreground().spawn(async move {
819 match share_rx.next().await {
820 Some(result) => result,
821 None => Err(anyhow!("unshared before sharing completed")),
822 }
823 })
824 }
825
826 pub fn unshare(&mut self) {
827 self.share.take();
828 }
829
830 pub fn is_shared(&self) -> bool {
831 self.share.is_some()
832 }
833}
834
835impl RemoteWorktree {
836 fn snapshot(&self) -> Snapshot {
837 self.snapshot.clone()
838 }
839
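    /// Applies an `UpdateWorktree` message received from the server. Updates carry a
    /// monotonically increasing `id`; out-of-order messages are parked in `pending_updates` and
    /// drained once the missing ids arrive, so the consumer always sees updates in sequence.
    ///
    /// The reordering discipline, shown as a minimal standalone sketch over plain ids (an
    /// illustration, not the actual message type):
    ///
    /// ```ignore
    /// use std::collections::VecDeque;
    ///
    /// fn deliver(next_id: &mut u64, pending: &mut VecDeque<u64>, incoming: u64, out: &mut Vec<u64>) {
    ///     if incoming > *next_id {
    ///         // Not ready yet: keep it sorted among the other pending ids.
    ///         let ix = pending.binary_search(&incoming).unwrap_or_else(|ix| ix);
    ///         pending.insert(ix, incoming);
    ///     } else {
    ///         // Deliver this update, then flush any pending ids that are now contiguous.
    ///         *next_id += 1;
    ///         out.push(incoming);
    ///         while pending.front().copied() == Some(*next_id) {
    ///             out.push(pending.pop_front().unwrap());
    ///             *next_id += 1;
    ///         }
    ///     }
    /// }
    /// ```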
840 pub fn update_from_remote(
841 &mut self,
842 envelope: TypedEnvelope<proto::UpdateWorktree>,
843 ) -> Result<()> {
844 let update = envelope.payload;
845 if update.id > self.next_update_id {
846 let ix = match self
847 .pending_updates
848 .binary_search_by_key(&update.id, |pending| pending.id)
849 {
850 Ok(ix) | Err(ix) => ix,
851 };
852 self.pending_updates.insert(ix, update);
853 } else {
854 let tx = self.updates_tx.clone();
855 self.next_update_id += 1;
856 tx.unbounded_send(update)
857 .expect("consumer runs to completion");
858 while let Some(update) = self.pending_updates.front() {
859 if update.id == self.next_update_id {
860 self.next_update_id += 1;
861 tx.unbounded_send(self.pending_updates.pop_front().unwrap())
862 .expect("consumer runs to completion");
863 } else {
864 break;
865 }
866 }
867 }
868
869 Ok(())
870 }
871
872 pub fn has_pending_updates(&self) -> bool {
873 !self.pending_updates.is_empty()
874 }
875
876 pub fn update_diagnostic_summary(
877 &mut self,
878 path: Arc<Path>,
879 summary: &proto::DiagnosticSummary,
880 ) {
881 self.diagnostic_summaries.insert(
882 PathKey(path.clone()),
883 DiagnosticSummary {
884 error_count: summary.error_count as usize,
885 warning_count: summary.warning_count as usize,
886 info_count: summary.info_count as usize,
887 hint_count: summary.hint_count as usize,
888 },
889 );
890 }
891}
892
893impl Snapshot {
894 pub fn id(&self) -> WorktreeId {
895 self.id
896 }
897
898 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
899 let mut entries_by_path_edits = Vec::new();
900 let mut entries_by_id_edits = Vec::new();
901 for entry_id in update.removed_entries {
902 let entry_id = entry_id as usize;
903 let entry = self
904 .entry_for_id(entry_id)
905 .ok_or_else(|| anyhow!("unknown entry"))?;
906 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
907 entries_by_id_edits.push(Edit::Remove(entry.id));
908 }
909
910 for entry in update.updated_entries {
911 let entry = Entry::try_from((&self.root_char_bag, entry))?;
912 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
913 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
914 }
915 entries_by_id_edits.push(Edit::Insert(PathEntry {
916 id: entry.id,
917 path: entry.path.clone(),
918 is_ignored: entry.is_ignored,
919 scan_id: 0,
920 }));
921 entries_by_path_edits.push(Edit::Insert(entry));
922 }
923
924 self.entries_by_path.edit(entries_by_path_edits, &());
925 self.entries_by_id.edit(entries_by_id_edits, &());
926
927 Ok(())
928 }
929
930 pub fn file_count(&self) -> usize {
931 self.entries_by_path.summary().file_count
932 }
933
934 pub fn visible_file_count(&self) -> usize {
935 self.entries_by_path.summary().visible_file_count
936 }
937
938 fn traverse_from_offset(
939 &self,
940 include_dirs: bool,
941 include_ignored: bool,
942 start_offset: usize,
943 ) -> Traversal {
944 let mut cursor = self.entries_by_path.cursor();
945 cursor.seek(
946 &TraversalTarget::Count {
947 count: start_offset,
948 include_dirs,
949 include_ignored,
950 },
951 Bias::Right,
952 &(),
953 );
954 Traversal {
955 cursor,
956 include_dirs,
957 include_ignored,
958 }
959 }
960
961 fn traverse_from_path(
962 &self,
963 include_dirs: bool,
964 include_ignored: bool,
965 path: &Path,
966 ) -> Traversal {
967 let mut cursor = self.entries_by_path.cursor();
968 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
969 Traversal {
970 cursor,
971 include_dirs,
972 include_ignored,
973 }
974 }
975
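    /// Illustrative traversal usage (a sketch only; `snapshot` is assumed to be a populated
    /// `Snapshot`):
    ///
    /// ```ignore
    /// // Iterate all non-ignored files, starting from the first one.
    /// for entry in snapshot.files(false, 0) {
    ///     println!("{:?}", entry.path);
    /// }
    ///
    /// // Iterate every entry, directories included, ignored or not.
    /// for entry in snapshot.entries(true) {
    ///     println!("{:?} ({:?})", entry.path, entry.kind);
    /// }
    /// ```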
976 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
977 self.traverse_from_offset(false, include_ignored, start)
978 }
979
980 pub fn entries(&self, include_ignored: bool) -> Traversal {
981 self.traverse_from_offset(true, include_ignored, 0)
982 }
983
984 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
985 let empty_path = Path::new("");
986 self.entries_by_path
987 .cursor::<()>()
988 .filter(move |entry| entry.path.as_ref() != empty_path)
989 .map(|entry| &entry.path)
990 }
991
992 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
993 let mut cursor = self.entries_by_path.cursor();
994 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
995 let traversal = Traversal {
996 cursor,
997 include_dirs: true,
998 include_ignored: true,
999 };
1000 ChildEntriesIter {
1001 traversal,
1002 parent_path,
1003 }
1004 }
1005
1006 pub fn root_entry(&self) -> Option<&Entry> {
1007 self.entry_for_path("")
1008 }
1009
1010 pub fn root_name(&self) -> &str {
1011 &self.root_name
1012 }
1013
1014 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1015 let path = path.as_ref();
1016 self.traverse_from_path(true, true, path)
1017 .entry()
1018 .and_then(|entry| {
1019 if entry.path.as_ref() == path {
1020 Some(entry)
1021 } else {
1022 None
1023 }
1024 })
1025 }
1026
1027 pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
1028 let entry = self.entries_by_id.get(&id, &())?;
1029 self.entry_for_path(&entry.path)
1030 }
1031
1032 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1033 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1034 }
1035}
1036
1037impl LocalSnapshot {
1038 pub(crate) fn to_proto(
1039 &self,
1040 diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
1041 weak: bool,
1042 ) -> proto::Worktree {
1043 let root_name = self.root_name.clone();
1044 proto::Worktree {
1045 id: self.id.0 as u64,
1046 root_name,
1047 entries: self
1048 .entries_by_path
1049 .iter()
1050 .filter(|e| !e.is_ignored)
1051 .map(Into::into)
1052 .collect(),
1053 diagnostic_summaries: diagnostic_summaries
1054 .iter()
1055 .map(|(path, summary)| summary.to_proto(path.0.clone()))
1056 .collect(),
1057 weak,
1058 next_update_id: 0,
1059 }
1060 }
1061
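    /// Computes the difference between this snapshot and `other` as an `UpdateWorktree` message.
    /// Both `entries_by_id` trees are walked in id order, like a sorted-list merge: ids present
    /// only in `self` become updated entries, ids present only in `other` become removals, and
    /// ids present in both are re-sent only when their `scan_id` changed.
    ///
    /// The same merge idea, as a minimal standalone sketch over `(id, scan_id)` pairs (an
    /// illustration, not this method):
    ///
    /// ```ignore
    /// fn diff(new: &[(u64, u64)], old: &[(u64, u64)]) -> (Vec<u64>, Vec<u64>) {
    ///     let (mut updated, mut removed) = (Vec::new(), Vec::new());
    ///     let (mut i, mut j) = (0, 0);
    ///     while i < new.len() || j < old.len() {
    ///         match (new.get(i), old.get(j)) {
    ///             (Some(a), Some(b)) if a.0 < b.0 => { updated.push(a.0); i += 1; }
    ///             (Some(a), Some(b)) if a.0 > b.0 => { removed.push(b.0); j += 1; }
    ///             (Some(a), Some(b)) => {
    ///                 if a.1 != b.1 { updated.push(a.0); }
    ///                 i += 1;
    ///                 j += 1;
    ///             }
    ///             (Some(a), None) => { updated.push(a.0); i += 1; }
    ///             (None, Some(b)) => { removed.push(b.0); j += 1; }
    ///             (None, None) => break,
    ///         }
    ///     }
    ///     (updated, removed)
    /// }
    /// ```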
1062 pub(crate) fn build_update(
1063 &self,
1064 other: &Self,
1065 project_id: u64,
1066 worktree_id: u64,
1067 update_id: u64,
1068 include_ignored: bool,
1069 ) -> proto::UpdateWorktree {
1070 let mut updated_entries = Vec::new();
1071 let mut removed_entries = Vec::new();
1072 let mut self_entries = self
1073 .entries_by_id
1074 .cursor::<()>()
1075 .filter(|e| include_ignored || !e.is_ignored)
1076 .peekable();
1077 let mut other_entries = other
1078 .entries_by_id
1079 .cursor::<()>()
1080 .filter(|e| include_ignored || !e.is_ignored)
1081 .peekable();
1082 loop {
1083 match (self_entries.peek(), other_entries.peek()) {
1084 (Some(self_entry), Some(other_entry)) => {
1085 match Ord::cmp(&self_entry.id, &other_entry.id) {
1086 Ordering::Less => {
1087 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1088 updated_entries.push(entry);
1089 self_entries.next();
1090 }
1091 Ordering::Equal => {
1092 if self_entry.scan_id != other_entry.scan_id {
1093 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1094 updated_entries.push(entry);
1095 }
1096
1097 self_entries.next();
1098 other_entries.next();
1099 }
1100 Ordering::Greater => {
1101 removed_entries.push(other_entry.id as u64);
1102 other_entries.next();
1103 }
1104 }
1105 }
1106 (Some(self_entry), None) => {
1107 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1108 updated_entries.push(entry);
1109 self_entries.next();
1110 }
1111 (None, Some(other_entry)) => {
1112 removed_entries.push(other_entry.id as u64);
1113 other_entries.next();
1114 }
1115 (None, None) => break,
1116 }
1117 }
1118
1119 proto::UpdateWorktree {
1120 id: update_id as u64,
1121 project_id,
1122 worktree_id,
1123 root_name: self.root_name().to_string(),
1124 updated_entries,
1125 removed_entries,
1126 }
1127 }
1128
1129 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1130 if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
1131 let abs_path = self.abs_path.join(&entry.path);
1132 match build_gitignore(&abs_path, fs) {
1133 Ok(ignore) => {
1134 let ignore_dir_path = entry.path.parent().unwrap();
1135 self.ignores
1136 .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
1137 }
1138 Err(error) => {
1139 log::error!(
1140 "error loading .gitignore file {:?} - {:?}",
1141 &entry.path,
1142 error
1143 );
1144 }
1145 }
1146 }
1147
1148 self.reuse_entry_id(&mut entry);
1149 self.entries_by_path.insert_or_replace(entry.clone(), &());
1150 let scan_id = self.scan_id;
1151 self.entries_by_id.insert_or_replace(
1152 PathEntry {
1153 id: entry.id,
1154 path: entry.path.clone(),
1155 is_ignored: entry.is_ignored,
1156 scan_id,
1157 },
1158 &(),
1159 );
1160 entry
1161 }
1162
1163 fn populate_dir(
1164 &mut self,
1165 parent_path: Arc<Path>,
1166 entries: impl IntoIterator<Item = Entry>,
1167 ignore: Option<Arc<Gitignore>>,
1168 ) {
1169 let mut parent_entry = self
1170 .entries_by_path
1171 .get(&PathKey(parent_path.clone()), &())
1172 .unwrap()
1173 .clone();
1174 if let Some(ignore) = ignore {
1175 self.ignores.insert(parent_path, (ignore, self.scan_id));
1176 }
1177 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1178 parent_entry.kind = EntryKind::Dir;
1179 } else {
1180 unreachable!();
1181 }
1182
1183 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1184 let mut entries_by_id_edits = Vec::new();
1185
1186 for mut entry in entries {
1187 self.reuse_entry_id(&mut entry);
1188 entries_by_id_edits.push(Edit::Insert(PathEntry {
1189 id: entry.id,
1190 path: entry.path.clone(),
1191 is_ignored: entry.is_ignored,
1192 scan_id: self.scan_id,
1193 }));
1194 entries_by_path_edits.push(Edit::Insert(entry));
1195 }
1196
1197 self.entries_by_path.edit(entries_by_path_edits, &());
1198 self.entries_by_id.edit(entries_by_id_edits, &());
1199 }
1200
1201 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1202 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1203 entry.id = removed_entry_id;
1204 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1205 entry.id = existing_entry.id;
1206 }
1207 }
1208
1209 fn remove_path(&mut self, path: &Path) {
1210 let mut new_entries;
1211 let removed_entries;
1212 {
1213 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1214 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1215 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1216 new_entries.push_tree(cursor.suffix(&()), &());
1217 }
1218 self.entries_by_path = new_entries;
1219
1220 let mut entries_by_id_edits = Vec::new();
1221 for entry in removed_entries.cursor::<()>() {
1222 let removed_entry_id = self
1223 .removed_entry_ids
1224 .entry(entry.inode)
1225 .or_insert(entry.id);
1226 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1227 entries_by_id_edits.push(Edit::Remove(entry.id));
1228 }
1229 self.entries_by_id.edit(entries_by_id_edits, &());
1230
1231 if path.file_name() == Some(&GITIGNORE) {
1232 if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
1233 *scan_id = self.scan_id;
1234 }
1235 }
1236 }
1237
1238 fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1239 let mut new_ignores = Vec::new();
1240 for ancestor in path.ancestors().skip(1) {
1241 if let Some((ignore, _)) = self.ignores.get(ancestor) {
1242 new_ignores.push((ancestor, Some(ignore.clone())));
1243 } else {
1244 new_ignores.push((ancestor, None));
1245 }
1246 }
1247
1248 let mut ignore_stack = IgnoreStack::none();
1249 for (parent_path, ignore) in new_ignores.into_iter().rev() {
1250 if ignore_stack.is_path_ignored(&parent_path, true) {
1251 ignore_stack = IgnoreStack::all();
1252 break;
1253 } else if let Some(ignore) = ignore {
1254 ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
1255 }
1256 }
1257
1258 if ignore_stack.is_path_ignored(path, is_dir) {
1259 ignore_stack = IgnoreStack::all();
1260 }
1261
1262 ignore_stack
1263 }
1264}
1265
1266fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1267 let contents = smol::block_on(fs.load(&abs_path))?;
1268 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1269 let mut builder = GitignoreBuilder::new(parent);
1270 for line in contents.lines() {
1271 builder.add_line(Some(abs_path.into()), line)?;
1272 }
1273 Ok(builder.build()?)
1274}
1275
1276impl WorktreeId {
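    /// A hypothetical round-trip through the id conversions (illustrative only, not part of the
    /// crate's tests):
    ///
    /// ```ignore
    /// let id = WorktreeId::from_usize(42);
    /// assert_eq!(id.to_proto(), 42);
    /// assert_eq!(WorktreeId::from_proto(id.to_proto()), id);
    /// assert_eq!(id.to_usize(), 42);
    /// ```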
1277 pub fn from_usize(handle_id: usize) -> Self {
1278 Self(handle_id)
1279 }
1280
1281 pub(crate) fn from_proto(id: u64) -> Self {
1282 Self(id as usize)
1283 }
1284
1285 pub fn to_proto(&self) -> u64 {
1286 self.0 as u64
1287 }
1288
1289 pub fn to_usize(&self) -> usize {
1290 self.0
1291 }
1292}
1293
1294impl fmt::Display for WorktreeId {
1295 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1296 self.0.fmt(f)
1297 }
1298}
1299
1300impl Deref for Worktree {
1301 type Target = Snapshot;
1302
1303 fn deref(&self) -> &Self::Target {
1304 match self {
1305 Worktree::Local(worktree) => &worktree.snapshot,
1306 Worktree::Remote(worktree) => &worktree.snapshot,
1307 }
1308 }
1309}
1310
1311impl Deref for LocalWorktree {
1312 type Target = LocalSnapshot;
1313
1314 fn deref(&self) -> &Self::Target {
1315 &self.snapshot
1316 }
1317}
1318
1319impl Deref for RemoteWorktree {
1320 type Target = Snapshot;
1321
1322 fn deref(&self) -> &Self::Target {
1323 &self.snapshot
1324 }
1325}
1326
1327impl fmt::Debug for LocalWorktree {
1328 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1329 self.snapshot.fmt(f)
1330 }
1331}
1332
1333impl fmt::Debug for Snapshot {
1334 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1335 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1336 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1337
1338 impl<'a> fmt::Debug for EntriesByPath<'a> {
1339 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1340 f.debug_map()
1341 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1342 .finish()
1343 }
1344 }
1345
1346 impl<'a> fmt::Debug for EntriesById<'a> {
1347 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1348 f.debug_list().entries(self.0.iter()).finish()
1349 }
1350 }
1351
1352 f.debug_struct("Snapshot")
1353 .field("id", &self.id)
1354 .field("root_name", &self.root_name)
1355 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1356 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1357 .finish()
1358 }
1359}
1360
1361#[derive(Clone, PartialEq)]
1362pub struct File {
1363 pub worktree: ModelHandle<Worktree>,
1364 pub path: Arc<Path>,
1365 pub mtime: SystemTime,
1366 pub(crate) entry_id: Option<usize>,
1367 pub(crate) is_local: bool,
1368}
1369
1370impl language::File for File {
1371 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1372 if self.is_local {
1373 Some(self)
1374 } else {
1375 None
1376 }
1377 }
1378
1379 fn mtime(&self) -> SystemTime {
1380 self.mtime
1381 }
1382
1383 fn path(&self) -> &Arc<Path> {
1384 &self.path
1385 }
1386
1387 fn full_path(&self, cx: &AppContext) -> PathBuf {
1388 let mut full_path = PathBuf::new();
1389 full_path.push(self.worktree.read(cx).root_name());
1390 if self.path.components().next().is_some() {
1391 full_path.push(&self.path);
1392 }
1393 full_path
1394 }
1395
1396 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1397 /// of its worktree, then this method will return the name of the worktree itself.
1398 fn file_name(&self, cx: &AppContext) -> OsString {
1399 self.path
1400 .file_name()
1401 .map(|name| name.into())
1402 .unwrap_or_else(|| OsString::from(&self.worktree.read(cx).root_name))
1403 }
1404
1405 fn is_deleted(&self) -> bool {
1406 self.entry_id.is_none()
1407 }
1408
1409 fn save(
1410 &self,
1411 buffer_id: u64,
1412 text: Rope,
1413 version: clock::Global,
1414 cx: &mut MutableAppContext,
1415 ) -> Task<Result<(clock::Global, SystemTime)>> {
1416 self.worktree.update(cx, |worktree, cx| match worktree {
1417 Worktree::Local(worktree) => {
1418 let rpc = worktree.client.clone();
1419 let project_id = worktree.share.as_ref().map(|share| share.project_id);
1420 let save = worktree.save(self.path.clone(), text, cx);
1421 cx.background().spawn(async move {
1422 let entry = save.await?;
1423 if let Some(project_id) = project_id {
1424 rpc.send(proto::BufferSaved {
1425 project_id,
1426 buffer_id,
1427 version: (&version).into(),
1428 mtime: Some(entry.mtime.into()),
1429 })?;
1430 }
1431 Ok((version, entry.mtime))
1432 })
1433 }
1434 Worktree::Remote(worktree) => {
1435 let rpc = worktree.client.clone();
1436 let project_id = worktree.project_id;
1437 cx.foreground().spawn(async move {
1438 let response = rpc
1439 .request(proto::SaveBuffer {
1440 project_id,
1441 buffer_id,
1442 version: (&version).into(),
1443 })
1444 .await?;
1445 let version = response.version.try_into()?;
1446 let mtime = response
1447 .mtime
1448 .ok_or_else(|| anyhow!("missing mtime"))?
1449 .into();
1450 Ok((version, mtime))
1451 })
1452 }
1453 })
1454 }
1455
1456 fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
1457 self.worktree.update(cx, |worktree, cx| {
1458 worktree.send_buffer_update(buffer_id, operation, cx);
1459 });
1460 }
1461
1462 fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
1463 self.worktree.update(cx, |worktree, _| {
1464 if let Worktree::Remote(worktree) = worktree {
1465 worktree
1466 .client
1467 .send(proto::CloseBuffer {
1468 project_id: worktree.project_id,
1469 buffer_id,
1470 })
1471 .log_err();
1472 }
1473 });
1474 }
1475
1476 fn as_any(&self) -> &dyn Any {
1477 self
1478 }
1479
1480 fn to_proto(&self) -> rpc::proto::File {
1481 rpc::proto::File {
1482 worktree_id: self.worktree.id() as u64,
1483 entry_id: self.entry_id.map(|entry_id| entry_id as u64),
1484 path: self.path.to_string_lossy().into(),
1485 mtime: Some(self.mtime.into()),
1486 }
1487 }
1488}
1489
1490impl language::LocalFile for File {
1491 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1492 self.worktree
1493 .read(cx)
1494 .as_local()
1495 .unwrap()
1496 .abs_path
1497 .join(&self.path)
1498 }
1499
1500 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1501 let worktree = self.worktree.read(cx).as_local().unwrap();
1502 let abs_path = worktree.absolutize(&self.path);
1503 let fs = worktree.fs.clone();
1504 cx.background()
1505 .spawn(async move { fs.load(&abs_path).await })
1506 }
1507
1508 fn buffer_reloaded(
1509 &self,
1510 buffer_id: u64,
1511 version: &clock::Global,
1512 mtime: SystemTime,
1513 cx: &mut MutableAppContext,
1514 ) {
1515 let worktree = self.worktree.read(cx).as_local().unwrap();
1516 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1517 worktree
1518 .client
1519 .send(proto::BufferReloaded {
1520 project_id,
1521 buffer_id,
1522 version: version.into(),
1523 mtime: Some(mtime.into()),
1524 })
1525 .log_err();
1526 }
1527 }
1528}
1529
1530impl File {
1531 pub fn from_proto(
1532 proto: rpc::proto::File,
1533 worktree: ModelHandle<Worktree>,
1534 cx: &AppContext,
1535 ) -> Result<Self> {
1536 let worktree_id = worktree
1537 .read(cx)
1538 .as_remote()
1539 .ok_or_else(|| anyhow!("not remote"))?
1540 .id();
1541
1542 if worktree_id.to_proto() != proto.worktree_id {
1543 return Err(anyhow!("worktree id does not match file"));
1544 }
1545
1546 Ok(Self {
1547 worktree,
1548 path: Path::new(&proto.path).into(),
1549 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1550 entry_id: proto.entry_id.map(|entry_id| entry_id as usize),
1551 is_local: false,
1552 })
1553 }
1554
1555 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
1556 file.and_then(|f| f.as_any().downcast_ref())
1557 }
1558
1559 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1560 self.worktree.read(cx).id()
1561 }
1562}
1563
1564#[derive(Clone, Debug, PartialEq, Eq)]
1565pub struct Entry {
1566 pub id: usize,
1567 pub kind: EntryKind,
1568 pub path: Arc<Path>,
1569 pub inode: u64,
1570 pub mtime: SystemTime,
1571 pub is_symlink: bool,
1572 pub is_ignored: bool,
1573}
1574
1575#[derive(Clone, Debug, PartialEq, Eq)]
1576pub enum EntryKind {
1577 PendingDir,
1578 Dir,
1579 File(CharBag),
1580}
1581
1582impl Entry {
1583 fn new(
1584 path: Arc<Path>,
1585 metadata: &fs::Metadata,
1586 next_entry_id: &AtomicUsize,
1587 root_char_bag: CharBag,
1588 ) -> Self {
1589 Self {
1590 id: next_entry_id.fetch_add(1, SeqCst),
1591 kind: if metadata.is_dir {
1592 EntryKind::PendingDir
1593 } else {
1594 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1595 },
1596 path,
1597 inode: metadata.inode,
1598 mtime: metadata.mtime,
1599 is_symlink: metadata.is_symlink,
1600 is_ignored: false,
1601 }
1602 }
1603
1604 pub fn is_dir(&self) -> bool {
1605 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
1606 }
1607
1608 pub fn is_file(&self) -> bool {
1609 matches!(self.kind, EntryKind::File(_))
1610 }
1611}
1612
1613impl sum_tree::Item for Entry {
1614 type Summary = EntrySummary;
1615
1616 fn summary(&self) -> Self::Summary {
1617 let visible_count = if self.is_ignored { 0 } else { 1 };
1618 let file_count;
1619 let visible_file_count;
1620 if self.is_file() {
1621 file_count = 1;
1622 visible_file_count = visible_count;
1623 } else {
1624 file_count = 0;
1625 visible_file_count = 0;
1626 }
1627
1628 EntrySummary {
1629 max_path: self.path.clone(),
1630 count: 1,
1631 visible_count,
1632 file_count,
1633 visible_file_count,
1634 }
1635 }
1636}
1637
1638impl sum_tree::KeyedItem for Entry {
1639 type Key = PathKey;
1640
1641 fn key(&self) -> Self::Key {
1642 PathKey(self.path.clone())
1643 }
1644}
1645
1646#[derive(Clone, Debug)]
1647pub struct EntrySummary {
1648 max_path: Arc<Path>,
1649 count: usize,
1650 visible_count: usize,
1651 file_count: usize,
1652 visible_file_count: usize,
1653}
1654
1655impl Default for EntrySummary {
1656 fn default() -> Self {
1657 Self {
1658 max_path: Arc::from(Path::new("")),
1659 count: 0,
1660 visible_count: 0,
1661 file_count: 0,
1662 visible_file_count: 0,
1663 }
1664 }
1665}
1666
1667impl sum_tree::Summary for EntrySummary {
1668 type Context = ();
1669
1670 fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
1672 self.visible_count += rhs.visible_count;
1673 self.file_count += rhs.file_count;
1674 self.visible_file_count += rhs.visible_file_count;
1675 }
1676}
1677
1678#[derive(Clone, Debug)]
1679struct PathEntry {
1680 id: usize,
1681 path: Arc<Path>,
1682 is_ignored: bool,
1683 scan_id: usize,
1684}
1685
1686impl sum_tree::Item for PathEntry {
1687 type Summary = PathEntrySummary;
1688
1689 fn summary(&self) -> Self::Summary {
1690 PathEntrySummary { max_id: self.id }
1691 }
1692}
1693
1694impl sum_tree::KeyedItem for PathEntry {
1695 type Key = usize;
1696
1697 fn key(&self) -> Self::Key {
1698 self.id
1699 }
1700}
1701
1702#[derive(Clone, Debug, Default)]
1703struct PathEntrySummary {
1704 max_id: usize,
1705}
1706
1707impl sum_tree::Summary for PathEntrySummary {
1708 type Context = ();
1709
1710 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
1711 self.max_id = summary.max_id;
1712 }
1713}
1714
1715impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
1716 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
1717 *self = summary.max_id;
1718 }
1719}
1720
1721#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1722pub struct PathKey(Arc<Path>);
1723
1724impl Default for PathKey {
1725 fn default() -> Self {
1726 Self(Path::new("").into())
1727 }
1728}
1729
1730impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
1731 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
1732 self.0 = summary.max_path.clone();
1733 }
1734}
1735
1736struct BackgroundScanner {
1737 fs: Arc<dyn Fs>,
1738 snapshot: Arc<Mutex<LocalSnapshot>>,
1739 notify: UnboundedSender<ScanState>,
1740 executor: Arc<executor::Background>,
1741}
1742
1743impl BackgroundScanner {
1744 fn new(
1745 snapshot: Arc<Mutex<LocalSnapshot>>,
1746 notify: UnboundedSender<ScanState>,
1747 fs: Arc<dyn Fs>,
1748 executor: Arc<executor::Background>,
1749 ) -> Self {
1750 Self {
1751 fs,
1752 snapshot,
1753 notify,
1754 executor,
1755 }
1756 }
1757
1758 fn abs_path(&self) -> Arc<Path> {
1759 self.snapshot.lock().abs_path.clone()
1760 }
1761
1762 fn snapshot(&self) -> LocalSnapshot {
1763 self.snapshot.lock().clone()
1764 }
1765
1766 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
1767 if self.notify.unbounded_send(ScanState::Scanning).is_err() {
1768 return;
1769 }
1770
1771 if let Err(err) = self.scan_dirs().await {
1772 if self
1773 .notify
1774 .unbounded_send(ScanState::Err(Arc::new(err)))
1775 .is_err()
1776 {
1777 return;
1778 }
1779 }
1780
1781 if self.notify.unbounded_send(ScanState::Idle).is_err() {
1782 return;
1783 }
1784
1785 futures::pin_mut!(events_rx);
1786 while let Some(events) = events_rx.next().await {
1787 if self.notify.unbounded_send(ScanState::Scanning).is_err() {
1788 break;
1789 }
1790
1791 if !self.process_events(events).await {
1792 break;
1793 }
1794
1795 if self.notify.unbounded_send(ScanState::Idle).is_err() {
1796 break;
1797 }
1798 }
1799 }
1800
1801 async fn scan_dirs(&mut self) -> Result<()> {
1802 let root_char_bag;
1803 let next_entry_id;
1804 let is_dir;
1805 {
1806 let snapshot = self.snapshot.lock();
1807 root_char_bag = snapshot.root_char_bag;
1808 next_entry_id = snapshot.next_entry_id.clone();
1809 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
1810 };
1811
1812 if is_dir {
1813 let path: Arc<Path> = Arc::from(Path::new(""));
1814 let abs_path = self.abs_path();
1815 let (tx, rx) = channel::unbounded();
1816 tx.send(ScanJob {
1817 abs_path: abs_path.to_path_buf(),
1818 path,
1819 ignore_stack: IgnoreStack::none(),
1820 scan_queue: tx.clone(),
1821 })
1822 .await
1823 .unwrap();
1824 drop(tx);
1825
1826 self.executor
1827 .scoped(|scope| {
1828 for _ in 0..self.executor.num_cpus() {
1829 scope.spawn(async {
1830 while let Ok(job) = rx.recv().await {
1831 if let Err(err) = self
1832 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
1833 .await
1834 {
1835 log::error!("error scanning {:?}: {}", job.abs_path, err);
1836 }
1837 }
1838 });
1839 }
1840 })
1841 .await;
1842 }
1843
1844 Ok(())
1845 }
1846
1847 async fn scan_dir(
1848 &self,
1849 root_char_bag: CharBag,
1850 next_entry_id: Arc<AtomicUsize>,
1851 job: &ScanJob,
1852 ) -> Result<()> {
1853 let mut new_entries: Vec<Entry> = Vec::new();
1854 let mut new_jobs: Vec<ScanJob> = Vec::new();
1855 let mut ignore_stack = job.ignore_stack.clone();
1856 let mut new_ignore = None;
1857
1858 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
1859 while let Some(child_abs_path) = child_paths.next().await {
1860 let child_abs_path = match child_abs_path {
1861 Ok(child_abs_path) => child_abs_path,
1862 Err(error) => {
1863 log::error!("error processing entry {:?}", error);
1864 continue;
1865 }
1866 };
1867 let child_name = child_abs_path.file_name().unwrap();
1868 let child_path: Arc<Path> = job.path.join(child_name).into();
1869 let child_metadata = match self.fs.metadata(&child_abs_path).await? {
1870 Some(metadata) => metadata,
1871 None => continue,
1872 };
1873
1874 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
1875 if child_name == *GITIGNORE {
1876 match build_gitignore(&child_abs_path, self.fs.as_ref()) {
1877 Ok(ignore) => {
1878 let ignore = Arc::new(ignore);
1879 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
1880 new_ignore = Some(ignore);
1881 }
1882 Err(error) => {
1883 log::error!(
1884 "error loading .gitignore file {:?} - {:?}",
1885 child_name,
1886 error
1887 );
1888 }
1889 }
1890
                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore` starts
                // with a `.`, it sorts near the beginning of the directory listing, so there
                // should rarely be many such entries to update. Also update the ignore stack
                // associated with any new jobs.
1895 let mut new_jobs = new_jobs.iter_mut();
1896 for entry in &mut new_entries {
1897 entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
1898 if entry.is_dir() {
1899 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
1900 IgnoreStack::all()
1901 } else {
1902 ignore_stack.clone()
1903 };
1904 }
1905 }
1906 }
1907
1908 let mut child_entry = Entry::new(
1909 child_path.clone(),
1910 &child_metadata,
1911 &next_entry_id,
1912 root_char_bag,
1913 );
1914
1915 if child_metadata.is_dir {
1916 let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
1917 child_entry.is_ignored = is_ignored;
1918 new_entries.push(child_entry);
1919 new_jobs.push(ScanJob {
1920 abs_path: child_abs_path,
1921 path: child_path,
1922 ignore_stack: if is_ignored {
1923 IgnoreStack::all()
1924 } else {
1925 ignore_stack.clone()
1926 },
1927 scan_queue: job.scan_queue.clone(),
1928 });
1929 } else {
1930 child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
1931 new_entries.push(child_entry);
1932 };
1933 }
1934
1935 self.snapshot
1936 .lock()
1937 .populate_dir(job.path.clone(), new_entries, new_ignore);
1938 for new_job in new_jobs {
1939 job.scan_queue.send(new_job).await.unwrap();
1940 }
1941
1942 Ok(())
1943 }
1944
1945 async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
1946 let mut snapshot = self.snapshot();
1947 snapshot.scan_id += 1;
1948
1949 let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
1950 abs_path
1951 } else {
1952 return false;
1953 };
1954 let root_char_bag = snapshot.root_char_bag;
1955 let next_entry_id = snapshot.next_entry_id.clone();
1956
1957 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
1958 events.dedup_by(|a, b| a.path.starts_with(&b.path));
1959
1960 for event in &events {
1961 match event.path.strip_prefix(&root_abs_path) {
1962 Ok(path) => snapshot.remove_path(&path),
1963 Err(_) => {
1964 log::error!(
1965 "unexpected event {:?} for root path {:?}",
1966 event.path,
1967 root_abs_path
1968 );
1969 continue;
1970 }
1971 }
1972 }
1973
1974 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
1975 for event in events {
1976 let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
1977 Ok(path) => Arc::from(path.to_path_buf()),
1978 Err(_) => {
1979 log::error!(
1980 "unexpected event {:?} for root path {:?}",
1981 event.path,
1982 root_abs_path
1983 );
1984 continue;
1985 }
1986 };
1987
1988 match self.fs.metadata(&event.path).await {
1989 Ok(Some(metadata)) => {
1990 let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
1991 let mut fs_entry = Entry::new(
1992 path.clone(),
1993 &metadata,
1994 snapshot.next_entry_id.as_ref(),
1995 snapshot.root_char_bag,
1996 );
1997 fs_entry.is_ignored = ignore_stack.is_all();
1998 snapshot.insert_entry(fs_entry, self.fs.as_ref());
1999 if metadata.is_dir {
2000 scan_queue_tx
2001 .send(ScanJob {
2002 abs_path: event.path,
2003 path,
2004 ignore_stack,
2005 scan_queue: scan_queue_tx.clone(),
2006 })
2007 .await
2008 .unwrap();
2009 }
2010 }
2011 Ok(None) => {}
2012 Err(err) => {
2013 // TODO - create a special 'error' entry in the entries tree to mark this
2014 log::error!("error reading file on event {:?}", err);
2015 }
2016 }
2017 }
2018
2019 *self.snapshot.lock() = snapshot;
2020
2021 // Scan any directories that were created as part of this event batch.
2022 drop(scan_queue_tx);
2023 self.executor
2024 .scoped(|scope| {
2025 for _ in 0..self.executor.num_cpus() {
2026 scope.spawn(async {
2027 while let Ok(job) = scan_queue_rx.recv().await {
2028 if let Err(err) = self
2029 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2030 .await
2031 {
2032 log::error!("error scanning {:?}: {}", job.abs_path, err);
2033 }
2034 }
2035 });
2036 }
2037 })
2038 .await;
2039
2040 // Attempt to detect renames only over a single batch of file-system events.
2041 self.snapshot.lock().removed_entry_ids.clear();
2042
2043 self.update_ignore_statuses().await;
2044 true
2045 }
2046
2047 async fn update_ignore_statuses(&self) {
2048 let mut snapshot = self.snapshot();
2049
2050 let mut ignores_to_update = Vec::new();
2051 let mut ignores_to_delete = Vec::new();
2052 for (parent_path, (_, scan_id)) in &snapshot.ignores {
2053 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2054 ignores_to_update.push(parent_path.clone());
2055 }
2056
2057 let ignore_path = parent_path.join(&*GITIGNORE);
2058 if snapshot.entry_for_path(ignore_path).is_none() {
2059 ignores_to_delete.push(parent_path.clone());
2060 }
2061 }
2062
2063 for parent_path in ignores_to_delete {
2064 snapshot.ignores.remove(&parent_path);
2065 self.snapshot.lock().ignores.remove(&parent_path);
2066 }
2067
2068 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2069 ignores_to_update.sort_unstable();
2070 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2071 while let Some(parent_path) = ignores_to_update.next() {
2072 while ignores_to_update
2073 .peek()
2074 .map_or(false, |p| p.starts_with(&parent_path))
2075 {
2076 ignores_to_update.next().unwrap();
2077 }
2078
2079 let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
2080 ignore_queue_tx
2081 .send(UpdateIgnoreStatusJob {
2082 path: parent_path,
2083 ignore_stack,
2084 ignore_queue: ignore_queue_tx.clone(),
2085 })
2086 .await
2087 .unwrap();
2088 }
2089 drop(ignore_queue_tx);
2090
2091 self.executor
2092 .scoped(|scope| {
2093 for _ in 0..self.executor.num_cpus() {
2094 scope.spawn(async {
2095 while let Ok(job) = ignore_queue_rx.recv().await {
2096 self.update_ignore_status(job, &snapshot).await;
2097 }
2098 });
2099 }
2100 })
2101 .await;
2102 }
2103
2104 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2105 let mut ignore_stack = job.ignore_stack;
2106 if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
2107 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2108 }
2109
2110 let mut entries_by_id_edits = Vec::new();
2111 let mut entries_by_path_edits = Vec::new();
2112 for mut entry in snapshot.child_entries(&job.path).cloned() {
2113 let was_ignored = entry.is_ignored;
2114 entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2115 if entry.is_dir() {
2116 let child_ignore_stack = if entry.is_ignored {
2117 IgnoreStack::all()
2118 } else {
2119 ignore_stack.clone()
2120 };
2121 job.ignore_queue
2122 .send(UpdateIgnoreStatusJob {
2123 path: entry.path.clone(),
2124 ignore_stack: child_ignore_stack,
2125 ignore_queue: job.ignore_queue.clone(),
2126 })
2127 .await
2128 .unwrap();
2129 }
2130
2131 if entry.is_ignored != was_ignored {
2132 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2133 path_entry.scan_id = snapshot.scan_id;
2134 path_entry.is_ignored = entry.is_ignored;
2135 entries_by_id_edits.push(Edit::Insert(path_entry));
2136 entries_by_path_edits.push(Edit::Insert(entry));
2137 }
2138 }
2139
2140 let mut snapshot = self.snapshot.lock();
2141 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2142 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2143 }
2144}
2145
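// Re-read the metadata for a single path and insert the resulting entry into
// the snapshot, e.g. after saving a file.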
2146async fn refresh_entry(
2147 fs: &dyn Fs,
2148 snapshot: &Mutex<LocalSnapshot>,
2149 path: Arc<Path>,
2150 abs_path: &Path,
2151) -> Result<Entry> {
2152 let root_char_bag;
2153 let next_entry_id;
2154 {
2155 let snapshot = snapshot.lock();
2156 root_char_bag = snapshot.root_char_bag;
2157 next_entry_id = snapshot.next_entry_id.clone();
2158 }
2159 let entry = Entry::new(
2160 path,
2161 &fs.metadata(abs_path)
2162 .await?
2163 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2164 &next_entry_id,
2165 root_char_bag,
2166 );
2167 Ok(snapshot.lock().insert_entry(entry, fs))
2168}
2169
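// Extend the root's character bag with the lowercased characters of `path`,
// producing the bag used for fuzzy matching. For example, "src/Main.rs" adds
// the characters {s, r, c, /, m, a, i, n, .} to the root bag.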
2170fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2171 let mut result = root_char_bag;
2172 result.extend(
2173 path.to_string_lossy()
2174 .chars()
2175 .map(|c| c.to_ascii_lowercase()),
2176 );
2177 result
2178}
2179
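// A unit of work for directory scanning: one directory to read, identified by
// both its absolute and worktree-relative paths, plus the ignore stack in
// effect at that directory. Each job carries a clone of the queue's sender so
// that newly discovered subdirectories can be enqueued.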
2180struct ScanJob {
2181 abs_path: PathBuf,
2182 path: Arc<Path>,
2183 ignore_stack: Arc<IgnoreStack>,
2184 scan_queue: Sender<ScanJob>,
2185}
2186
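// A unit of work for `update_ignore_statuses`: one directory whose children
// need their `is_ignored` flags re-evaluated against `ignore_stack`.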
2187struct UpdateIgnoreStatusJob {
2188 path: Arc<Path>,
2189 ignore_stack: Arc<IgnoreStack>,
2190 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2191}
2192
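// Test-only extension methods for worktree model handles. In tests, a typical
// call is simply `tree.flush_fs_events(&cx).await` before asserting on entries
// (see the gitignore rescan test below).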
2193pub trait WorktreeHandle {
2194 #[cfg(any(test, feature = "test-support"))]
2195 fn flush_fs_events<'a>(
2196 &self,
2197 cx: &'a gpui::TestAppContext,
2198 ) -> futures::future::LocalBoxFuture<'a, ()>;
2199}
2200
2201impl WorktreeHandle for ModelHandle<Worktree> {
2202 // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
2203 // occurred before the worktree was constructed. These events can cause the worktree to perform
2204 // extra directory scans and emit extra scan-state notifications.
2205 //
2206 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2207 // to ensure that all redundant FS events have already been processed.
2208 #[cfg(any(test, feature = "test-support"))]
2209 fn flush_fs_events<'a>(
2210 &self,
2211 cx: &'a gpui::TestAppContext,
2212 ) -> futures::future::LocalBoxFuture<'a, ()> {
2213 use smol::future::FutureExt;
2214
2215 let filename = "fs-event-sentinel";
2216 let tree = self.clone();
2217 let (fs, root_path) = self.read_with(cx, |tree, _| {
2218 let tree = tree.as_local().unwrap();
2219 (tree.fs.clone(), tree.abs_path().clone())
2220 });
2221
2222 async move {
2223 fs.create_file(&root_path.join(filename), Default::default())
2224 .await
2225 .unwrap();
2226 tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
2227 .await;
2228
2229 fs.remove_file(&root_path.join(filename), Default::default())
2230 .await
2231 .unwrap();
2232 tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
2233 .await;
2234
2235 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2236 .await;
2237 }
2238 .boxed_local()
2239 }
2240}
2241
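// The seek dimension used when traversing `entries_by_path`: the maximum path
// seen so far plus running entry counts, broken down by visibility (ignored or
// not) and by kind (all entries vs. files only).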
2242#[derive(Clone, Debug)]
2243struct TraversalProgress<'a> {
2244 max_path: &'a Path,
2245 count: usize,
2246 visible_count: usize,
2247 file_count: usize,
2248 visible_file_count: usize,
2249}
2250
2251impl<'a> TraversalProgress<'a> {
2252 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2253 match (include_ignored, include_dirs) {
2254 (true, true) => self.count,
2255 (true, false) => self.file_count,
2256 (false, true) => self.visible_count,
2257 (false, false) => self.visible_file_count,
2258 }
2259 }
2260}
2261
2262impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2263 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2264 self.max_path = summary.max_path.as_ref();
2265 self.count += summary.count;
2266 self.visible_count += summary.visible_count;
2267 self.file_count += summary.file_count;
2268 self.visible_file_count += summary.visible_file_count;
2269 }
2270}
2271
2272impl<'a> Default for TraversalProgress<'a> {
2273 fn default() -> Self {
2274 Self {
2275 max_path: Path::new(""),
2276 count: 0,
2277 visible_count: 0,
2278 file_count: 0,
2279 visible_file_count: 0,
2280 }
2281 }
2282}
2283
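// An ordered traversal over a snapshot's entries, backed by a `SumTree` cursor.
// `include_dirs` and `include_ignored` control which entries are yielded and how
// offsets are counted; snapshot methods such as `entries` and `files` (exercised
// in the tests below) return traversals configured accordingly. A rough usage
// sketch, using names from this file:
//
//     // Collect the paths of all visible, non-ignored files.
//     let paths: Vec<_> = snapshot
//         .files(false, 0)
//         .map(|entry| entry.path.clone())
//         .collect();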
2284pub struct Traversal<'a> {
2285 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2286 include_ignored: bool,
2287 include_dirs: bool,
2288}
2289
2290impl<'a> Traversal<'a> {
2291 pub fn advance(&mut self) -> bool {
2292 self.advance_to_offset(self.offset() + 1)
2293 }
2294
2295 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2296 self.cursor.seek_forward(
2297 &TraversalTarget::Count {
2298 count: offset,
2299 include_dirs: self.include_dirs,
2300 include_ignored: self.include_ignored,
2301 },
2302 Bias::Right,
2303 &(),
2304 )
2305 }
2306
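// Seek past the current entry's entire subtree, repeating until the cursor
// lands on an entry that satisfies the include_dirs / include_ignored filters
// (or the end of the tree is reached); used to step between siblings.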
2307 pub fn advance_to_sibling(&mut self) -> bool {
2308 while let Some(entry) = self.cursor.item() {
2309 self.cursor.seek_forward(
2310 &TraversalTarget::PathSuccessor(&entry.path),
2311 Bias::Left,
2312 &(),
2313 );
2314 if let Some(entry) = self.cursor.item() {
2315 if (self.include_dirs || !entry.is_dir())
2316 && (self.include_ignored || !entry.is_ignored)
2317 {
2318 return true;
2319 }
2320 }
2321 }
2322 false
2323 }
2324
2325 pub fn entry(&self) -> Option<&'a Entry> {
2326 self.cursor.item()
2327 }
2328
2329 pub fn offset(&self) -> usize {
2330 self.cursor
2331 .start()
2332 .count(self.include_dirs, self.include_ignored)
2333 }
2334}
2335
2336impl<'a> Iterator for Traversal<'a> {
2337 type Item = &'a Entry;
2338
2339 fn next(&mut self) -> Option<Self::Item> {
2340 if let Some(item) = self.entry() {
2341 self.advance();
2342 Some(item)
2343 } else {
2344 None
2345 }
2346 }
2347}
2348
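// Seek targets for a traversal cursor: an exact path, the first entry outside a
// path's subtree (its "successor"), or an absolute offset interpreted according
// to the traversal's counting mode.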
2349#[derive(Debug)]
2350enum TraversalTarget<'a> {
2351 Path(&'a Path),
2352 PathSuccessor(&'a Path),
2353 Count {
2354 count: usize,
2355 include_ignored: bool,
2356 include_dirs: bool,
2357 },
2358}
2359
2360impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2361 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2362 match self {
2363 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2364 TraversalTarget::PathSuccessor(path) => {
2365 if !cursor_location.max_path.starts_with(path) {
2366 Ordering::Equal
2367 } else {
2368 Ordering::Greater
2369 }
2370 }
2371 TraversalTarget::Count {
2372 count,
2373 include_dirs,
2374 include_ignored,
2375 } => Ord::cmp(
2376 count,
2377 &cursor_location.count(*include_dirs, *include_ignored),
2378 ),
2379 }
2380 }
2381}
2382
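// Iterates the direct children of `parent_path` by advancing the underlying
// traversal from sibling to sibling, so descendants of those children are
// skipped.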
2383struct ChildEntriesIter<'a> {
2384 parent_path: &'a Path,
2385 traversal: Traversal<'a>,
2386}
2387
2388impl<'a> Iterator for ChildEntriesIter<'a> {
2389 type Item = &'a Entry;
2390
2391 fn next(&mut self) -> Option<Self::Item> {
2392 if let Some(item) = self.traversal.entry() {
2393 if item.path.starts_with(&self.parent_path) {
2394 self.traversal.advance_to_sibling();
2395 return Some(item);
2396 }
2397 }
2398 None
2399 }
2400}
2401
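// Conversions between in-memory entries and their protobuf representation, used
// when sending and applying worktree updates over the wire.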
2402impl<'a> From<&'a Entry> for proto::Entry {
2403 fn from(entry: &'a Entry) -> Self {
2404 Self {
2405 id: entry.id as u64,
2406 is_dir: entry.is_dir(),
2407 path: entry.path.to_string_lossy().to_string(),
2408 inode: entry.inode,
2409 mtime: Some(entry.mtime.into()),
2410 is_symlink: entry.is_symlink,
2411 is_ignored: entry.is_ignored,
2412 }
2413 }
2414}
2415
2416impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2417 type Error = anyhow::Error;
2418
2419 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2420 if let Some(mtime) = entry.mtime {
2421 let kind = if entry.is_dir {
2422 EntryKind::Dir
2423 } else {
2424 let mut char_bag = root_char_bag.clone();
2425 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2426 EntryKind::File(char_bag)
2427 };
2428 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
2429 Ok(Entry {
2430 id: entry.id as usize,
2431 kind,
2432 path: path.clone(),
2433 inode: entry.inode,
2434 mtime: mtime.into(),
2435 is_symlink: entry.is_symlink,
2436 is_ignored: entry.is_ignored,
2437 })
2438 } else {
2439 Err(anyhow!(
2440 "missing mtime in remote worktree entry {:?}",
2441 entry.path
2442 ))
2443 }
2444 }
2445}
2446
2447#[cfg(test)]
2448mod tests {
2449 use super::*;
2450 use crate::fs::FakeFs;
2451 use anyhow::Result;
2452 use client::test::FakeHttpClient;
2453 use fs::RealFs;
2454 use rand::prelude::*;
2455 use serde_json::json;
2456 use std::{
2457 env,
2458 fmt::Write,
2459 time::{SystemTime, UNIX_EPOCH},
2460 };
2461 use util::{post_inc, test::temp_tree};
2462
2463 #[gpui::test]
2464 async fn test_traversal(cx: gpui::TestAppContext) {
2465 let fs = FakeFs::new(cx.background());
2466 fs.insert_tree(
2467 "/root",
2468 json!({
2469 ".gitignore": "a/b\n",
2470 "a": {
2471 "b": "",
2472 "c": "",
2473 }
2474 }),
2475 )
2476 .await;
2477
2478 let http_client = FakeHttpClient::with_404_response();
2479 let client = Client::new(http_client);
2480
2481 let tree = Worktree::local(
2482 client,
2483 Arc::from(Path::new("/root")),
2484 false,
2485 Arc::new(fs),
2486 &mut cx.to_async(),
2487 )
2488 .await
2489 .unwrap();
2490 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2491 .await;
2492
2493 tree.read_with(&cx, |tree, _| {
2494 assert_eq!(
2495 tree.entries(false)
2496 .map(|entry| entry.path.as_ref())
2497 .collect::<Vec<_>>(),
2498 vec![
2499 Path::new(""),
2500 Path::new(".gitignore"),
2501 Path::new("a"),
2502 Path::new("a/c"),
2503 ]
2504 );
2505 })
2506 }
2507
2508 #[gpui::test]
2509 async fn test_rescan_with_gitignore(cx: gpui::TestAppContext) {
2510 let dir = temp_tree(json!({
2511 ".git": {},
2512 ".gitignore": "ignored-dir\n",
2513 "tracked-dir": {
2514 "tracked-file1": "tracked contents",
2515 },
2516 "ignored-dir": {
2517 "ignored-file1": "ignored contents",
2518 }
2519 }));
2520
2521 let http_client = FakeHttpClient::with_404_response();
2522 let client = Client::new(http_client.clone());
2523
2524 let tree = Worktree::local(
2525 client,
2526 dir.path(),
2527 false,
2528 Arc::new(RealFs),
2529 &mut cx.to_async(),
2530 )
2531 .await
2532 .unwrap();
2533 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2534 .await;
2535 tree.flush_fs_events(&cx).await;
2536 cx.read(|cx| {
2537 let tree = tree.read(cx);
2538 let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
2539 let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
2540 assert_eq!(tracked.is_ignored, false);
2541 assert_eq!(ignored.is_ignored, true);
2542 });
2543
2544 std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
2545 std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
2546 tree.flush_fs_events(&cx).await;
2547 cx.read(|cx| {
2548 let tree = tree.read(cx);
2549 let dot_git = tree.entry_for_path(".git").unwrap();
2550 let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
2551 let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
2552 assert_eq!(tracked.is_ignored, false);
2553 assert_eq!(ignored.is_ignored, true);
2554 assert_eq!(dot_git.is_ignored, true);
2555 });
2556 }
2557
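// Randomized test: repeatedly mutate a real temporary directory, deliver the
// resulting FS events to a BackgroundScanner in random batches, and verify that
// (a) snapshot invariants hold after every batch, (b) a fresh scan of the final
// directory produces an identical snapshot, and (c) updates built against older
// snapshots bring them back in sync with the scanner's current snapshot.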
2558 #[gpui::test(iterations = 100)]
2559 fn test_random(mut rng: StdRng) {
2560 let operations = env::var("OPERATIONS")
2561 .map(|o| o.parse().unwrap())
2562 .unwrap_or(40);
2563 let initial_entries = env::var("INITIAL_ENTRIES")
2564 .map(|o| o.parse().unwrap())
2565 .unwrap_or(20);
2566
2567 let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
2568 for _ in 0..initial_entries {
2569 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
2570 }
2571 log::info!("Generated initial tree");
2572
2573 let (notify_tx, _notify_rx) = mpsc::unbounded();
2574 let fs = Arc::new(RealFs);
2575 let next_entry_id = Arc::new(AtomicUsize::new(0));
2576 let mut initial_snapshot = LocalSnapshot {
2577 abs_path: root_dir.path().into(),
2578 scan_id: 0,
2579 removed_entry_ids: Default::default(),
2580 ignores: Default::default(),
2581 next_entry_id: next_entry_id.clone(),
2582 snapshot: Snapshot {
2583 id: WorktreeId::from_usize(0),
2584 entries_by_path: Default::default(),
2585 entries_by_id: Default::default(),
2586 root_name: Default::default(),
2587 root_char_bag: Default::default(),
2588 },
2589 };
2590 initial_snapshot.insert_entry(
2591 Entry::new(
2592 Path::new("").into(),
2593 &smol::block_on(fs.metadata(root_dir.path()))
2594 .unwrap()
2595 .unwrap(),
2596 &next_entry_id,
2597 Default::default(),
2598 ),
2599 fs.as_ref(),
2600 );
2601 let mut scanner = BackgroundScanner::new(
2602 Arc::new(Mutex::new(initial_snapshot.clone())),
2603 notify_tx,
2604 fs.clone(),
2605 Arc::new(gpui::executor::Background::new()),
2606 );
2607 smol::block_on(scanner.scan_dirs()).unwrap();
2608 scanner.snapshot().check_invariants();
2609
2610 let mut events = Vec::new();
2611 let mut snapshots = Vec::new();
2612 let mut mutations_len = operations;
2613 while mutations_len > 1 {
2614 if !events.is_empty() && rng.gen_bool(0.4) {
2615 let len = rng.gen_range(0..=events.len());
2616 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
2617 log::info!("Delivering events: {:#?}", to_deliver);
2618 smol::block_on(scanner.process_events(to_deliver));
2619 scanner.snapshot().check_invariants();
2620 } else {
2621 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
2622 mutations_len -= 1;
2623 }
2624
2625 if rng.gen_bool(0.2) {
2626 snapshots.push(scanner.snapshot());
2627 }
2628 }
2629 log::info!("Quiescing: {:#?}", events);
2630 smol::block_on(scanner.process_events(events));
2631 scanner.snapshot().check_invariants();
2632
2633 let (notify_tx, _notify_rx) = mpsc::unbounded();
2634 let mut new_scanner = BackgroundScanner::new(
2635 Arc::new(Mutex::new(initial_snapshot)),
2636 notify_tx,
2637 scanner.fs.clone(),
2638 scanner.executor.clone(),
2639 );
2640 smol::block_on(new_scanner.scan_dirs()).unwrap();
2641 assert_eq!(
2642 scanner.snapshot().to_vec(true),
2643 new_scanner.snapshot().to_vec(true)
2644 );
2645
2646 let mut update_id = 0;
2647 for mut prev_snapshot in snapshots {
2648 let include_ignored = rng.gen::<bool>();
2649 if !include_ignored {
2650 let mut entries_by_path_edits = Vec::new();
2651 let mut entries_by_id_edits = Vec::new();
2652 for entry in prev_snapshot
2653 .entries_by_id
2654 .cursor::<()>()
2655 .filter(|e| e.is_ignored)
2656 {
2657 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
2658 entries_by_id_edits.push(Edit::Remove(entry.id));
2659 }
2660
2661 prev_snapshot
2662 .entries_by_path
2663 .edit(entries_by_path_edits, &());
2664 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
2665 }
2666
2667 let update = scanner.snapshot().build_update(
2668 &prev_snapshot,
2669 0,
2670 0,
2671 post_inc(&mut update_id),
2672 include_ignored,
2673 );
2674 prev_snapshot.apply_remote_update(update).unwrap();
2675 assert_eq!(
2676 prev_snapshot.to_vec(true),
2677 scanner.snapshot().to_vec(include_ignored)
2678 );
2679 }
2680 }
2681
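// Randomly create, rename, or delete files, directories, and .gitignore files
// beneath `root_path`, returning synthetic fsevent::Events describing the
// mutations so the test can replay them through the scanner.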
2682 fn randomly_mutate_tree(
2683 root_path: &Path,
2684 insertion_probability: f64,
2685 rng: &mut impl Rng,
2686 ) -> Result<Vec<fsevent::Event>> {
2687 let root_path = root_path.canonicalize().unwrap();
2688 let (dirs, files) = read_dir_recursive(root_path.clone());
2689
2690 let mut events = Vec::new();
2691 let mut record_event = |path: PathBuf| {
2692 events.push(fsevent::Event {
2693 event_id: SystemTime::now()
2694 .duration_since(UNIX_EPOCH)
2695 .unwrap()
2696 .as_secs(),
2697 flags: fsevent::StreamFlags::empty(),
2698 path,
2699 });
2700 };
2701
2702 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
2703 let path = dirs.choose(rng).unwrap();
2704 let new_path = path.join(gen_name(rng));
2705
2706 if rng.gen() {
2707 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
2708 std::fs::create_dir(&new_path)?;
2709 } else {
2710 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
2711 std::fs::write(&new_path, "")?;
2712 }
2713 record_event(new_path);
2714 } else if rng.gen_bool(0.05) {
2715 let ignore_dir_path = dirs.choose(rng).unwrap();
2716 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
2717
2718 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
2719 let files_to_ignore = {
2720 let len = rng.gen_range(0..=subfiles.len());
2721 subfiles.choose_multiple(rng, len)
2722 };
2723 let dirs_to_ignore = {
2724 let len = rng.gen_range(0..subdirs.len());
2725 subdirs.choose_multiple(rng, len)
2726 };
2727
2728 let mut ignore_contents = String::new();
2729 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
2730 writeln!(
2731 ignore_contents,
2732 "{}",
2733 path_to_ignore
2734 .strip_prefix(&ignore_dir_path)?
2735 .to_str()
2736 .unwrap()
2737 )
2738 .unwrap();
2739 }
2740 log::info!(
2741 "Creating {:?} with contents:\n{}",
2742 ignore_path.strip_prefix(&root_path)?,
2743 ignore_contents
2744 );
2745 std::fs::write(&ignore_path, ignore_contents).unwrap();
2746 record_event(ignore_path);
2747 } else {
2748 let old_path = {
2749 let file_path = files.choose(rng);
2750 let dir_path = dirs[1..].choose(rng);
2751 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
2752 };
2753
2754 let is_rename = rng.gen();
2755 if is_rename {
2756 let new_path_parent = dirs
2757 .iter()
2758 .filter(|d| !d.starts_with(old_path))
2759 .choose(rng)
2760 .unwrap();
2761
2762 let overwrite_existing_dir =
2763 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
2764 let new_path = if overwrite_existing_dir {
2765 std::fs::remove_dir_all(&new_path_parent).ok();
2766 new_path_parent.to_path_buf()
2767 } else {
2768 new_path_parent.join(gen_name(rng))
2769 };
2770
2771 log::info!(
2772 "Renaming {:?} to {}{:?}",
2773 old_path.strip_prefix(&root_path)?,
2774 if overwrite_existing_dir {
2775 "overwrite "
2776 } else {
2777 ""
2778 },
2779 new_path.strip_prefix(&root_path)?
2780 );
2781 std::fs::rename(&old_path, &new_path)?;
2782 record_event(old_path.clone());
2783 record_event(new_path);
2784 } else if old_path.is_dir() {
2785 let (dirs, files) = read_dir_recursive(old_path.clone());
2786
2787 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
2788 std::fs::remove_dir_all(&old_path).unwrap();
2789 for file in files {
2790 record_event(file);
2791 }
2792 for dir in dirs {
2793 record_event(dir);
2794 }
2795 } else {
2796 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
2797 std::fs::remove_file(old_path).unwrap();
2798 record_event(old_path.clone());
2799 }
2800 }
2801
2802 Ok(events)
2803 }
2804
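// Return every directory (including `path` itself) and every file beneath
// `path`, recursively.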
2805 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
2806 let child_entries = std::fs::read_dir(&path).unwrap();
2807 let mut dirs = vec![path];
2808 let mut files = Vec::new();
2809 for child_entry in child_entries {
2810 let child_path = child_entry.unwrap().path();
2811 if child_path.is_dir() {
2812 let (child_dirs, child_files) = read_dir_recursive(child_path);
2813 dirs.extend(child_dirs);
2814 files.extend(child_files);
2815 } else {
2816 files.push(child_path);
2817 }
2818 }
2819 (dirs, files)
2820 }
2821
2822 fn gen_name(rng: &mut impl Rng) -> String {
2823 (0..6)
2824 .map(|_| rng.sample(rand::distributions::Alphanumeric))
2825 .map(char::from)
2826 .collect()
2827 }
2828
2829 impl LocalSnapshot {
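// Verify internal consistency of a snapshot: the file iterators agree with the
// file entries in entries_by_path, walking the tree via child_entries visits
// the same paths in the same order as the entries_by_path cursor, and every
// cached gitignore still has both a directory entry and a .gitignore entry.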
2830 fn check_invariants(&self) {
2831 let mut files = self.files(true, 0);
2832 let mut visible_files = self.files(false, 0);
2833 for entry in self.entries_by_path.cursor::<()>() {
2834 if entry.is_file() {
2835 assert_eq!(files.next().unwrap().inode, entry.inode);
2836 if !entry.is_ignored {
2837 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
2838 }
2839 }
2840 }
2841 assert!(files.next().is_none());
2842 assert!(visible_files.next().is_none());
2843
2844 let mut bfs_paths = Vec::new();
2845 let mut stack = vec![Path::new("")];
2846 while let Some(path) = stack.pop() {
2847 bfs_paths.push(path);
2848 let ix = stack.len();
2849 for child_entry in self.child_entries(path) {
2850 stack.insert(ix, &child_entry.path);
2851 }
2852 }
2853
2854 let dfs_paths = self
2855 .entries_by_path
2856 .cursor::<()>()
2857 .map(|e| e.path.as_ref())
2858 .collect::<Vec<_>>();
2859 assert_eq!(bfs_paths, dfs_paths);
2860
2861 for (ignore_parent_path, _) in &self.ignores {
2862 assert!(self.entry_for_path(ignore_parent_path).is_some());
2863 assert!(self
2864 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
2865 .is_some());
2866 }
2867 }
2868
2869 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
2870 let mut paths = Vec::new();
2871 for entry in self.entries_by_path.cursor::<()>() {
2872 if include_ignored || !entry.is_ignored {
2873 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
2874 }
2875 }
2876 paths.sort_by(|a, b| a.0.cmp(&b.0));
2877 paths
2878 }
2879 }
2880}