1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, TypedEnvelope};
9use clock::ReplicaId;
10use collections::{HashMap, VecDeque};
11use futures::{
12 channel::mpsc::{self, UnboundedSender},
13 Stream, StreamExt,
14};
15use fuzzy::CharBag;
16use gpui::{
17 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
18 Task,
19};
20use language::{Buffer, DiagnosticEntry, Operation, PointUtf16, Rope};
21use lazy_static::lazy_static;
22use parking_lot::Mutex;
23use postage::{
24 oneshot,
25 prelude::{Sink as _, Stream as _},
26 watch,
27};
28use serde::Deserialize;
29use smol::channel::{self, Sender};
30use std::{
31 any::Any,
32 cmp::{self, Ordering},
33 convert::{TryFrom, TryInto},
34 ffi::{OsStr, OsString},
35 fmt,
36 future::Future,
37 ops::{Deref, DerefMut},
38 path::{Path, PathBuf},
39 sync::{
40 atomic::{AtomicUsize, Ordering::SeqCst},
41 Arc,
42 },
43 time::{Duration, SystemTime},
44};
45use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap};
46use util::{ResultExt, TryFutureExt};
47
48lazy_static! {
49 static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
50}
51
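/// Identifies a worktree within the application. Local worktrees derive this id from their
/// model handle, while remote worktrees use the id assigned by the host (see `from_proto`).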
52#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
53pub struct WorktreeId(usize);
54
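/// A worktree rooted at a single path. It is either backed directly by the local file system
/// or mirrors a collaborator's worktree, with updates arriving over RPC.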
55pub enum Worktree {
56 Local(LocalWorktree),
57 Remote(RemoteWorktree),
58}
59
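/// A worktree whose entries are produced by scanning the local file system. A background
/// scanner keeps `background_snapshot` up to date, and `snapshot` is the foreground copy
/// refreshed by `poll_snapshot`.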
60pub struct LocalWorktree {
61 snapshot: LocalSnapshot,
62 config: WorktreeConfig,
63 background_snapshot: Arc<Mutex<LocalSnapshot>>,
64 last_scan_state_rx: watch::Receiver<ScanState>,
65 _background_scanner_task: Option<Task<()>>,
66 poll_task: Option<Task<()>>,
67 registration: Registration,
68 share: Option<ShareState>,
69 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
70 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
71 queued_operations: Vec<(u64, Operation)>,
72 client: Arc<Client>,
73 fs: Arc<dyn Fs>,
74 weak: bool,
75}
76
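/// A replica of a worktree hosted by another collaborator. Entry updates arrive as
/// `proto::UpdateWorktree` messages and are applied to the snapshot on a background task.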
77pub struct RemoteWorktree {
78 pub(crate) snapshot: Snapshot,
79 project_id: u64,
80 snapshot_rx: watch::Receiver<Snapshot>,
81 client: Arc<Client>,
82 updates_tx: UnboundedSender<proto::UpdateWorktree>,
83 replica_id: ReplicaId,
84 queued_operations: Vec<(u64, Operation)>,
85 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
86 weak: bool,
87 pending_updates: VecDeque<proto::UpdateWorktree>,
88}
89
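/// A point-in-time view of a worktree's entries, indexed both by path (`entries_by_path`)
/// and by entry id (`entries_by_id`).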
90#[derive(Clone)]
91pub struct Snapshot {
92 id: WorktreeId,
93 root_name: String,
94 root_char_bag: CharBag,
95 entries_by_path: SumTree<Entry>,
96 entries_by_id: SumTree<PathEntry>,
97}
98
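/// A `Snapshot` augmented with the state needed to scan the local file system: the absolute
/// root path, the loaded `.gitignore` files, and bookkeeping for reusing entry ids within a
/// scan.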
99#[derive(Clone)]
100pub struct LocalSnapshot {
101 abs_path: Arc<Path>,
102 scan_id: usize,
103 ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
104 removed_entry_ids: HashMap<u64, usize>,
105 next_entry_id: Arc<AtomicUsize>,
106 snapshot: Snapshot,
107}
108
109impl Deref for LocalSnapshot {
110 type Target = Snapshot;
111
112 fn deref(&self) -> &Self::Target {
113 &self.snapshot
114 }
115}
116
117impl DerefMut for LocalSnapshot {
118 fn deref_mut(&mut self) -> &mut Self::Target {
119 &mut self.snapshot
120 }
121}
122
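/// The state reported by the background scanner over the scan-state channel.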
123#[derive(Clone, Debug)]
124enum ScanState {
125 Idle,
126 Scanning,
127 Err(Arc<anyhow::Error>),
128}
129
130#[derive(Debug, Eq, PartialEq)]
131enum Registration {
132 None,
133 Pending,
134 Done { project_id: u64 },
135}
136
137struct ShareState {
138 project_id: u64,
139 snapshots_tx: Sender<LocalSnapshot>,
140 _maintain_remote_snapshot: Option<Task<Option<()>>>,
141}
142
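/// Per-worktree configuration, loaded from a `.zed.toml` file at the worktree root if present.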
143#[derive(Default, Deserialize)]
144struct WorktreeConfig {
145 collaborators: Vec<String>,
146}
147
148pub enum Event {
149 UpdatedEntries,
150}
151
152impl Entity for Worktree {
153 type Event = Event;
154
155 fn release(&mut self, _: &mut MutableAppContext) {
156 if let Some(worktree) = self.as_local_mut() {
157 if let Registration::Done { project_id } = worktree.registration {
158 let client = worktree.client.clone();
159 let unregister_message = proto::UnregisterWorktree {
160 project_id,
161 worktree_id: worktree.id().to_proto(),
162 };
163 client.send(unregister_message).log_err();
164 }
165 }
166 }
167}
168
169impl Worktree {
170 pub async fn local(
171 client: Arc<Client>,
172 path: impl Into<Arc<Path>>,
173 weak: bool,
174 fs: Arc<dyn Fs>,
175 cx: &mut AsyncAppContext,
176 ) -> Result<ModelHandle<Self>> {
177 let (tree, scan_states_tx) = LocalWorktree::new(client, path, weak, fs.clone(), cx).await?;
178 tree.update(cx, |tree, cx| {
179 let tree = tree.as_local_mut().unwrap();
180 let abs_path = tree.abs_path().clone();
181 let background_snapshot = tree.background_snapshot.clone();
182 let background = cx.background().clone();
183 tree._background_scanner_task = Some(cx.background().spawn(async move {
184 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
185 let scanner =
186 BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
187 scanner.run(events).await;
188 }));
189 });
190 Ok(tree)
191 }
192
193 pub fn remote(
194 project_remote_id: u64,
195 replica_id: ReplicaId,
196 worktree: proto::Worktree,
197 client: Arc<Client>,
198 cx: &mut MutableAppContext,
199 ) -> (ModelHandle<Self>, Task<()>) {
200 let remote_id = worktree.id;
201 let root_char_bag: CharBag = worktree
202 .root_name
203 .chars()
204 .map(|c| c.to_ascii_lowercase())
205 .collect();
206 let root_name = worktree.root_name.clone();
207 let weak = worktree.weak;
208 let snapshot = Snapshot {
209 id: WorktreeId(remote_id as usize),
210 root_name,
211 root_char_bag,
212 entries_by_path: Default::default(),
213 entries_by_id: Default::default(),
214 };
215
216 let (updates_tx, mut updates_rx) = mpsc::unbounded();
217 let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());
218 let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
219 Worktree::Remote(RemoteWorktree {
220 project_id: project_remote_id,
221 replica_id,
222 snapshot: snapshot.clone(),
223 snapshot_rx: snapshot_rx.clone(),
224 updates_tx,
225 client: client.clone(),
226 queued_operations: Default::default(),
227 diagnostic_summaries: TreeMap::from_ordered_entries(
228 worktree.diagnostic_summaries.into_iter().map(|summary| {
229 (
230 PathKey(PathBuf::from(summary.path).into()),
231 DiagnosticSummary {
232 error_count: summary.error_count as usize,
233 warning_count: summary.warning_count as usize,
234 info_count: summary.info_count as usize,
235 hint_count: summary.hint_count as usize,
236 },
237 )
238 }),
239 ),
240 weak,
241 pending_updates: Default::default(),
242 })
243 });
244
245 let deserialize_task = cx.spawn({
246 let worktree_handle = worktree_handle.clone();
247 |cx| async move {
248 let (entries_by_path, entries_by_id) = cx
249 .background()
250 .spawn(async move {
251 let mut entries_by_path_edits = Vec::new();
252 let mut entries_by_id_edits = Vec::new();
253 for entry in worktree.entries {
254 match Entry::try_from((&root_char_bag, entry)) {
255 Ok(entry) => {
256 entries_by_id_edits.push(Edit::Insert(PathEntry {
257 id: entry.id,
258 path: entry.path.clone(),
259 is_ignored: entry.is_ignored,
260 scan_id: 0,
261 }));
262 entries_by_path_edits.push(Edit::Insert(entry));
263 }
264 Err(err) => log::warn!("error for remote worktree entry {:?}", err),
265 }
266 }
267
268 let mut entries_by_path = SumTree::new();
269 let mut entries_by_id = SumTree::new();
270 entries_by_path.edit(entries_by_path_edits, &());
271 entries_by_id.edit(entries_by_id_edits, &());
272
273 (entries_by_path, entries_by_id)
274 })
275 .await;
276
277 {
278 let mut snapshot = snapshot_tx.borrow_mut();
279 snapshot.entries_by_path = entries_by_path;
280 snapshot.entries_by_id = entries_by_id;
281 }
282
283 cx.background()
284 .spawn(async move {
285 while let Some(update) = updates_rx.next().await {
286 let mut snapshot = snapshot_tx.borrow().clone();
287 if let Err(error) = snapshot.apply_remote_update(update) {
288 log::error!("error applying worktree update: {}", error);
289 }
290 *snapshot_tx.borrow_mut() = snapshot;
291 }
292 })
293 .detach();
294
295 {
296 let mut snapshot_rx = snapshot_rx.clone();
297 let this = worktree_handle.downgrade();
298 cx.spawn(|mut cx| async move {
                        while snapshot_rx.recv().await.is_some() {
300 if let Some(this) = this.upgrade(&cx) {
301 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
302 } else {
303 break;
304 }
305 }
306 })
307 .detach();
308 }
309 }
310 });
311 (worktree_handle, deserialize_task)
312 }
313
314 pub fn as_local(&self) -> Option<&LocalWorktree> {
315 if let Worktree::Local(worktree) = self {
316 Some(worktree)
317 } else {
318 None
319 }
320 }
321
322 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
323 if let Worktree::Remote(worktree) = self {
324 Some(worktree)
325 } else {
326 None
327 }
328 }
329
330 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
331 if let Worktree::Local(worktree) = self {
332 Some(worktree)
333 } else {
334 None
335 }
336 }
337
338 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
339 if let Worktree::Remote(worktree) = self {
340 Some(worktree)
341 } else {
342 None
343 }
344 }
345
346 pub fn is_local(&self) -> bool {
347 matches!(self, Worktree::Local(_))
348 }
349
350 pub fn snapshot(&self) -> Snapshot {
351 match self {
352 Worktree::Local(worktree) => worktree.snapshot().snapshot,
353 Worktree::Remote(worktree) => worktree.snapshot(),
354 }
355 }
356
357 pub fn is_weak(&self) -> bool {
358 match self {
359 Worktree::Local(worktree) => worktree.weak,
360 Worktree::Remote(worktree) => worktree.weak,
361 }
362 }
363
364 pub fn replica_id(&self) -> ReplicaId {
365 match self {
366 Worktree::Local(_) => 0,
367 Worktree::Remote(worktree) => worktree.replica_id,
368 }
369 }
370
371 pub fn diagnostic_summaries<'a>(
372 &'a self,
373 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
374 match self {
375 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
376 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
377 }
378 .iter()
379 .map(|(path, summary)| (path.0.clone(), summary.clone()))
380 }
381
382 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
383 match self {
384 Self::Local(worktree) => {
385 let is_fake_fs = worktree.fs.is_fake();
386 worktree.snapshot = worktree.background_snapshot.lock().clone();
387 if worktree.is_scanning() {
388 if worktree.poll_task.is_none() {
389 worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
390 if is_fake_fs {
391 smol::future::yield_now().await;
392 } else {
393 smol::Timer::after(Duration::from_millis(100)).await;
394 }
395 this.update(&mut cx, |this, cx| {
396 this.as_local_mut().unwrap().poll_task = None;
397 this.poll_snapshot(cx);
398 })
399 }));
400 }
401 } else {
402 worktree.poll_task.take();
403 cx.emit(Event::UpdatedEntries);
404 }
405 }
406 Self::Remote(worktree) => {
407 worktree.snapshot = worktree.snapshot_rx.borrow().clone();
408 cx.emit(Event::UpdatedEntries);
409 }
410 };
411
412 cx.notify();
413 }
414
415 fn send_buffer_update(
416 &mut self,
417 buffer_id: u64,
418 operation: Operation,
419 cx: &mut ModelContext<Self>,
420 ) {
421 if let Some((project_id, rpc)) = match self {
422 Worktree::Local(worktree) => worktree
423 .share
424 .as_ref()
425 .map(|share| (share.project_id, worktree.client.clone())),
426 Worktree::Remote(worktree) => Some((worktree.project_id, worktree.client.clone())),
427 } {
428 cx.spawn(|worktree, mut cx| async move {
429 if let Err(error) = rpc
430 .request(proto::UpdateBuffer {
431 project_id,
432 buffer_id,
433 operations: vec![language::proto::serialize_operation(&operation)],
434 })
435 .await
436 {
437 worktree.update(&mut cx, |worktree, _| {
438 log::error!("error sending buffer operation: {}", error);
439 match worktree {
440 Worktree::Local(t) => &mut t.queued_operations,
441 Worktree::Remote(t) => &mut t.queued_operations,
442 }
443 .push((buffer_id, operation));
444 });
445 }
446 })
447 .detach();
448 }
449 }
450}
451
452impl LocalWorktree {
453 async fn new(
454 client: Arc<Client>,
455 path: impl Into<Arc<Path>>,
456 weak: bool,
457 fs: Arc<dyn Fs>,
458 cx: &mut AsyncAppContext,
459 ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
460 let abs_path = path.into();
461 let path: Arc<Path> = Arc::from(Path::new(""));
462 let next_entry_id = AtomicUsize::new(0);
463
464 // After determining whether the root entry is a file or a directory, populate the
465 // snapshot's "root name", which will be used for the purpose of fuzzy matching.
466 let root_name = abs_path
467 .file_name()
468 .map_or(String::new(), |f| f.to_string_lossy().to_string());
469 let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
470 let metadata = fs
471 .metadata(&abs_path)
472 .await
473 .context("failed to stat worktree path")?;
474
475 let mut config = WorktreeConfig::default();
476 if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
477 if let Ok(parsed) = toml::from_str(&zed_toml) {
478 config = parsed;
479 }
480 }
481
482 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
483 let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
484 let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
485 let mut snapshot = LocalSnapshot {
486 abs_path,
487 scan_id: 0,
488 ignores: Default::default(),
489 removed_entry_ids: Default::default(),
490 next_entry_id: Arc::new(next_entry_id),
491 snapshot: Snapshot {
492 id: WorktreeId::from_usize(cx.model_id()),
493 root_name: root_name.clone(),
494 root_char_bag,
495 entries_by_path: Default::default(),
496 entries_by_id: Default::default(),
497 },
498 };
499 if let Some(metadata) = metadata {
500 let entry = Entry::new(
501 path.into(),
502 &metadata,
503 &snapshot.next_entry_id,
504 snapshot.root_char_bag,
505 );
506 snapshot.insert_entry(entry, fs.as_ref());
507 }
508
509 let tree = Self {
510 snapshot: snapshot.clone(),
511 config,
512 background_snapshot: Arc::new(Mutex::new(snapshot)),
513 last_scan_state_rx,
514 _background_scanner_task: None,
515 registration: Registration::None,
516 share: None,
517 poll_task: None,
518 diagnostics: Default::default(),
519 diagnostic_summaries: Default::default(),
520 queued_operations: Default::default(),
521 client,
522 fs,
523 weak,
524 };
525
526 cx.spawn_weak(|this, mut cx| async move {
527 while let Some(scan_state) = scan_states_rx.next().await {
528 if let Some(handle) = this.upgrade(&cx) {
529 let to_send = handle.update(&mut cx, |this, cx| {
530 last_scan_state_tx.blocking_send(scan_state).ok();
531 this.poll_snapshot(cx);
532 let tree = this.as_local_mut().unwrap();
533 if !tree.is_scanning() {
534 if let Some(share) = tree.share.as_ref() {
535 return Some((tree.snapshot(), share.snapshots_tx.clone()));
536 }
537 }
538 None
539 });
540
541 if let Some((snapshot, snapshots_to_send_tx)) = to_send {
542 if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
543 log::error!("error submitting snapshot to send {}", err);
544 }
545 }
546 } else {
547 break;
548 }
549 }
550 })
551 .detach();
552
553 Worktree::Local(tree)
554 });
555
556 Ok((tree, scan_states_tx))
557 }
558
559 pub fn abs_path(&self) -> &Arc<Path> {
560 &self.abs_path
561 }
562
563 pub fn contains_abs_path(&self, path: &Path) -> bool {
564 path.starts_with(&self.abs_path)
565 }
566
567 fn absolutize(&self, path: &Path) -> PathBuf {
568 if path.file_name().is_some() {
569 self.abs_path.join(path)
570 } else {
571 self.abs_path.to_path_buf()
572 }
573 }
574
575 pub fn authorized_logins(&self) -> Vec<String> {
576 self.config.collaborators.clone()
577 }
578
579 pub(crate) fn load_buffer(
580 &mut self,
581 path: &Path,
582 cx: &mut ModelContext<Worktree>,
583 ) -> Task<Result<ModelHandle<Buffer>>> {
584 let path = Arc::from(path);
585 cx.spawn(move |this, mut cx| async move {
586 let (file, contents) = this
587 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
588 .await?;
589 Ok(cx.add_model(|cx| Buffer::from_file(0, contents, Box::new(file), cx)))
590 })
591 }
592
593 pub fn diagnostics_for_path(&self, path: &Path) -> Option<Vec<DiagnosticEntry<PointUtf16>>> {
594 self.diagnostics.get(path).cloned()
595 }
596
597 pub fn update_diagnostics(
598 &mut self,
599 worktree_path: Arc<Path>,
600 diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
601 _: &mut ModelContext<Worktree>,
602 ) -> Result<()> {
603 let summary = DiagnosticSummary::new(&diagnostics);
604 self.diagnostic_summaries
605 .insert(PathKey(worktree_path.clone()), summary.clone());
606 self.diagnostics.insert(worktree_path.clone(), diagnostics);
607
608 if let Some(share) = self.share.as_ref() {
609 self.client
610 .send(proto::UpdateDiagnosticSummary {
611 project_id: share.project_id,
612 worktree_id: self.id().to_proto(),
613 summary: Some(proto::DiagnosticSummary {
614 path: worktree_path.to_string_lossy().to_string(),
615 error_count: summary.error_count as u32,
616 warning_count: summary.warning_count as u32,
617 info_count: summary.info_count as u32,
618 hint_count: summary.hint_count as u32,
619 }),
620 })
621 .log_err();
622 }
623
624 Ok(())
625 }
626
627 pub fn scan_complete(&self) -> impl Future<Output = ()> {
628 let mut scan_state_rx = self.last_scan_state_rx.clone();
629 async move {
630 let mut scan_state = Some(scan_state_rx.borrow().clone());
631 while let Some(ScanState::Scanning) = scan_state {
632 scan_state = scan_state_rx.recv().await;
633 }
634 }
635 }
636
    fn is_scanning(&self) -> bool {
        matches!(*self.last_scan_state_rx.borrow(), ScanState::Scanning)
    }
644
645 pub fn snapshot(&self) -> LocalSnapshot {
646 self.snapshot.clone()
647 }
648
649 fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
650 let handle = cx.handle();
651 let path = Arc::from(path);
652 let abs_path = self.absolutize(&path);
653 let background_snapshot = self.background_snapshot.clone();
654 let fs = self.fs.clone();
655 cx.spawn(|this, mut cx| async move {
656 let text = fs.load(&abs_path).await?;
657 // Eagerly populate the snapshot with an updated entry for the loaded file
658 let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
659 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
660 Ok((
661 File {
662 entry_id: Some(entry.id),
663 worktree: handle,
664 path: entry.path,
665 mtime: entry.mtime,
666 is_local: true,
667 },
668 text,
669 ))
670 })
671 }
672
673 pub fn save_buffer_as(
674 &self,
675 buffer_handle: ModelHandle<Buffer>,
676 path: impl Into<Arc<Path>>,
677 cx: &mut ModelContext<Worktree>,
678 ) -> Task<Result<()>> {
679 let buffer = buffer_handle.read(cx);
680 let text = buffer.as_rope().clone();
681 let version = buffer.version();
682 let save = self.save(path, text, cx);
683 let handle = cx.handle();
684 cx.as_mut().spawn(|mut cx| async move {
685 let entry = save.await?;
686 let file = File {
687 entry_id: Some(entry.id),
688 worktree: handle,
689 path: entry.path,
690 mtime: entry.mtime,
691 is_local: true,
692 };
693
694 buffer_handle.update(&mut cx, |buffer, cx| {
695 buffer.did_save(version, file.mtime, Some(Box::new(file)), cx);
696 });
697
698 Ok(())
699 })
700 }
701
702 fn save(
703 &self,
704 path: impl Into<Arc<Path>>,
705 text: Rope,
706 cx: &mut ModelContext<Worktree>,
707 ) -> Task<Result<Entry>> {
708 let path = path.into();
709 let abs_path = self.absolutize(&path);
710 let background_snapshot = self.background_snapshot.clone();
711 let fs = self.fs.clone();
712 let save = cx.background().spawn(async move {
713 fs.save(&abs_path, &text).await?;
714 refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
715 });
716
717 cx.spawn(|this, mut cx| async move {
718 let entry = save.await?;
719 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
720 Ok(entry)
721 })
722 }
723
724 pub fn register(
725 &mut self,
726 project_id: u64,
727 cx: &mut ModelContext<Worktree>,
728 ) -> Task<anyhow::Result<()>> {
729 if self.registration != Registration::None {
730 return Task::ready(Ok(()));
731 }
732
733 self.registration = Registration::Pending;
734 let client = self.client.clone();
735 let register_message = proto::RegisterWorktree {
736 project_id,
737 worktree_id: self.id().to_proto(),
738 root_name: self.root_name().to_string(),
739 authorized_logins: self.authorized_logins(),
740 weak: self.weak,
741 };
742 cx.spawn(|this, mut cx| async move {
743 let response = client.request(register_message).await;
744 this.update(&mut cx, |this, _| {
745 let worktree = this.as_local_mut().unwrap();
746 match response {
747 Ok(_) => {
748 worktree.registration = Registration::Done { project_id };
749 Ok(())
750 }
751 Err(error) => {
752 worktree.registration = Registration::None;
753 Err(error)
754 }
755 }
756 })
757 })
758 }
759
760 pub fn share(
761 &mut self,
762 project_id: u64,
763 cx: &mut ModelContext<Worktree>,
764 ) -> impl Future<Output = Result<()>> {
765 let (mut share_tx, mut share_rx) = oneshot::channel();
766 if self.share.is_some() {
767 let _ = share_tx.try_send(Ok(()));
768 } else {
769 let snapshot = self.snapshot();
770 let rpc = self.client.clone();
771 let worktree_id = cx.model_id() as u64;
772 let (snapshots_to_send_tx, snapshots_to_send_rx) =
773 smol::channel::unbounded::<LocalSnapshot>();
774 let maintain_remote_snapshot = cx.background().spawn({
775 let rpc = rpc.clone();
776 let snapshot = snapshot.clone();
777 let diagnostic_summaries = self.diagnostic_summaries.clone();
778 async move {
779 if let Err(error) = rpc
780 .request(proto::UpdateWorktree {
781 project_id,
782 worktree_id,
783 root_name: snapshot.root_name().to_string(),
784 updated_entries: snapshot
785 .entries_by_path
786 .iter()
787 .filter(|e| !e.is_ignored)
788 .map(Into::into)
789 .collect(),
790 removed_entries: Default::default(),
791 })
792 .await
793 {
794 let _ = share_tx.try_send(Err(error));
                        return Err(anyhow!("failed to send initial worktree update"));
796 } else {
797 let _ = share_tx.try_send(Ok(()));
798 }
799
800 for (path, summary) in diagnostic_summaries.iter() {
801 rpc.send(proto::UpdateDiagnosticSummary {
802 project_id,
803 worktree_id,
804 summary: Some(summary.to_proto(&path.0)),
805 })?;
806 }
807
808 let mut prev_snapshot = snapshot;
809 while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
810 let message =
811 snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
812 rpc.request(message).await?;
813 prev_snapshot = snapshot;
814 }
815
816 Ok::<_, anyhow::Error>(())
817 }
818 .log_err()
819 });
820 self.share = Some(ShareState {
821 project_id,
822 snapshots_tx: snapshots_to_send_tx,
823 _maintain_remote_snapshot: Some(maintain_remote_snapshot),
824 });
825 }
826
827 async move {
828 share_rx
829 .next()
830 .await
831 .unwrap_or_else(|| Err(anyhow!("share ended")))
832 }
833 }
834
835 pub fn unshare(&mut self) {
836 self.share.take();
837 }
838
839 pub fn is_shared(&self) -> bool {
840 self.share.is_some()
841 }
842}
843
844impl RemoteWorktree {
845 fn snapshot(&self) -> Snapshot {
846 self.snapshot.clone()
847 }
848
849 pub fn update_from_remote(
850 &mut self,
851 envelope: TypedEnvelope<proto::UpdateWorktree>,
852 ) -> Result<()> {
853 self.updates_tx
854 .unbounded_send(envelope.payload)
855 .expect("consumer runs to completion");
856
857 Ok(())
858 }
859
860 pub fn has_pending_updates(&self) -> bool {
861 !self.pending_updates.is_empty()
862 }
863
864 pub fn update_diagnostic_summary(
865 &mut self,
866 path: Arc<Path>,
867 summary: &proto::DiagnosticSummary,
868 ) {
869 self.diagnostic_summaries.insert(
870 PathKey(path.clone()),
871 DiagnosticSummary {
872 error_count: summary.error_count as usize,
873 warning_count: summary.warning_count as usize,
874 info_count: summary.info_count as usize,
875 hint_count: summary.hint_count as usize,
876 },
877 );
878 }
879}
880
881impl Snapshot {
882 pub fn id(&self) -> WorktreeId {
883 self.id
884 }
885
886 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
887 let mut entries_by_path_edits = Vec::new();
888 let mut entries_by_id_edits = Vec::new();
889 for entry_id in update.removed_entries {
890 let entry_id = entry_id as usize;
891 let entry = self
892 .entry_for_id(entry_id)
893 .ok_or_else(|| anyhow!("unknown entry"))?;
894 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
895 entries_by_id_edits.push(Edit::Remove(entry.id));
896 }
897
898 for entry in update.updated_entries {
899 let entry = Entry::try_from((&self.root_char_bag, entry))?;
900 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
901 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
902 }
903 entries_by_id_edits.push(Edit::Insert(PathEntry {
904 id: entry.id,
905 path: entry.path.clone(),
906 is_ignored: entry.is_ignored,
907 scan_id: 0,
908 }));
909 entries_by_path_edits.push(Edit::Insert(entry));
910 }
911
912 self.entries_by_path.edit(entries_by_path_edits, &());
913 self.entries_by_id.edit(entries_by_id_edits, &());
914
915 Ok(())
916 }
917
918 pub fn file_count(&self) -> usize {
919 self.entries_by_path.summary().file_count
920 }
921
922 pub fn visible_file_count(&self) -> usize {
923 self.entries_by_path.summary().visible_file_count
924 }
925
926 fn traverse_from_offset(
927 &self,
928 include_dirs: bool,
929 include_ignored: bool,
930 start_offset: usize,
931 ) -> Traversal {
932 let mut cursor = self.entries_by_path.cursor();
933 cursor.seek(
934 &TraversalTarget::Count {
935 count: start_offset,
936 include_dirs,
937 include_ignored,
938 },
939 Bias::Right,
940 &(),
941 );
942 Traversal {
943 cursor,
944 include_dirs,
945 include_ignored,
946 }
947 }
948
949 fn traverse_from_path(
950 &self,
951 include_dirs: bool,
952 include_ignored: bool,
953 path: &Path,
954 ) -> Traversal {
955 let mut cursor = self.entries_by_path.cursor();
956 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
957 Traversal {
958 cursor,
959 include_dirs,
960 include_ignored,
961 }
962 }
963
964 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
965 self.traverse_from_offset(false, include_ignored, start)
966 }
967
968 pub fn entries(&self, include_ignored: bool) -> Traversal {
969 self.traverse_from_offset(true, include_ignored, 0)
970 }
971
972 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
973 let empty_path = Path::new("");
974 self.entries_by_path
975 .cursor::<()>()
976 .filter(move |entry| entry.path.as_ref() != empty_path)
977 .map(|entry| &entry.path)
978 }
979
980 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
981 let mut cursor = self.entries_by_path.cursor();
982 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
983 let traversal = Traversal {
984 cursor,
985 include_dirs: true,
986 include_ignored: true,
987 };
988 ChildEntriesIter {
989 traversal,
990 parent_path,
991 }
992 }
993
994 pub fn root_entry(&self) -> Option<&Entry> {
995 self.entry_for_path("")
996 }
997
998 pub fn root_name(&self) -> &str {
999 &self.root_name
1000 }
1001
1002 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1003 let path = path.as_ref();
1004 self.traverse_from_path(true, true, path)
1005 .entry()
1006 .and_then(|entry| {
1007 if entry.path.as_ref() == path {
1008 Some(entry)
1009 } else {
1010 None
1011 }
1012 })
1013 }
1014
1015 pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
1016 let entry = self.entries_by_id.get(&id, &())?;
1017 self.entry_for_path(&entry.path)
1018 }
1019
1020 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1021 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1022 }
1023}
1024
1025impl LocalSnapshot {
1026 #[cfg(test)]
1027 pub(crate) fn to_proto(
1028 &self,
1029 diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
1030 weak: bool,
1031 ) -> proto::Worktree {
1032 let root_name = self.root_name.clone();
1033 proto::Worktree {
1034 id: self.id.0 as u64,
1035 root_name,
1036 entries: self
1037 .entries_by_path
1038 .iter()
1039 .filter(|e| !e.is_ignored)
1040 .map(Into::into)
1041 .collect(),
1042 diagnostic_summaries: diagnostic_summaries
1043 .iter()
1044 .map(|(path, summary)| summary.to_proto(&path.0))
1045 .collect(),
1046 weak,
1047 }
1048 }
1049
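    /// Compute an `UpdateWorktree` message describing how this snapshot differs from `other`,
    /// an earlier snapshot that the remote peer has already seen. Entries are compared by id:
    /// ids present only here are reported as updated, ids present only in `other` as removed,
    /// and shared ids are re-sent when their `scan_id` has changed.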
1050 pub(crate) fn build_update(
1051 &self,
1052 other: &Self,
1053 project_id: u64,
1054 worktree_id: u64,
1055 include_ignored: bool,
1056 ) -> proto::UpdateWorktree {
1057 let mut updated_entries = Vec::new();
1058 let mut removed_entries = Vec::new();
1059 let mut self_entries = self
1060 .entries_by_id
1061 .cursor::<()>()
1062 .filter(|e| include_ignored || !e.is_ignored)
1063 .peekable();
1064 let mut other_entries = other
1065 .entries_by_id
1066 .cursor::<()>()
1067 .filter(|e| include_ignored || !e.is_ignored)
1068 .peekable();
1069 loop {
1070 match (self_entries.peek(), other_entries.peek()) {
1071 (Some(self_entry), Some(other_entry)) => {
1072 match Ord::cmp(&self_entry.id, &other_entry.id) {
1073 Ordering::Less => {
1074 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1075 updated_entries.push(entry);
1076 self_entries.next();
1077 }
1078 Ordering::Equal => {
1079 if self_entry.scan_id != other_entry.scan_id {
1080 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1081 updated_entries.push(entry);
1082 }
1083
1084 self_entries.next();
1085 other_entries.next();
1086 }
1087 Ordering::Greater => {
1088 removed_entries.push(other_entry.id as u64);
1089 other_entries.next();
1090 }
1091 }
1092 }
1093 (Some(self_entry), None) => {
1094 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1095 updated_entries.push(entry);
1096 self_entries.next();
1097 }
1098 (None, Some(other_entry)) => {
1099 removed_entries.push(other_entry.id as u64);
1100 other_entries.next();
1101 }
1102 (None, None) => break,
1103 }
1104 }
1105
1106 proto::UpdateWorktree {
1107 project_id,
1108 worktree_id,
1109 root_name: self.root_name().to_string(),
1110 updated_entries,
1111 removed_entries,
1112 }
1113 }
1114
1115 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1116 if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
1117 let abs_path = self.abs_path.join(&entry.path);
1118 match build_gitignore(&abs_path, fs) {
1119 Ok(ignore) => {
1120 let ignore_dir_path = entry.path.parent().unwrap();
1121 self.ignores
1122 .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
1123 }
1124 Err(error) => {
1125 log::error!(
1126 "error loading .gitignore file {:?} - {:?}",
1127 &entry.path,
1128 error
1129 );
1130 }
1131 }
1132 }
1133
1134 self.reuse_entry_id(&mut entry);
1135 self.entries_by_path.insert_or_replace(entry.clone(), &());
1136 let scan_id = self.scan_id;
1137 self.entries_by_id.insert_or_replace(
1138 PathEntry {
1139 id: entry.id,
1140 path: entry.path.clone(),
1141 is_ignored: entry.is_ignored,
1142 scan_id,
1143 },
1144 &(),
1145 );
1146 entry
1147 }
1148
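    /// Record the children of `parent_path` discovered by the background scanner, promoting
    /// the parent from `PendingDir` to `Dir` and registering any `.gitignore` loaded for it.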
1149 fn populate_dir(
1150 &mut self,
1151 parent_path: Arc<Path>,
1152 entries: impl IntoIterator<Item = Entry>,
1153 ignore: Option<Arc<Gitignore>>,
1154 ) {
1155 let mut parent_entry = self
1156 .entries_by_path
1157 .get(&PathKey(parent_path.clone()), &())
1158 .unwrap()
1159 .clone();
1160 if let Some(ignore) = ignore {
1161 self.ignores.insert(parent_path, (ignore, self.scan_id));
1162 }
1163 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1164 parent_entry.kind = EntryKind::Dir;
1165 } else {
1166 unreachable!();
1167 }
1168
1169 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1170 let mut entries_by_id_edits = Vec::new();
1171
1172 for mut entry in entries {
1173 self.reuse_entry_id(&mut entry);
1174 entries_by_id_edits.push(Edit::Insert(PathEntry {
1175 id: entry.id,
1176 path: entry.path.clone(),
1177 is_ignored: entry.is_ignored,
1178 scan_id: self.scan_id,
1179 }));
1180 entries_by_path_edits.push(Edit::Insert(entry));
1181 }
1182
1183 self.entries_by_path.edit(entries_by_path_edits, &());
1184 self.entries_by_id.edit(entries_by_id_edits, &());
1185 }
1186
1187 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1188 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1189 entry.id = removed_entry_id;
1190 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1191 entry.id = existing_entry.id;
1192 }
1193 }
1194
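    /// Remove `path` and all of its descendants from the snapshot. Removed entry ids are
    /// remembered by inode so they can be reused if the same file reappears later in the same
    /// batch of events, keeping entry ids stable across renames.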
1195 fn remove_path(&mut self, path: &Path) {
1196 let mut new_entries;
1197 let removed_entries;
1198 {
1199 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1200 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1201 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1202 new_entries.push_tree(cursor.suffix(&()), &());
1203 }
1204 self.entries_by_path = new_entries;
1205
1206 let mut entries_by_id_edits = Vec::new();
1207 for entry in removed_entries.cursor::<()>() {
1208 let removed_entry_id = self
1209 .removed_entry_ids
1210 .entry(entry.inode)
1211 .or_insert(entry.id);
1212 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1213 entries_by_id_edits.push(Edit::Remove(entry.id));
1214 }
1215 self.entries_by_id.edit(entries_by_id_edits, &());
1216
1217 if path.file_name() == Some(&GITIGNORE) {
1218 if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
1219 *scan_id = self.scan_id;
1220 }
1221 }
1222 }
1223
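    /// Build the stack of `.gitignore` files that apply to `path` by walking its ancestors
    /// from the root downward. Returns `IgnoreStack::all()` as soon as the path (or one of
    /// its ancestors) is itself ignored.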
1224 fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1225 let mut new_ignores = Vec::new();
1226 for ancestor in path.ancestors().skip(1) {
1227 if let Some((ignore, _)) = self.ignores.get(ancestor) {
1228 new_ignores.push((ancestor, Some(ignore.clone())));
1229 } else {
1230 new_ignores.push((ancestor, None));
1231 }
1232 }
1233
1234 let mut ignore_stack = IgnoreStack::none();
1235 for (parent_path, ignore) in new_ignores.into_iter().rev() {
1236 if ignore_stack.is_path_ignored(&parent_path, true) {
1237 ignore_stack = IgnoreStack::all();
1238 break;
1239 } else if let Some(ignore) = ignore {
1240 ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
1241 }
1242 }
1243
1244 if ignore_stack.is_path_ignored(path, is_dir) {
1245 ignore_stack = IgnoreStack::all();
1246 }
1247
1248 ignore_stack
1249 }
1250}
1251
1252fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1253 let contents = smol::block_on(fs.load(&abs_path))?;
1254 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1255 let mut builder = GitignoreBuilder::new(parent);
1256 for line in contents.lines() {
1257 builder.add_line(Some(abs_path.into()), line)?;
1258 }
1259 Ok(builder.build()?)
1260}
1261
1262impl WorktreeId {
1263 pub fn from_usize(handle_id: usize) -> Self {
1264 Self(handle_id)
1265 }
1266
1267 pub(crate) fn from_proto(id: u64) -> Self {
1268 Self(id as usize)
1269 }
1270
1271 pub fn to_proto(&self) -> u64 {
1272 self.0 as u64
1273 }
1274
1275 pub fn to_usize(&self) -> usize {
1276 self.0
1277 }
1278}
1279
1280impl fmt::Display for WorktreeId {
1281 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1282 self.0.fmt(f)
1283 }
1284}
1285
1286impl Deref for Worktree {
1287 type Target = Snapshot;
1288
1289 fn deref(&self) -> &Self::Target {
1290 match self {
1291 Worktree::Local(worktree) => &worktree.snapshot,
1292 Worktree::Remote(worktree) => &worktree.snapshot,
1293 }
1294 }
1295}
1296
1297impl Deref for LocalWorktree {
1298 type Target = LocalSnapshot;
1299
1300 fn deref(&self) -> &Self::Target {
1301 &self.snapshot
1302 }
1303}
1304
1305impl Deref for RemoteWorktree {
1306 type Target = Snapshot;
1307
1308 fn deref(&self) -> &Self::Target {
1309 &self.snapshot
1310 }
1311}
1312
1313impl fmt::Debug for LocalWorktree {
1314 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1315 self.snapshot.fmt(f)
1316 }
1317}
1318
1319impl fmt::Debug for Snapshot {
1320 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1321 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1322 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1323
1324 impl<'a> fmt::Debug for EntriesByPath<'a> {
1325 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1326 f.debug_map()
1327 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1328 .finish()
1329 }
1330 }
1331
1332 impl<'a> fmt::Debug for EntriesById<'a> {
1333 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1334 f.debug_list().entries(self.0.iter()).finish()
1335 }
1336 }
1337
1338 f.debug_struct("Snapshot")
1339 .field("id", &self.id)
1340 .field("root_name", &self.root_name)
1341 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1342 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1343 .finish()
1344 }
1345}
1346
1347#[derive(Clone, PartialEq)]
1348pub struct File {
1349 pub worktree: ModelHandle<Worktree>,
1350 pub path: Arc<Path>,
1351 pub mtime: SystemTime,
1352 pub(crate) entry_id: Option<usize>,
1353 pub(crate) is_local: bool,
1354}
1355
1356impl language::File for File {
1357 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1358 if self.is_local {
1359 Some(self)
1360 } else {
1361 None
1362 }
1363 }
1364
1365 fn mtime(&self) -> SystemTime {
1366 self.mtime
1367 }
1368
1369 fn path(&self) -> &Arc<Path> {
1370 &self.path
1371 }
1372
1373 fn full_path(&self, cx: &AppContext) -> PathBuf {
1374 let mut full_path = PathBuf::new();
1375 full_path.push(self.worktree.read(cx).root_name());
1376 if self.path.components().next().is_some() {
1377 full_path.push(&self.path);
1378 }
1379 full_path
1380 }
1381
1382 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1383 /// of its worktree, then this method will return the name of the worktree itself.
1384 fn file_name(&self, cx: &AppContext) -> OsString {
1385 self.path
1386 .file_name()
1387 .map(|name| name.into())
1388 .unwrap_or_else(|| OsString::from(&self.worktree.read(cx).root_name))
1389 }
1390
1391 fn is_deleted(&self) -> bool {
1392 self.entry_id.is_none()
1393 }
1394
1395 fn save(
1396 &self,
1397 buffer_id: u64,
1398 text: Rope,
1399 version: clock::Global,
1400 cx: &mut MutableAppContext,
1401 ) -> Task<Result<(clock::Global, SystemTime)>> {
1402 self.worktree.update(cx, |worktree, cx| match worktree {
1403 Worktree::Local(worktree) => {
1404 let rpc = worktree.client.clone();
1405 let project_id = worktree.share.as_ref().map(|share| share.project_id);
1406 let save = worktree.save(self.path.clone(), text, cx);
1407 cx.background().spawn(async move {
1408 let entry = save.await?;
1409 if let Some(project_id) = project_id {
1410 rpc.send(proto::BufferSaved {
1411 project_id,
1412 buffer_id,
1413 version: (&version).into(),
1414 mtime: Some(entry.mtime.into()),
1415 })?;
1416 }
1417 Ok((version, entry.mtime))
1418 })
1419 }
1420 Worktree::Remote(worktree) => {
1421 let rpc = worktree.client.clone();
1422 let project_id = worktree.project_id;
1423 cx.foreground().spawn(async move {
1424 let response = rpc
1425 .request(proto::SaveBuffer {
1426 project_id,
1427 buffer_id,
1428 version: (&version).into(),
1429 })
1430 .await?;
1431 let version = response.version.try_into()?;
1432 let mtime = response
1433 .mtime
1434 .ok_or_else(|| anyhow!("missing mtime"))?
1435 .into();
1436 Ok((version, mtime))
1437 })
1438 }
1439 })
1440 }
1441
1442 fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
1443 self.worktree.update(cx, |worktree, cx| {
1444 worktree.send_buffer_update(buffer_id, operation, cx);
1445 });
1446 }
1447
1448 fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
1449 self.worktree.update(cx, |worktree, _| {
1450 if let Worktree::Remote(worktree) = worktree {
1451 worktree
1452 .client
1453 .send(proto::CloseBuffer {
1454 project_id: worktree.project_id,
1455 buffer_id,
1456 })
1457 .log_err();
1458 }
1459 });
1460 }
1461
1462 fn as_any(&self) -> &dyn Any {
1463 self
1464 }
1465
1466 fn to_proto(&self) -> rpc::proto::File {
1467 rpc::proto::File {
1468 worktree_id: self.worktree.id() as u64,
1469 entry_id: self.entry_id.map(|entry_id| entry_id as u64),
1470 path: self.path.to_string_lossy().into(),
1471 mtime: Some(self.mtime.into()),
1472 }
1473 }
1474}
1475
1476impl language::LocalFile for File {
1477 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1478 self.worktree
1479 .read(cx)
1480 .as_local()
1481 .unwrap()
1482 .abs_path
1483 .join(&self.path)
1484 }
1485
1486 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1487 let worktree = self.worktree.read(cx).as_local().unwrap();
1488 let abs_path = worktree.absolutize(&self.path);
1489 let fs = worktree.fs.clone();
1490 cx.background()
1491 .spawn(async move { fs.load(&abs_path).await })
1492 }
1493
1494 fn buffer_reloaded(
1495 &self,
1496 buffer_id: u64,
1497 version: &clock::Global,
1498 mtime: SystemTime,
1499 cx: &mut MutableAppContext,
1500 ) {
1501 let worktree = self.worktree.read(cx).as_local().unwrap();
1502 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1503 worktree
1504 .client
1505 .send(proto::BufferReloaded {
1506 project_id,
1507 buffer_id,
1508 version: version.into(),
1509 mtime: Some(mtime.into()),
1510 })
1511 .log_err();
1512 }
1513 }
1514}
1515
1516impl File {
1517 pub fn from_proto(
1518 proto: rpc::proto::File,
1519 worktree: ModelHandle<Worktree>,
1520 cx: &AppContext,
1521 ) -> Result<Self> {
1522 let worktree_id = worktree
1523 .read(cx)
1524 .as_remote()
1525 .ok_or_else(|| anyhow!("not remote"))?
1526 .id();
1527
1528 if worktree_id.to_proto() != proto.worktree_id {
1529 return Err(anyhow!("worktree id does not match file"));
1530 }
1531
1532 Ok(Self {
1533 worktree,
1534 path: Path::new(&proto.path).into(),
1535 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1536 entry_id: proto.entry_id.map(|entry_id| entry_id as usize),
1537 is_local: false,
1538 })
1539 }
1540
1541 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
1542 file.and_then(|f| f.as_any().downcast_ref())
1543 }
1544
1545 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1546 self.worktree.read(cx).id()
1547 }
1548}
1549
1550#[derive(Clone, Debug, PartialEq, Eq)]
1551pub struct Entry {
1552 pub id: usize,
1553 pub kind: EntryKind,
1554 pub path: Arc<Path>,
1555 pub inode: u64,
1556 pub mtime: SystemTime,
1557 pub is_symlink: bool,
1558 pub is_ignored: bool,
1559}
1560
1561#[derive(Clone, Debug, PartialEq, Eq)]
1562pub enum EntryKind {
1563 PendingDir,
1564 Dir,
1565 File(CharBag),
1566}
1567
1568impl Entry {
1569 fn new(
1570 path: Arc<Path>,
1571 metadata: &fs::Metadata,
1572 next_entry_id: &AtomicUsize,
1573 root_char_bag: CharBag,
1574 ) -> Self {
1575 Self {
1576 id: next_entry_id.fetch_add(1, SeqCst),
1577 kind: if metadata.is_dir {
1578 EntryKind::PendingDir
1579 } else {
1580 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1581 },
1582 path,
1583 inode: metadata.inode,
1584 mtime: metadata.mtime,
1585 is_symlink: metadata.is_symlink,
1586 is_ignored: false,
1587 }
1588 }
1589
1590 pub fn is_dir(&self) -> bool {
1591 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
1592 }
1593
1594 pub fn is_file(&self) -> bool {
1595 matches!(self.kind, EntryKind::File(_))
1596 }
1597}
1598
1599impl sum_tree::Item for Entry {
1600 type Summary = EntrySummary;
1601
1602 fn summary(&self) -> Self::Summary {
1603 let visible_count = if self.is_ignored { 0 } else { 1 };
1604 let file_count;
1605 let visible_file_count;
1606 if self.is_file() {
1607 file_count = 1;
1608 visible_file_count = visible_count;
1609 } else {
1610 file_count = 0;
1611 visible_file_count = 0;
1612 }
1613
1614 EntrySummary {
1615 max_path: self.path.clone(),
1616 count: 1,
1617 visible_count,
1618 file_count,
1619 visible_file_count,
1620 }
1621 }
1622}
1623
1624impl sum_tree::KeyedItem for Entry {
1625 type Key = PathKey;
1626
1627 fn key(&self) -> Self::Key {
1628 PathKey(self.path.clone())
1629 }
1630}
1631
1632#[derive(Clone, Debug)]
1633pub struct EntrySummary {
1634 max_path: Arc<Path>,
1635 count: usize,
1636 visible_count: usize,
1637 file_count: usize,
1638 visible_file_count: usize,
1639}
1640
1641impl Default for EntrySummary {
1642 fn default() -> Self {
1643 Self {
1644 max_path: Arc::from(Path::new("")),
1645 count: 0,
1646 visible_count: 0,
1647 file_count: 0,
1648 visible_file_count: 0,
1649 }
1650 }
1651}
1652
1653impl sum_tree::Summary for EntrySummary {
1654 type Context = ();
1655
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
1662}
1663
1664#[derive(Clone, Debug)]
1665struct PathEntry {
1666 id: usize,
1667 path: Arc<Path>,
1668 is_ignored: bool,
1669 scan_id: usize,
1670}
1671
1672impl sum_tree::Item for PathEntry {
1673 type Summary = PathEntrySummary;
1674
1675 fn summary(&self) -> Self::Summary {
1676 PathEntrySummary { max_id: self.id }
1677 }
1678}
1679
1680impl sum_tree::KeyedItem for PathEntry {
1681 type Key = usize;
1682
1683 fn key(&self) -> Self::Key {
1684 self.id
1685 }
1686}
1687
1688#[derive(Clone, Debug, Default)]
1689struct PathEntrySummary {
1690 max_id: usize,
1691}
1692
1693impl sum_tree::Summary for PathEntrySummary {
1694 type Context = ();
1695
1696 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
1697 self.max_id = summary.max_id;
1698 }
1699}
1700
1701impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
1702 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
1703 *self = summary.max_id;
1704 }
1705}
1706
1707#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1708pub struct PathKey(Arc<Path>);
1709
1710impl Default for PathKey {
1711 fn default() -> Self {
1712 Self(Path::new("").into())
1713 }
1714}
1715
1716impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
1717 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
1718 self.0 = summary.max_path.clone();
1719 }
1720}
1721
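/// Scans a local worktree on the background executor, keeping the shared `LocalSnapshot`
/// up to date and reporting progress through the `notify` channel as `ScanState` values.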
1722struct BackgroundScanner {
1723 fs: Arc<dyn Fs>,
1724 snapshot: Arc<Mutex<LocalSnapshot>>,
1725 notify: UnboundedSender<ScanState>,
1726 executor: Arc<executor::Background>,
1727}
1728
1729impl BackgroundScanner {
1730 fn new(
1731 snapshot: Arc<Mutex<LocalSnapshot>>,
1732 notify: UnboundedSender<ScanState>,
1733 fs: Arc<dyn Fs>,
1734 executor: Arc<executor::Background>,
1735 ) -> Self {
1736 Self {
1737 fs,
1738 snapshot,
1739 notify,
1740 executor,
1741 }
1742 }
1743
1744 fn abs_path(&self) -> Arc<Path> {
1745 self.snapshot.lock().abs_path.clone()
1746 }
1747
1748 fn snapshot(&self) -> LocalSnapshot {
1749 self.snapshot.lock().clone()
1750 }
1751
1752 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
1753 if self.notify.unbounded_send(ScanState::Scanning).is_err() {
1754 return;
1755 }
1756
1757 if let Err(err) = self.scan_dirs().await {
1758 if self
1759 .notify
1760 .unbounded_send(ScanState::Err(Arc::new(err)))
1761 .is_err()
1762 {
1763 return;
1764 }
1765 }
1766
1767 if self.notify.unbounded_send(ScanState::Idle).is_err() {
1768 return;
1769 }
1770
1771 futures::pin_mut!(events_rx);
1772 while let Some(events) = events_rx.next().await {
1773 if self.notify.unbounded_send(ScanState::Scanning).is_err() {
1774 break;
1775 }
1776
1777 if !self.process_events(events).await {
1778 break;
1779 }
1780
1781 if self.notify.unbounded_send(ScanState::Idle).is_err() {
1782 break;
1783 }
1784 }
1785 }
1786
1787 async fn scan_dirs(&mut self) -> Result<()> {
1788 let root_char_bag;
1789 let next_entry_id;
1790 let is_dir;
1791 {
1792 let snapshot = self.snapshot.lock();
1793 root_char_bag = snapshot.root_char_bag;
1794 next_entry_id = snapshot.next_entry_id.clone();
1795 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
1796 };
1797
1798 if is_dir {
1799 let path: Arc<Path> = Arc::from(Path::new(""));
1800 let abs_path = self.abs_path();
1801 let (tx, rx) = channel::unbounded();
1802 tx.send(ScanJob {
1803 abs_path: abs_path.to_path_buf(),
1804 path,
1805 ignore_stack: IgnoreStack::none(),
1806 scan_queue: tx.clone(),
1807 })
1808 .await
1809 .unwrap();
1810 drop(tx);
1811
1812 self.executor
1813 .scoped(|scope| {
1814 for _ in 0..self.executor.num_cpus() {
1815 scope.spawn(async {
1816 while let Ok(job) = rx.recv().await {
1817 if let Err(err) = self
1818 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
1819 .await
1820 {
1821 log::error!("error scanning {:?}: {}", job.abs_path, err);
1822 }
1823 }
1824 });
1825 }
1826 })
1827 .await;
1828 }
1829
1830 Ok(())
1831 }
1832
1833 async fn scan_dir(
1834 &self,
1835 root_char_bag: CharBag,
1836 next_entry_id: Arc<AtomicUsize>,
1837 job: &ScanJob,
1838 ) -> Result<()> {
1839 let mut new_entries: Vec<Entry> = Vec::new();
1840 let mut new_jobs: Vec<ScanJob> = Vec::new();
1841 let mut ignore_stack = job.ignore_stack.clone();
1842 let mut new_ignore = None;
1843
1844 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
1845 while let Some(child_abs_path) = child_paths.next().await {
1846 let child_abs_path = match child_abs_path {
1847 Ok(child_abs_path) => child_abs_path,
1848 Err(error) => {
1849 log::error!("error processing entry {:?}", error);
1850 continue;
1851 }
1852 };
1853 let child_name = child_abs_path.file_name().unwrap();
1854 let child_path: Arc<Path> = job.path.join(child_name).into();
1855 let child_metadata = match self.fs.metadata(&child_abs_path).await? {
1856 Some(metadata) => metadata,
1857 None => continue,
1858 };
1859
1860 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
1861 if child_name == *GITIGNORE {
1862 match build_gitignore(&child_abs_path, self.fs.as_ref()) {
1863 Ok(ignore) => {
1864 let ignore = Arc::new(ignore);
1865 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
1866 new_ignore = Some(ignore);
1867 }
1868 Err(error) => {
1869 log::error!(
1870 "error loading .gitignore file {:?} - {:?}",
1871 child_name,
1872 error
1873 );
1874 }
1875 }
1876
                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // there should rarely be many of them. Update the ignore stack associated with any
                // new jobs as well.
1881 let mut new_jobs = new_jobs.iter_mut();
1882 for entry in &mut new_entries {
1883 entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
1884 if entry.is_dir() {
1885 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
1886 IgnoreStack::all()
1887 } else {
1888 ignore_stack.clone()
1889 };
1890 }
1891 }
1892 }
1893
1894 let mut child_entry = Entry::new(
1895 child_path.clone(),
1896 &child_metadata,
1897 &next_entry_id,
1898 root_char_bag,
1899 );
1900
1901 if child_metadata.is_dir {
1902 let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
1903 child_entry.is_ignored = is_ignored;
1904 new_entries.push(child_entry);
1905 new_jobs.push(ScanJob {
1906 abs_path: child_abs_path,
1907 path: child_path,
1908 ignore_stack: if is_ignored {
1909 IgnoreStack::all()
1910 } else {
1911 ignore_stack.clone()
1912 },
1913 scan_queue: job.scan_queue.clone(),
1914 });
1915 } else {
1916 child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
1917 new_entries.push(child_entry);
1918 };
1919 }
1920
1921 self.snapshot
1922 .lock()
1923 .populate_dir(job.path.clone(), new_entries, new_ignore);
1924 for new_job in new_jobs {
1925 job.scan_queue.send(new_job).await.unwrap();
1926 }
1927
1928 Ok(())
1929 }
1930
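    /// Apply a batch of file-system events to the snapshot: affected paths are removed,
    /// re-statted, and re-inserted, newly discovered directories are scanned in parallel, and
    /// ignore statuses are refreshed. Returns `false` if the worktree root can no longer be
    /// canonicalized, which stops the scanner.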
1931 async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
1932 let mut snapshot = self.snapshot();
1933 snapshot.scan_id += 1;
1934
1935 let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
1936 abs_path
1937 } else {
1938 return false;
1939 };
1940 let root_char_bag = snapshot.root_char_bag;
1941 let next_entry_id = snapshot.next_entry_id.clone();
1942
1943 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
1944 events.dedup_by(|a, b| a.path.starts_with(&b.path));
1945
1946 for event in &events {
1947 match event.path.strip_prefix(&root_abs_path) {
1948 Ok(path) => snapshot.remove_path(&path),
1949 Err(_) => {
1950 log::error!(
1951 "unexpected event {:?} for root path {:?}",
1952 event.path,
1953 root_abs_path
1954 );
1955 continue;
1956 }
1957 }
1958 }
1959
1960 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
1961 for event in events {
1962 let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
1963 Ok(path) => Arc::from(path.to_path_buf()),
1964 Err(_) => {
1965 log::error!(
1966 "unexpected event {:?} for root path {:?}",
1967 event.path,
1968 root_abs_path
1969 );
1970 continue;
1971 }
1972 };
1973
1974 match self.fs.metadata(&event.path).await {
1975 Ok(Some(metadata)) => {
1976 let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
1977 let mut fs_entry = Entry::new(
1978 path.clone(),
1979 &metadata,
1980 snapshot.next_entry_id.as_ref(),
1981 snapshot.root_char_bag,
1982 );
1983 fs_entry.is_ignored = ignore_stack.is_all();
1984 snapshot.insert_entry(fs_entry, self.fs.as_ref());
1985 if metadata.is_dir {
1986 scan_queue_tx
1987 .send(ScanJob {
1988 abs_path: event.path,
1989 path,
1990 ignore_stack,
1991 scan_queue: scan_queue_tx.clone(),
1992 })
1993 .await
1994 .unwrap();
1995 }
1996 }
1997 Ok(None) => {}
1998 Err(err) => {
1999 // TODO - create a special 'error' entry in the entries tree to mark this
2000 log::error!("error reading file on event {:?}", err);
2001 }
2002 }
2003 }
2004
2005 *self.snapshot.lock() = snapshot;
2006
2007 // Scan any directories that were created as part of this event batch.
2008 drop(scan_queue_tx);
2009 self.executor
2010 .scoped(|scope| {
2011 for _ in 0..self.executor.num_cpus() {
2012 scope.spawn(async {
2013 while let Ok(job) = scan_queue_rx.recv().await {
2014 if let Err(err) = self
2015 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2016 .await
2017 {
2018 log::error!("error scanning {:?}: {}", job.abs_path, err);
2019 }
2020 }
2021 });
2022 }
2023 })
2024 .await;
2025
2026 // Attempt to detect renames only over a single batch of file-system events.
2027 self.snapshot.lock().removed_entry_ids.clear();
2028
2029 self.update_ignore_statuses().await;
2030 true
2031 }
2032
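    /// After `.gitignore` files change, recompute the ignore status of the affected subtrees.
    /// Ignore entries whose `.gitignore` file no longer exists are dropped, and the remaining
    /// dirty directories are re-walked in parallel.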
2033 async fn update_ignore_statuses(&self) {
2034 let mut snapshot = self.snapshot();
2035
2036 let mut ignores_to_update = Vec::new();
2037 let mut ignores_to_delete = Vec::new();
2038 for (parent_path, (_, scan_id)) in &snapshot.ignores {
2039 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2040 ignores_to_update.push(parent_path.clone());
2041 }
2042
2043 let ignore_path = parent_path.join(&*GITIGNORE);
2044 if snapshot.entry_for_path(ignore_path).is_none() {
2045 ignores_to_delete.push(parent_path.clone());
2046 }
2047 }
2048
2049 for parent_path in ignores_to_delete {
2050 snapshot.ignores.remove(&parent_path);
2051 self.snapshot.lock().ignores.remove(&parent_path);
2052 }
2053
2054 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2055 ignores_to_update.sort_unstable();
2056 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2057 while let Some(parent_path) = ignores_to_update.next() {
2058 while ignores_to_update
2059 .peek()
2060 .map_or(false, |p| p.starts_with(&parent_path))
2061 {
2062 ignores_to_update.next().unwrap();
2063 }
2064
2065 let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
2066 ignore_queue_tx
2067 .send(UpdateIgnoreStatusJob {
2068 path: parent_path,
2069 ignore_stack,
2070 ignore_queue: ignore_queue_tx.clone(),
2071 })
2072 .await
2073 .unwrap();
2074 }
2075 drop(ignore_queue_tx);
2076
2077 self.executor
2078 .scoped(|scope| {
2079 for _ in 0..self.executor.num_cpus() {
2080 scope.spawn(async {
2081 while let Ok(job) = ignore_queue_rx.recv().await {
2082 self.update_ignore_status(job, &snapshot).await;
2083 }
2084 });
2085 }
2086 })
2087 .await;
2088 }
2089
2090 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2091 let mut ignore_stack = job.ignore_stack;
2092 if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
2093 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2094 }
2095
2096 let mut entries_by_id_edits = Vec::new();
2097 let mut entries_by_path_edits = Vec::new();
2098 for mut entry in snapshot.child_entries(&job.path).cloned() {
2099 let was_ignored = entry.is_ignored;
2100 entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2101 if entry.is_dir() {
2102 let child_ignore_stack = if entry.is_ignored {
2103 IgnoreStack::all()
2104 } else {
2105 ignore_stack.clone()
2106 };
2107 job.ignore_queue
2108 .send(UpdateIgnoreStatusJob {
2109 path: entry.path.clone(),
2110 ignore_stack: child_ignore_stack,
2111 ignore_queue: job.ignore_queue.clone(),
2112 })
2113 .await
2114 .unwrap();
2115 }
2116
2117 if entry.is_ignored != was_ignored {
2118 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2119 path_entry.scan_id = snapshot.scan_id;
2120 path_entry.is_ignored = entry.is_ignored;
2121 entries_by_id_edits.push(Edit::Insert(path_entry));
2122 entries_by_path_edits.push(Edit::Insert(entry));
2123 }
2124 }
2125
2126 let mut snapshot = self.snapshot.lock();
2127 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2128 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2129 }
2130}
2131
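/// Re-reads the metadata for `abs_path` and inserts an up-to-date `Entry` for
/// `path` into the snapshot, returning the inserted entry. Fails if the file
/// can no longer be stat'ed.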
2132async fn refresh_entry(
2133 fs: &dyn Fs,
2134 snapshot: &Mutex<LocalSnapshot>,
2135 path: Arc<Path>,
2136 abs_path: &Path,
2137) -> Result<Entry> {
2138 let root_char_bag;
2139 let next_entry_id;
2140 {
2141 let snapshot = snapshot.lock();
2142 root_char_bag = snapshot.root_char_bag;
2143 next_entry_id = snapshot.next_entry_id.clone();
2144 }
2145 let entry = Entry::new(
2146 path,
2147 &fs.metadata(abs_path)
2148 .await?
2149 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2150 &next_entry_id,
2151 root_char_bag,
2152 );
2153 Ok(snapshot.lock().insert_entry(entry, fs))
2154}
2155
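/// Builds the fuzzy-matching `CharBag` for an entry by extending the worktree
/// root's bag with the lowercased characters of the entry's path. For example,
/// a path of `src/Main.rs` contributes `s`, `r`, `c`, `/`, `m`, `a`, `i`, `n`,
/// and `.` in addition to the root's own characters.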
2156fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2157 let mut result = root_char_bag;
2158 result.extend(
2159 path.to_string_lossy()
2160 .chars()
2161 .map(|c| c.to_ascii_lowercase()),
2162 );
2163 result
2164}
2165
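/// A single directory to scan, along with the ignore stack inherited from its
/// ancestors and the queue on which any newly-discovered subdirectories are
/// enqueued.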
2166struct ScanJob {
2167 abs_path: PathBuf,
2168 path: Arc<Path>,
2169 ignore_stack: Arc<IgnoreStack>,
2170 scan_queue: Sender<ScanJob>,
2171}
2172
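/// A single directory whose children need their ignore status re-evaluated
/// against `ignore_stack`, plus the queue used to fan out into its
/// subdirectories.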
2173struct UpdateIgnoreStatusJob {
2174 path: Arc<Path>,
2175 ignore_stack: Arc<IgnoreStack>,
2176 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2177}
2178
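/// Test-support extensions for `ModelHandle<Worktree>`.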
2179pub trait WorktreeHandle {
2180 #[cfg(any(test, feature = "test-support"))]
2181 fn flush_fs_events<'a>(
2182 &self,
2183 cx: &'a gpui::TestAppContext,
2184 ) -> futures::future::LocalBoxFuture<'a, ()>;
2185}
2186
2187impl WorktreeHandle for ModelHandle<Worktree> {
2188 // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
2189 // occurred before the worktree was constructed. These events can cause the worktree to perform
2190 // extra directory scans and emit extra scan-state notifications.
2191 //
2192 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2193 // to ensure that all redundant FS events have already been processed.
2194 #[cfg(any(test, feature = "test-support"))]
2195 fn flush_fs_events<'a>(
2196 &self,
2197 cx: &'a gpui::TestAppContext,
2198 ) -> futures::future::LocalBoxFuture<'a, ()> {
2199 use smol::future::FutureExt;
2200
2201 let filename = "fs-event-sentinel";
2202 let tree = self.clone();
2203 let (fs, root_path) = self.read_with(cx, |tree, _| {
2204 let tree = tree.as_local().unwrap();
2205 (tree.fs.clone(), tree.abs_path().clone())
2206 });
2207
2208 async move {
2209 fs.create_file(&root_path.join(filename), Default::default())
2210 .await
2211 .unwrap();
2212 tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
2213 .await;
2214
2215 fs.remove_file(&root_path.join(filename), Default::default())
2216 .await
2217 .unwrap();
2218 tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
2219 .await;
2220
2221 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2222 .await;
2223 }
2224 .boxed_local()
2225 }
2226}
2227
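/// Counts accumulated while seeking through the entry tree, used by
/// `Traversal` to translate between entries and offsets for each combination
/// of the "include directories" and "include ignored" flags.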
2228#[derive(Clone, Debug)]
2229struct TraversalProgress<'a> {
2230 max_path: &'a Path,
2231 count: usize,
2232 visible_count: usize,
2233 file_count: usize,
2234 visible_file_count: usize,
2235}
2236
2237impl<'a> TraversalProgress<'a> {
2238 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2239 match (include_ignored, include_dirs) {
2240 (true, true) => self.count,
2241 (true, false) => self.file_count,
2242 (false, true) => self.visible_count,
2243 (false, false) => self.visible_file_count,
2244 }
2245 }
2246}
2247
2248impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2249 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2250 self.max_path = summary.max_path.as_ref();
2251 self.count += summary.count;
2252 self.visible_count += summary.visible_count;
2253 self.file_count += summary.file_count;
2254 self.visible_file_count += summary.visible_file_count;
2255 }
2256}
2257
2258impl<'a> Default for TraversalProgress<'a> {
2259 fn default() -> Self {
2260 Self {
2261 max_path: Path::new(""),
2262 count: 0,
2263 visible_count: 0,
2264 file_count: 0,
2265 visible_file_count: 0,
2266 }
2267 }
2268}
2269
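/// A cursor over a snapshot's entries in path order, optionally skipping
/// directories and/or ignored entries, consumable as an `Iterator<Item = &Entry>`.
/// Illustrative sketch (mirroring the `entries(false)` call in the tests below):
///
/// ```ignore
/// // Collect the paths of all non-ignored entries, in traversal order.
/// let paths: Vec<_> = tree.entries(false).map(|entry| entry.path.clone()).collect();
/// ```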
2270pub struct Traversal<'a> {
2271 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2272 include_ignored: bool,
2273 include_dirs: bool,
2274}
2275
2276impl<'a> Traversal<'a> {
2277 pub fn advance(&mut self) -> bool {
2278 self.advance_to_offset(self.offset() + 1)
2279 }
2280
2281 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2282 self.cursor.seek_forward(
2283 &TraversalTarget::Count {
2284 count: offset,
2285 include_dirs: self.include_dirs,
2286 include_ignored: self.include_ignored,
2287 },
2288 Bias::Right,
2289 &(),
2290 )
2291 }
2292
2293 pub fn advance_to_sibling(&mut self) -> bool {
2294 while let Some(entry) = self.cursor.item() {
2295 self.cursor.seek_forward(
2296 &TraversalTarget::PathSuccessor(&entry.path),
2297 Bias::Left,
2298 &(),
2299 );
2300 if let Some(entry) = self.cursor.item() {
2301 if (self.include_dirs || !entry.is_dir())
2302 && (self.include_ignored || !entry.is_ignored)
2303 {
2304 return true;
2305 }
2306 }
2307 }
2308 false
2309 }
2310
2311 pub fn entry(&self) -> Option<&'a Entry> {
2312 self.cursor.item()
2313 }
2314
2315 pub fn offset(&self) -> usize {
2316 self.cursor
2317 .start()
2318 .count(self.include_dirs, self.include_ignored)
2319 }
2320}
2321
2322impl<'a> Iterator for Traversal<'a> {
2323 type Item = &'a Entry;
2324
2325 fn next(&mut self) -> Option<Self::Item> {
2326 if let Some(item) = self.entry() {
2327 self.advance();
2328 Some(item)
2329 } else {
2330 None
2331 }
2332 }
2333}
2334
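/// A seek target for `Traversal`: an exact path, the first position outside a
/// path's subtree (`PathSuccessor`), or an absolute offset counted under the
/// given include flags.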
2335#[derive(Debug)]
2336enum TraversalTarget<'a> {
2337 Path(&'a Path),
2338 PathSuccessor(&'a Path),
2339 Count {
2340 count: usize,
2341 include_ignored: bool,
2342 include_dirs: bool,
2343 },
2344}
2345
2346impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2347 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2348 match self {
2349 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2350 TraversalTarget::PathSuccessor(path) => {
2351 if !cursor_location.max_path.starts_with(path) {
2352 Ordering::Equal
2353 } else {
2354 Ordering::Greater
2355 }
2356 }
2357 TraversalTarget::Count {
2358 count,
2359 include_dirs,
2360 include_ignored,
2361 } => Ord::cmp(
2362 count,
2363 &cursor_location.count(*include_dirs, *include_ignored),
2364 ),
2365 }
2366 }
2367}
2368
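/// Iterates the direct children of `parent_path` by repeatedly advancing the
/// underlying traversal to the next sibling, stopping at the first entry that
/// falls outside the parent's subtree.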
2369struct ChildEntriesIter<'a> {
2370 parent_path: &'a Path,
2371 traversal: Traversal<'a>,
2372}
2373
2374impl<'a> Iterator for ChildEntriesIter<'a> {
2375 type Item = &'a Entry;
2376
2377 fn next(&mut self) -> Option<Self::Item> {
2378 if let Some(item) = self.traversal.entry() {
2379 if item.path.starts_with(&self.parent_path) {
2380 self.traversal.advance_to_sibling();
2381 return Some(item);
2382 }
2383 }
2384 None
2385 }
2386}
2387
2388impl<'a> From<&'a Entry> for proto::Entry {
2389 fn from(entry: &'a Entry) -> Self {
2390 Self {
2391 id: entry.id as u64,
2392 is_dir: entry.is_dir(),
2393 path: entry.path.to_string_lossy().to_string(),
2394 inode: entry.inode,
2395 mtime: Some(entry.mtime.into()),
2396 is_symlink: entry.is_symlink,
2397 is_ignored: entry.is_ignored,
2398 }
2399 }
2400}
2401
2402impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2403 type Error = anyhow::Error;
2404
2405 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2406 if let Some(mtime) = entry.mtime {
2407 let kind = if entry.is_dir {
2408 EntryKind::Dir
2409 } else {
2410 let mut char_bag = root_char_bag.clone();
2411 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2412 EntryKind::File(char_bag)
2413 };
2414 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
2415 Ok(Entry {
2416 id: entry.id as usize,
2417 kind,
2418 path: path.clone(),
2419 inode: entry.inode,
2420 mtime: mtime.into(),
2421 is_symlink: entry.is_symlink,
2422 is_ignored: entry.is_ignored,
2423 })
2424 } else {
2425 Err(anyhow!(
2426 "missing mtime in remote worktree entry {:?}",
2427 entry.path
2428 ))
2429 }
2430 }
2431}
2432
2433#[cfg(test)]
2434mod tests {
2435 use super::*;
2436 use crate::fs::FakeFs;
2437 use anyhow::Result;
2438 use client::test::FakeHttpClient;
2439 use fs::RealFs;
2440 use rand::prelude::*;
2441 use serde_json::json;
2442 use std::{
2443 env,
2444 fmt::Write,
2445 time::{SystemTime, UNIX_EPOCH},
2446 };
2447 use util::test::temp_tree;
2448
2449 #[gpui::test]
2450 async fn test_traversal(cx: gpui::TestAppContext) {
2451 let fs = FakeFs::new(cx.background());
2452 fs.insert_tree(
2453 "/root",
2454 json!({
2455 ".gitignore": "a/b\n",
2456 "a": {
2457 "b": "",
2458 "c": "",
2459 }
2460 }),
2461 )
2462 .await;
2463
2464 let http_client = FakeHttpClient::with_404_response();
2465 let client = Client::new(http_client);
2466
2467 let tree = Worktree::local(
2468 client,
2469 Arc::from(Path::new("/root")),
2470 false,
2471 fs,
2472 &mut cx.to_async(),
2473 )
2474 .await
2475 .unwrap();
2476 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2477 .await;
2478
2479 tree.read_with(&cx, |tree, _| {
2480 assert_eq!(
2481 tree.entries(false)
2482 .map(|entry| entry.path.as_ref())
2483 .collect::<Vec<_>>(),
2484 vec![
2485 Path::new(""),
2486 Path::new(".gitignore"),
2487 Path::new("a"),
2488 Path::new("a/c"),
2489 ]
2490 );
2491 })
2492 }
2493
2494 #[gpui::test]
2495 async fn test_rescan_with_gitignore(cx: gpui::TestAppContext) {
2496 let dir = temp_tree(json!({
2497 ".git": {},
2498 ".gitignore": "ignored-dir\n",
2499 "tracked-dir": {
2500 "tracked-file1": "tracked contents",
2501 },
2502 "ignored-dir": {
2503 "ignored-file1": "ignored contents",
2504 }
2505 }));
2506
2507 let http_client = FakeHttpClient::with_404_response();
2508 let client = Client::new(http_client.clone());
2509
2510 let tree = Worktree::local(
2511 client,
2512 dir.path(),
2513 false,
2514 Arc::new(RealFs),
2515 &mut cx.to_async(),
2516 )
2517 .await
2518 .unwrap();
2519 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2520 .await;
2521 tree.flush_fs_events(&cx).await;
2522 cx.read(|cx| {
2523 let tree = tree.read(cx);
2524 let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
2525 let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
2526 assert_eq!(tracked.is_ignored, false);
2527 assert_eq!(ignored.is_ignored, true);
2528 });
2529
2530 std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
2531 std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
2532 tree.flush_fs_events(&cx).await;
2533 cx.read(|cx| {
2534 let tree = tree.read(cx);
2535 let dot_git = tree.entry_for_path(".git").unwrap();
2536 let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
2537 let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
2538 assert_eq!(tracked.is_ignored, false);
2539 assert_eq!(ignored.is_ignored, true);
2540 assert_eq!(dot_git.is_ignored, true);
2541 });
2542 }
2543
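    // Randomized test: repeatedly mutates a temporary directory tree while
    // delivering the resulting FS events to the scanner in arbitrary batches,
    // checking the snapshot's invariants after each batch and finally
    // verifying that a fresh scan and a replayed remote update both converge
    // to the same state.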
2544 #[gpui::test(iterations = 100)]
2545 fn test_random(mut rng: StdRng) {
2546 let operations = env::var("OPERATIONS")
2547 .map(|o| o.parse().unwrap())
2548 .unwrap_or(40);
2549 let initial_entries = env::var("INITIAL_ENTRIES")
2550 .map(|o| o.parse().unwrap())
2551 .unwrap_or(20);
2552
2553 let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
2554 for _ in 0..initial_entries {
2555 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
2556 }
2557 log::info!("Generated initial tree");
2558
2559 let (notify_tx, _notify_rx) = mpsc::unbounded();
2560 let fs = Arc::new(RealFs);
2561 let next_entry_id = Arc::new(AtomicUsize::new(0));
2562 let mut initial_snapshot = LocalSnapshot {
2563 abs_path: root_dir.path().into(),
2564 scan_id: 0,
2565 removed_entry_ids: Default::default(),
2566 ignores: Default::default(),
2567 next_entry_id: next_entry_id.clone(),
2568 snapshot: Snapshot {
2569 id: WorktreeId::from_usize(0),
2570 entries_by_path: Default::default(),
2571 entries_by_id: Default::default(),
2572 root_name: Default::default(),
2573 root_char_bag: Default::default(),
2574 },
2575 };
2576 initial_snapshot.insert_entry(
2577 Entry::new(
2578 Path::new("").into(),
2579 &smol::block_on(fs.metadata(root_dir.path()))
2580 .unwrap()
2581 .unwrap(),
2582 &next_entry_id,
2583 Default::default(),
2584 ),
2585 fs.as_ref(),
2586 );
2587 let mut scanner = BackgroundScanner::new(
2588 Arc::new(Mutex::new(initial_snapshot.clone())),
2589 notify_tx,
2590 fs.clone(),
2591 Arc::new(gpui::executor::Background::new()),
2592 );
2593 smol::block_on(scanner.scan_dirs()).unwrap();
2594 scanner.snapshot().check_invariants();
2595
2596 let mut events = Vec::new();
2597 let mut snapshots = Vec::new();
2598 let mut mutations_len = operations;
2599 while mutations_len > 1 {
2600 if !events.is_empty() && rng.gen_bool(0.4) {
2601 let len = rng.gen_range(0..=events.len());
2602 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
2603 log::info!("Delivering events: {:#?}", to_deliver);
2604 smol::block_on(scanner.process_events(to_deliver));
2605 scanner.snapshot().check_invariants();
2606 } else {
2607 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
2608 mutations_len -= 1;
2609 }
2610
2611 if rng.gen_bool(0.2) {
2612 snapshots.push(scanner.snapshot());
2613 }
2614 }
2615 log::info!("Quiescing: {:#?}", events);
2616 smol::block_on(scanner.process_events(events));
2617 scanner.snapshot().check_invariants();
2618
2619 let (notify_tx, _notify_rx) = mpsc::unbounded();
2620 let mut new_scanner = BackgroundScanner::new(
2621 Arc::new(Mutex::new(initial_snapshot)),
2622 notify_tx,
2623 scanner.fs.clone(),
2624 scanner.executor.clone(),
2625 );
2626 smol::block_on(new_scanner.scan_dirs()).unwrap();
2627 assert_eq!(
2628 scanner.snapshot().to_vec(true),
2629 new_scanner.snapshot().to_vec(true)
2630 );
2631
2632 for mut prev_snapshot in snapshots {
2633 let include_ignored = rng.gen::<bool>();
2634 if !include_ignored {
2635 let mut entries_by_path_edits = Vec::new();
2636 let mut entries_by_id_edits = Vec::new();
2637 for entry in prev_snapshot
2638 .entries_by_id
2639 .cursor::<()>()
2640 .filter(|e| e.is_ignored)
2641 {
2642 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
2643 entries_by_id_edits.push(Edit::Remove(entry.id));
2644 }
2645
2646 prev_snapshot
2647 .entries_by_path
2648 .edit(entries_by_path_edits, &());
2649 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
2650 }
2651
2652 let update = scanner
2653 .snapshot()
2654 .build_update(&prev_snapshot, 0, 0, include_ignored);
2655 prev_snapshot.apply_remote_update(update).unwrap();
2656 assert_eq!(
2657 prev_snapshot.to_vec(true),
2658 scanner.snapshot().to_vec(include_ignored)
2659 );
2660 }
2661 }
2662
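    // Applies one random mutation to the directory tree (creating a file or
    // directory, writing a `.gitignore`, renaming, or deleting), returning
    // synthetic `fsevent::Event`s describing the paths that changed.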
2663 fn randomly_mutate_tree(
2664 root_path: &Path,
2665 insertion_probability: f64,
2666 rng: &mut impl Rng,
2667 ) -> Result<Vec<fsevent::Event>> {
2668 let root_path = root_path.canonicalize().unwrap();
2669 let (dirs, files) = read_dir_recursive(root_path.clone());
2670
2671 let mut events = Vec::new();
2672 let mut record_event = |path: PathBuf| {
2673 events.push(fsevent::Event {
2674 event_id: SystemTime::now()
2675 .duration_since(UNIX_EPOCH)
2676 .unwrap()
2677 .as_secs(),
2678 flags: fsevent::StreamFlags::empty(),
2679 path,
2680 });
2681 };
2682
2683 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
2684 let path = dirs.choose(rng).unwrap();
2685 let new_path = path.join(gen_name(rng));
2686
2687 if rng.gen() {
2688 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
2689 std::fs::create_dir(&new_path)?;
2690 } else {
2691 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
2692 std::fs::write(&new_path, "")?;
2693 }
2694 record_event(new_path);
2695 } else if rng.gen_bool(0.05) {
2696 let ignore_dir_path = dirs.choose(rng).unwrap();
2697 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
2698
2699 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
2700 let files_to_ignore = {
2701 let len = rng.gen_range(0..=subfiles.len());
2702 subfiles.choose_multiple(rng, len)
2703 };
2704 let dirs_to_ignore = {
2705 let len = rng.gen_range(0..subdirs.len());
2706 subdirs.choose_multiple(rng, len)
2707 };
2708
2709 let mut ignore_contents = String::new();
2710 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
2711 writeln!(
2712 ignore_contents,
2713 "{}",
2714 path_to_ignore
2715 .strip_prefix(&ignore_dir_path)?
2716 .to_str()
2717 .unwrap()
2718 )
2719 .unwrap();
2720 }
2721 log::info!(
2722 "Creating {:?} with contents:\n{}",
2723 ignore_path.strip_prefix(&root_path)?,
2724 ignore_contents
2725 );
2726 std::fs::write(&ignore_path, ignore_contents).unwrap();
2727 record_event(ignore_path);
2728 } else {
2729 let old_path = {
2730 let file_path = files.choose(rng);
2731 let dir_path = dirs[1..].choose(rng);
2732 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
2733 };
2734
2735 let is_rename = rng.gen();
2736 if is_rename {
2737 let new_path_parent = dirs
2738 .iter()
2739 .filter(|d| !d.starts_with(old_path))
2740 .choose(rng)
2741 .unwrap();
2742
2743 let overwrite_existing_dir =
2744 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
2745 let new_path = if overwrite_existing_dir {
2746 std::fs::remove_dir_all(&new_path_parent).ok();
2747 new_path_parent.to_path_buf()
2748 } else {
2749 new_path_parent.join(gen_name(rng))
2750 };
2751
2752 log::info!(
2753 "Renaming {:?} to {}{:?}",
2754 old_path.strip_prefix(&root_path)?,
2755 if overwrite_existing_dir {
2756 "overwrite "
2757 } else {
2758 ""
2759 },
2760 new_path.strip_prefix(&root_path)?
2761 );
2762 std::fs::rename(&old_path, &new_path)?;
2763 record_event(old_path.clone());
2764 record_event(new_path);
2765 } else if old_path.is_dir() {
2766 let (dirs, files) = read_dir_recursive(old_path.clone());
2767
2768 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
2769 std::fs::remove_dir_all(&old_path).unwrap();
2770 for file in files {
2771 record_event(file);
2772 }
2773 for dir in dirs {
2774 record_event(dir);
2775 }
2776 } else {
2777 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
2778 std::fs::remove_file(old_path).unwrap();
2779 record_event(old_path.clone());
2780 }
2781 }
2782
2783 Ok(events)
2784 }
2785
2786 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
2787 let child_entries = std::fs::read_dir(&path).unwrap();
2788 let mut dirs = vec![path];
2789 let mut files = Vec::new();
2790 for child_entry in child_entries {
2791 let child_path = child_entry.unwrap().path();
2792 if child_path.is_dir() {
2793 let (child_dirs, child_files) = read_dir_recursive(child_path);
2794 dirs.extend(child_dirs);
2795 files.extend(child_files);
2796 } else {
2797 files.push(child_path);
2798 }
2799 }
2800 (dirs, files)
2801 }
2802
2803 fn gen_name(rng: &mut impl Rng) -> String {
2804 (0..6)
2805 .map(|_| rng.sample(rand::distributions::Alphanumeric))
2806 .map(char::from)
2807 .collect()
2808 }
2809
2810 impl LocalSnapshot {
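        /// Asserts internal consistency of the snapshot: the file iterators
        /// agree with `entries_by_path`, walking the tree via `child_entries`
        /// visits the same paths in the same order as iterating
        /// `entries_by_path` directly, and every tracked gitignore still has
        /// entries for both its directory and its `.gitignore` file.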
2811 fn check_invariants(&self) {
2812 let mut files = self.files(true, 0);
2813 let mut visible_files = self.files(false, 0);
2814 for entry in self.entries_by_path.cursor::<()>() {
2815 if entry.is_file() {
2816 assert_eq!(files.next().unwrap().inode, entry.inode);
2817 if !entry.is_ignored {
2818 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
2819 }
2820 }
2821 }
2822 assert!(files.next().is_none());
2823 assert!(visible_files.next().is_none());
2824
2825 let mut bfs_paths = Vec::new();
2826 let mut stack = vec![Path::new("")];
2827 while let Some(path) = stack.pop() {
2828 bfs_paths.push(path);
2829 let ix = stack.len();
2830 for child_entry in self.child_entries(path) {
2831 stack.insert(ix, &child_entry.path);
2832 }
2833 }
2834
2835 let dfs_paths = self
2836 .entries_by_path
2837 .cursor::<()>()
2838 .map(|e| e.path.as_ref())
2839 .collect::<Vec<_>>();
2840 assert_eq!(bfs_paths, dfs_paths);
2841
2842 for (ignore_parent_path, _) in &self.ignores {
2843 assert!(self.entry_for_path(ignore_parent_path).is_some());
2844 assert!(self
2845 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
2846 .is_some());
2847 }
2848 }
2849
2850 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
2851 let mut paths = Vec::new();
2852 for entry in self.entries_by_path.cursor::<()>() {
2853 if include_ignored || !entry.is_ignored {
2854 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
2855 }
2856 }
2857 paths.sort_by(|a, b| a.0.cmp(&b.0));
2858 paths
2859 }
2860 }
2861}