1mod char_bag;
2mod fuzzy;
3mod ignore;
4
5use self::{char_bag::CharBag, ignore::IgnoreStack};
6use crate::{
7 editor::{self, Buffer, History, Operation, Rope},
8 language::LanguageRegistry,
9 rpc::{self, proto},
10 sum_tree::{self, Cursor, Edit, SumTree},
11 time::{self, ReplicaId},
12 util::Bias,
13};
14use ::ignore::gitignore::Gitignore;
15use anyhow::{anyhow, Context, Result};
16use atomic::Ordering::SeqCst;
17pub use fuzzy::{match_paths, PathMatch};
18use gpui::{
19 scoped_pool, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
20 Task, WeakModelHandle,
21};
22use lazy_static::lazy_static;
23use parking_lot::Mutex;
24use postage::{
25 prelude::{Sink, Stream},
26 watch,
27};
28use smol::{
29 channel::Sender,
30 io::{AsyncReadExt, AsyncWriteExt},
31};
32use std::{
33 cmp::{self, Ordering},
34 collections::HashMap,
35 convert::{TryFrom, TryInto},
36 ffi::{OsStr, OsString},
37 fmt, fs,
38 future::Future,
39 io,
40 ops::Deref,
41 os::unix::fs::MetadataExt,
42 path::{Path, PathBuf},
43 sync::{
44 atomic::{self, AtomicUsize},
45 Arc,
46 },
47 time::{Duration, SystemTime},
48};
49use zed_rpc::{PeerId, TypedEnvelope};
50
lazy_static! {
    // Cached `OsStr` for the gitignore filename; compared against entry
    // file names when deciding whether to (re)load ignore rules.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
54
/// Registers all worktree-related RPC message handlers on the given client.
pub fn init(cx: &mut MutableAppContext, rpc: rpc::Client) {
    rpc.on_message(remote::add_peer, cx);
    rpc.on_message(remote::remove_peer, cx);
    rpc.on_message(remote::update_worktree, cx);
    rpc.on_message(remote::open_buffer, cx);
    rpc.on_message(remote::close_buffer, cx);
    rpc.on_message(remote::update_buffer, cx);
    rpc.on_message(remote::buffer_saved, cx);
    rpc.on_message(remote::save_buffer, cx);
}
65
/// Progress of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan in progress.
    Idle,
    /// A scan is currently running on the background thread.
    Scanning,
    /// The most recent scan failed with this I/O error.
    Err(Arc<io::Error>),
}
72
/// A tree of files being edited, rooted either on this machine (`Local`)
/// or on a collaborator's machine and mirrored over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
77
impl Entity for Worktree {
    type Event = ();

    /// When the model is released, deregister the worktree from the RPC
    /// client's shared-worktree table and tell the server it was closed.
    fn release(&mut self, cx: &mut MutableAppContext) {
        let rpc = match self {
            // A local worktree only has an rpc handle once it has been shared.
            Self::Local(tree) => tree.rpc.clone(),
            Self::Remote(tree) => Some((tree.rpc.clone(), tree.remote_id)),
        };

        if let Some((rpc, worktree_id)) = rpc {
            // `release` cannot block, so the notification is fire-and-forget.
            cx.spawn(|_| async move {
                rpc.state
                    .write()
                    .await
                    .shared_worktrees
                    .remove(&worktree_id);
                if let Err(err) = rpc.send(proto::CloseWorktree { worktree_id }).await {
                    log::error!("error closing worktree {}: {}", worktree_id, err);
                }
            })
            .detach();
        }
    }
}
102
103impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem.
    pub fn local(
        path: impl Into<Arc<Path>>,
        languages: Arc<LanguageRegistry>,
        cx: &mut ModelContext<Worktree>,
    ) -> Self {
        Worktree::Local(LocalWorktree::new(path, languages, cx))
    }
111
    /// Requests worktree `id` from the server using `access_token`, then
    /// builds a remote worktree model from the response.
    pub async fn open_remote(
        rpc: rpc::Client,
        id: u64,
        access_token: String,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let response = rpc
            .request(proto::OpenWorktree {
                worktree_id: id,
                access_token,
            })
            .await?;

        Worktree::remote(response, rpc, languages, cx).await
    }
128
    /// Builds a `Worktree::Remote` model from the server's open response:
    /// deserializes the entries on a background thread, then wires up the
    /// channels that keep the snapshot in sync with incoming updates.
    async fn remote(
        open_response: proto::OpenWorktreeResponse,
        rpc: rpc::Client,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let worktree = open_response
            .worktree
            .ok_or_else(|| anyhow!("empty worktree"))?;

        let remote_id = open_response.worktree_id;
        let replica_id = open_response.replica_id as ReplicaId;
        let peers = open_response.peers;
        // Lower-cased character bag of the root name, used by fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        // Building both entry trees can be expensive for large worktrees, so
        // do it off the main thread.
        let (entries_by_path, entries_by_id) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // Skip undeserializable entries instead of failing
                        // the whole open.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());
                (entries_by_path, entries_by_id)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: cx.model_id(),
                    scan_id: 0,
                    // A remote worktree has no meaningful local absolute path.
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                // Incoming `UpdateWorktree` messages flow through `updates_tx`
                // to a background task, which applies them and publishes each
                // new snapshot on the watch channel.
                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    // Re-poll the model whenever a new snapshot is published,
                    // stopping once the model has been released.
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    rpc: rpc.clone(),
                    open_buffers: Default::default(),
                    peers: peers
                        .into_iter()
                        .map(|p| (PeerId(p.peer_id), p.replica_id as ReplicaId))
                        .collect(),
                    languages,
                })
            })
        });
        // Register the worktree so incoming RPC messages can be routed to it.
        rpc.state
            .write()
            .await
            .shared_worktrees
            .insert(open_response.worktree_id, worktree.downgrade());

        Ok(worktree)
    }
243
244 pub fn as_local(&self) -> Option<&LocalWorktree> {
245 if let Worktree::Local(worktree) = self {
246 Some(worktree)
247 } else {
248 None
249 }
250 }
251
252 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
253 if let Worktree::Local(worktree) = self {
254 Some(worktree)
255 } else {
256 None
257 }
258 }
259
260 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
261 if let Worktree::Remote(worktree) = self {
262 Some(worktree)
263 } else {
264 None
265 }
266 }
267
    /// Returns a clone of the current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot(),
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
274
275 pub fn replica_id(&self) -> ReplicaId {
276 match self {
277 Worktree::Local(_) => 0,
278 Worktree::Remote(worktree) => worktree.replica_id,
279 }
280 }
281
    /// Dispatches an `AddPeer` message to the concrete worktree variant.
    pub fn add_peer(
        &mut self,
        envelope: TypedEnvelope<proto::AddPeer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        match self {
            Worktree::Local(worktree) => worktree.add_peer(envelope, cx),
            Worktree::Remote(worktree) => worktree.add_peer(envelope, cx),
        }
    }
292
    /// Dispatches a `RemovePeer` message to the concrete worktree variant.
    pub fn remove_peer(
        &mut self,
        envelope: TypedEnvelope<proto::RemovePeer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        match self {
            Worktree::Local(worktree) => worktree.remove_peer(envelope, cx),
            Worktree::Remote(worktree) => worktree.remove_peer(envelope, cx),
        }
    }
303
    /// The currently-connected peers, keyed by id, with their replica ids.
    pub fn peers(&self) -> &HashMap<PeerId, ReplicaId> {
        match self {
            Worktree::Local(worktree) => &worktree.peers,
            Worktree::Remote(worktree) => &worktree.peers,
        }
    }
310
    /// Opens (or reuses) a buffer for the file at `path` in this worktree.
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        match self {
            Worktree::Local(worktree) => worktree.open_buffer(path.as_ref(), cx),
            Worktree::Remote(worktree) => worktree.open_buffer(path.as_ref(), cx),
        }
    }
321
322 #[cfg(feature = "test-support")]
323 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
324 let open_buffers = match self {
325 Worktree::Local(worktree) => &worktree.open_buffers,
326 Worktree::Remote(worktree) => &worktree.open_buffers,
327 };
328
329 let path = path.as_ref();
330 open_buffers
331 .values()
332 .find(|buffer| {
333 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
334 file.path.as_ref() == path
335 } else {
336 false
337 }
338 })
339 .is_some()
340 }
341
342 pub fn update_buffer(
343 &mut self,
344 envelope: proto::UpdateBuffer,
345 cx: &mut ModelContext<Self>,
346 ) -> Result<()> {
347 let open_buffers = match self {
348 Worktree::Local(worktree) => &worktree.open_buffers,
349 Worktree::Remote(worktree) => &worktree.open_buffers,
350 };
351 let buffer = open_buffers
352 .get(&(envelope.buffer_id as usize))
353 .and_then(|buf| buf.upgrade(&cx));
354
355 let buffer = if let Some(buffer) = buffer {
356 buffer
357 } else {
358 return if matches!(self, Worktree::Local(_)) {
359 Err(anyhow!(
360 "invalid buffer {} in update buffer message",
361 envelope.buffer_id
362 ))
363 } else {
364 Ok(())
365 };
366 };
367
368 let ops = envelope
369 .operations
370 .into_iter()
371 .map(|op| op.try_into())
372 .collect::<anyhow::Result<Vec<_>>>()?;
373 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
374 Ok(())
375 }
376
    /// Handles a `BufferSaved` notification from the host, recording the new
    /// version and mtime on the matching open buffer. Only valid on remote
    /// worktrees — the host learns about saves through its own save path.
    pub fn buffer_saved(
        &mut self,
        message: proto::BufferSaved,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        if let Worktree::Remote(worktree) = self {
            if let Some(buffer) = worktree
                .open_buffers
                .get(&(message.buffer_id as usize))
                .and_then(|buf| buf.upgrade(&cx))
            {
                buffer.update(cx, |buffer, cx| {
                    let version = message.version.try_into()?;
                    let mtime = message
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    buffer.did_save(version, mtime, cx);
                    Result::<_, anyhow::Error>::Ok(())
                })?;
            }
            Ok(())
        } else {
            Err(anyhow!(
                "invalid buffer {} in buffer saved message",
                message.buffer_id
            ))
        }
    }
406
    /// Reconciles the foreground snapshot with the latest background state,
    /// then updates each open buffer whose underlying file moved, changed on
    /// disk, reappeared, or was deleted.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
        let update_buffers = match self {
            Self::Local(worktree) => {
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    // While the scanner is mid-scan, keep polling on a timer
                    // and skip touching buffers with a partial snapshot.
                    if !worktree.poll_scheduled {
                        cx.spawn(|this, mut cx| async move {
                            smol::Timer::after(Duration::from_millis(100)).await;
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_scheduled = false;
                                this.poll_snapshot(cx);
                            })
                        })
                        .detach();
                        worktree.poll_scheduled = true;
                    }
                    false
                } else {
                    true
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                true
            }
        };

        if update_buffers {
            let mut buffers_to_delete = Vec::new();
            for (buffer_id, buffer) in self.open_buffers() {
                if let Some(buffer) = buffer.upgrade(&cx) {
                    buffer.update(cx, |buffer, cx| {
                        let buffer_is_clean = !buffer.is_dirty();

                        if let Some(file) = buffer.file_mut() {
                            let mut file_changed = false;

                            if let Some(entry) = file
                                .entry_id
                                .and_then(|entry_id| self.entry_for_id(entry_id))
                            {
                                // Entry still exists: pick up renames...
                                if entry.path != file.path {
                                    file.path = entry.path.clone();
                                    file_changed = true;
                                }

                                // ...and on-disk modifications.
                                if entry.mtime != file.mtime {
                                    file.mtime = entry.mtime;
                                    file_changed = true;
                                    if let Some(worktree) = self.as_local() {
                                        // Only reload clean buffers; reloading
                                        // a dirty one would lose edits.
                                        if buffer_is_clean {
                                            let abs_path = worktree.absolutize(&file.path);
                                            refresh_buffer(abs_path, cx);
                                        }
                                    }
                                }
                            } else if let Some(entry) = self.entry_for_path(&file.path) {
                                // The old entry id is gone but a file exists
                                // at the same path again: re-associate.
                                file.entry_id = Some(entry.id);
                                file.mtime = entry.mtime;
                                if let Some(worktree) = self.as_local() {
                                    if buffer_is_clean {
                                        let abs_path = worktree.absolutize(&file.path);
                                        refresh_buffer(abs_path, cx);
                                    }
                                }
                                file_changed = true;
                            } else if !file.is_deleted() {
                                // File vanished from the tree: mark a clean
                                // buffer dirty so its contents aren't lost.
                                if buffer_is_clean {
                                    cx.emit(editor::buffer::Event::Dirtied);
                                }
                                file.entry_id = None;
                                file_changed = true;
                            }

                            if file_changed {
                                cx.emit(editor::buffer::Event::FileHandleChanged);
                            }
                        }
                    });
                } else {
                    // The buffer was dropped elsewhere; prune its entry.
                    buffers_to_delete.push(*buffer_id);
                }
            }

            for buffer_id in buffers_to_delete {
                self.open_buffers_mut().remove(&buffer_id);
            }
        }

        cx.notify();
    }
498
499 fn open_buffers(&self) -> &HashMap<usize, WeakModelHandle<Buffer>> {
500 match self {
501 Self::Local(worktree) => &worktree.open_buffers,
502 Self::Remote(worktree) => &worktree.open_buffers,
503 }
504 }
505
506 fn open_buffers_mut(&mut self) -> &mut HashMap<usize, WeakModelHandle<Buffer>> {
507 match self {
508 Self::Local(worktree) => &mut worktree.open_buffers,
509 Self::Remote(worktree) => &mut worktree.open_buffers,
510 }
511 }
512}
513
// Both variants carry a `Snapshot`, so deref straight to it for convenient
// read access to entries, paths, and counts.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
524
/// A worktree rooted in a directory on this machine, kept up to date by a
/// background scanner thread watching filesystem events.
pub struct LocalWorktree {
    // Foreground copy of the tree state, refreshed in `poll_snapshot`.
    snapshot: Snapshot,
    // Mutated in place by the background scanner thread.
    background_snapshot: Arc<Mutex<Snapshot>>,
    // Present only while the worktree is shared; completed scans are queued
    // here for broadcast to collaborators.
    snapshots_to_send_tx: Option<Sender<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    // Keeps the fsevent subscription alive for the life of the worktree.
    _event_stream_handle: fsevent::Handle,
    // True while a delayed re-poll is pending (debounces polling mid-scan).
    poll_scheduled: bool,
    // Set when the worktree is shared: the client plus the server-assigned id.
    rpc: Option<(rpc::Client, u64)>,
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles held on behalf of each remote peer's open buffers.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    peers: HashMap<PeerId, ReplicaId>,
    languages: Arc<LanguageRegistry>,
}
538
539impl LocalWorktree {
    /// Builds the local worktree, spawns the background scanner thread, and
    /// starts a task forwarding scan-state changes back to the foreground.
    fn new(
        path: impl Into<Arc<Path>>,
        languages: Arc<LanguageRegistry>,
        cx: &mut ModelContext<Worktree>,
    ) -> Self {
        let abs_path = path.into();
        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let id = cx.model_id();
        let snapshot = Snapshot {
            id,
            scan_id: 0,
            abs_path,
            root_name: Default::default(),
            root_char_bag: Default::default(),
            ignores: Default::default(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            next_entry_id: Default::default(),
        };
        // Subscribe to filesystem events under the root before scanning so
        // changes during the initial scan aren't missed.
        let (event_stream, event_stream_handle) =
            fsevent::EventStream::new(&[snapshot.abs_path.as_ref()], Duration::from_millis(100));

        // Shared with the scanner thread, which mutates it as it walks.
        let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));

        let tree = Self {
            snapshot,
            background_snapshot: background_snapshot.clone(),
            snapshots_to_send_tx: None,
            last_scan_state_rx,
            _event_stream_handle: event_stream_handle,
            poll_scheduled: false,
            open_buffers: Default::default(),
            shared_buffers: Default::default(),
            peers: Default::default(),
            rpc: None,
            languages,
        };

        // The scanner does blocking I/O, so it gets a dedicated OS thread.
        std::thread::spawn(move || {
            let scanner = BackgroundScanner::new(background_snapshot, scan_states_tx, id);
            scanner.run(event_stream)
        });

        cx.spawn_weak(|this, mut cx| async move {
            while let Ok(scan_state) = scan_states_rx.recv().await {
                if let Some(handle) = cx.read(|cx| this.upgrade(&cx)) {
                    handle.update(&mut cx, |this, cx| {
                        last_scan_state_tx.blocking_send(scan_state).ok();
                        this.poll_snapshot(cx);
                        let tree = this.as_local_mut().unwrap();
                        // When a scan completes and the worktree is shared,
                        // queue the fresh snapshot for broadcast to peers.
                        if !tree.is_scanning() {
                            if let Some(snapshots_to_send_tx) = tree.snapshots_to_send_tx.clone() {
                                if let Err(err) =
                                    smol::block_on(snapshots_to_send_tx.send(tree.snapshot()))
                                {
                                    log::error!("error submitting snapshot to send {}", err);
                                }
                            }
                        }
                    });
                } else {
                    // The model was released; stop forwarding scan states.
                    break;
                }
            }
        })
        .detach();

        tree
    }
611
    /// Opens the buffer at `path`, reusing an already-open buffer for the
    /// same file when possible; dropped buffer handles are pruned as a side
    /// effect of the scan.
    pub fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let handle = cx.handle();

        // If there is already a buffer for the given path, then return it.
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == handle.id() && file.path.as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                // Entry's buffer was released; drop it from the table.
                false
            }
        });

        let languages = self.languages.clone();
        let path = Arc::from(path);
        cx.spawn(|this, mut cx| async move {
            if let Some(existing_buffer) = existing_buffer {
                Ok(existing_buffer)
            } else {
                // Load the file from disk, then build a fresh buffer with a
                // language chosen from the file path.
                let (file, contents) = this
                    .update(&mut cx, |this, cx| this.as_local().unwrap().load(&path, cx))
                    .await?;
                let language = languages.select_language(&path).cloned();
                let buffer = cx.add_model(|cx| {
                    Buffer::from_history(0, History::new(contents.into()), Some(file), language, cx)
                });
                this.update(&mut cx, |this, _| {
                    let this = this
                        .as_local_mut()
                        .ok_or_else(|| anyhow!("must be a local worktree"))?;
                    this.open_buffers.insert(buffer.id(), buffer.downgrade());
                    Ok(buffer)
                })
            }
        })
    }
657
    /// Handles a peer's `OpenBuffer` request: opens the buffer locally,
    /// retains a strong handle on the peer's behalf, and replies with the
    /// serialized buffer state.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        let peer_id = envelope.original_sender_id();
        let path = Path::new(&envelope.payload.path);

        let buffer = self.open_buffer(path, cx);

        cx.spawn(|this, mut cx| async move {
            let buffer = buffer.await?;
            this.update(&mut cx, |this, cx| {
                // Keep the buffer alive while the peer has it open.
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, cx| buffer.to_proto(cx))),
                })
            })
        })
    }
684
685 pub fn close_remote_buffer(
686 &mut self,
687 envelope: TypedEnvelope<proto::CloseBuffer>,
688 _: &mut ModelContext<Worktree>,
689 ) -> Result<()> {
690 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
691 shared_buffers.remove(&envelope.payload.buffer_id);
692 }
693
694 Ok(())
695 }
696
697 pub fn add_peer(
698 &mut self,
699 envelope: TypedEnvelope<proto::AddPeer>,
700 cx: &mut ModelContext<Worktree>,
701 ) -> Result<()> {
702 let peer = envelope.payload.peer.ok_or_else(|| anyhow!("empty peer"))?;
703 self.peers
704 .insert(PeerId(peer.peer_id), peer.replica_id as ReplicaId);
705 cx.notify();
706 Ok(())
707 }
708
709 pub fn remove_peer(
710 &mut self,
711 envelope: TypedEnvelope<proto::RemovePeer>,
712 cx: &mut ModelContext<Worktree>,
713 ) -> Result<()> {
714 let peer_id = PeerId(envelope.payload.peer_id);
715 let replica_id = self
716 .peers
717 .remove(&peer_id)
718 .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?;
719 self.shared_buffers.remove(&peer_id);
720 for (_, buffer) in &self.open_buffers {
721 if let Some(buffer) = buffer.upgrade(&cx) {
722 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
723 }
724 }
725 cx.notify();
726 Ok(())
727 }
728
729 pub fn scan_complete(&self) -> impl Future<Output = ()> {
730 let mut scan_state_rx = self.last_scan_state_rx.clone();
731 async move {
732 let mut scan_state = Some(scan_state_rx.borrow().clone());
733 while let Some(ScanState::Scanning) = scan_state {
734 scan_state = scan_state_rx.recv().await;
735 }
736 }
737 }
738
739 fn is_scanning(&self) -> bool {
740 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
741 true
742 } else {
743 false
744 }
745 }
746
    /// Returns a clone of the current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
750
    /// The absolute path of the worktree root on disk.
    pub fn abs_path(&self) -> &Path {
        self.snapshot.abs_path.as_ref()
    }
754
    /// Whether the given absolute path lies inside this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
758
759 fn absolutize(&self, path: &Path) -> PathBuf {
760 if path.file_name().is_some() {
761 self.snapshot.abs_path.join(path)
762 } else {
763 self.snapshot.abs_path.to_path_buf()
764 }
765 }
766
    /// Reads the file at `path` from disk, returning its `File` handle and
    /// contents; refreshes the snapshot entry for the file as a side effect.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        cx.spawn(|this, mut cx| async move {
            let mut file = smol::fs::File::open(&abs_path).await?;
            let mut text = String::new();
            file.read_to_string(&mut text).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(&background_snapshot, path, &abs_path)?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((File::new(entry.id, handle, entry.path, entry.mtime), text))
        })
    }
782
    /// Writes `text` to a new path and registers `buffer` as the open buffer
    /// for that path, returning the resulting `File` handle.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                // Track the buffer under its new path.
                this.as_local_mut()
                    .unwrap()
                    .open_buffers
                    .insert(buffer.id(), buffer.downgrade());
                Ok(File::new(entry.id, cx.handle(), entry.path, entry.mtime))
            })
        })
    }
802
    /// Writes `text` to `path` on a background thread and refreshes the
    /// snapshot entry for the saved file.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();

        let save = cx.background().spawn(async move {
            // Cap the write buffer at 10 KiB regardless of file size.
            let buffer_size = text.summary().bytes.min(10 * 1024);
            let file = smol::fs::File::create(&abs_path).await?;
            let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);
            for chunk in text.chunks() {
                writer.write_all(chunk.as_bytes()).await?;
            }
            writer.flush().await?;
            refresh_entry(&background_snapshot, path.clone(), &abs_path)
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Surface the refreshed entry to the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
830
    /// Shares this worktree with collaborators: registers it with the
    /// server, then starts a background task that streams snapshot diffs to
    /// peers. Resolves to the server-assigned worktree id and access token.
    pub fn share(
        &mut self,
        rpc: rpc::Client,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<(u64, String)>> {
        let snapshot = self.snapshot();
        let share_request = self.share_request(cx);
        let handle = cx.handle();
        cx.spawn(|this, mut cx| async move {
            let share_request = share_request.await;
            let share_response = rpc.request(share_request).await?;

            // Register so incoming messages can be routed to this worktree.
            rpc.state
                .write()
                .await
                .shared_worktrees
                .insert(share_response.worktree_id, handle.downgrade());

            log::info!("sharing worktree {:?}", share_response);
            let (snapshots_to_send_tx, snapshots_to_send_rx) =
                smol::channel::unbounded::<Snapshot>();

            cx.background()
                .spawn({
                    let rpc = rpc.clone();
                    let worktree_id = share_response.worktree_id;
                    async move {
                        // Diff each queued snapshot against the last one that
                        // was successfully sent, so peers get minimal updates.
                        let mut prev_snapshot = snapshot;
                        while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                            let message = snapshot.build_update(&prev_snapshot, worktree_id);
                            match rpc.send(message).await {
                                Ok(()) => prev_snapshot = snapshot,
                                Err(err) => log::error!("error sending snapshot diff {}", err),
                            }
                        }
                    }
                })
                .detach();

            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_local_mut().unwrap();
                worktree.rpc = Some((rpc, share_response.worktree_id));
                worktree.snapshots_to_send_tx = Some(snapshots_to_send_tx);
            });

            Ok((share_response.worktree_id, share_response.access_token))
        })
    }
879
    /// Serializes the current snapshot into a `ShareWorktree` request on a
    /// background thread.
    fn share_request(&self, cx: &mut ModelContext<Worktree>) -> Task<proto::ShareWorktree> {
        let snapshot = self.snapshot();
        let root_name = self.root_name.clone();
        cx.background().spawn(async move {
            let entries = snapshot
                .entries_by_path
                .cursor::<(), ()>()
                .map(Into::into)
                .collect();
            proto::ShareWorktree {
                worktree: Some(proto::Worktree { root_name, entries }),
            }
        })
    }
894}
895
/// Reloads the given buffer's contents from `abs_path` on disk. Errors are
/// logged rather than propagated, since this runs as a detached task in
/// response to filesystem changes.
pub fn refresh_buffer(abs_path: PathBuf, cx: &mut ModelContext<Buffer>) {
    cx.spawn(|buffer, mut cx| async move {
        // Read the file off the main thread.
        let new_text = cx
            .background()
            .spawn(async move {
                let mut file = smol::fs::File::open(&abs_path).await?;
                let mut text = String::new();
                file.read_to_string(&mut text).await?;
                Ok::<_, anyhow::Error>(text.into())
            })
            .await;

        match new_text {
            Err(error) => log::error!("error refreshing buffer after file changed: {}", error),
            Ok(new_text) => {
                buffer
                    .update(&mut cx, |buffer, cx| {
                        buffer.set_text_from_disk(new_text, cx)
                    })
                    .await;
            }
        }
    })
    .detach()
}
921
// Deref to the foreground snapshot for convenient read access.
impl Deref for LocalWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
929
// Debug output delegates to the snapshot, which holds all the tree state.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
935
/// A worktree hosted on another peer's machine, mirrored locally via RPC.
pub struct RemoteWorktree {
    // The server-assigned id used to address this worktree in messages.
    remote_id: u64,
    // Foreground copy, refreshed from `snapshot_rx` in `poll_snapshot`.
    snapshot: Snapshot,
    // Receives snapshots produced by the background update-applying task.
    snapshot_rx: watch::Receiver<Snapshot>,
    rpc: rpc::Client,
    // Queue of raw `UpdateWorktree` messages awaiting application.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    peers: HashMap<PeerId, ReplicaId>,
    languages: Arc<LanguageRegistry>,
}
947
948impl RemoteWorktree {
    /// Opens a buffer for `path` by requesting its contents from the host,
    /// reusing an already-open buffer for the same file when possible.
    pub fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let handle = cx.handle();
        // Look for an existing buffer for this path, pruning dead handles.
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == handle.id() && file.path.as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });

        let rpc = self.rpc.clone();
        let languages = self.languages.clone();
        let replica_id = self.replica_id;
        let remote_worktree_id = self.remote_id;
        let path = path.to_string_lossy().to_string();
        cx.spawn(|this, mut cx| async move {
            if let Some(existing_buffer) = existing_buffer {
                Ok(existing_buffer)
            } else {
                // The entry must exist in the mirrored snapshot before we
                // ask the host for the buffer contents.
                let entry = this
                    .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                    .ok_or_else(|| anyhow!("file does not exist"))?;
                let file = File::new(entry.id, handle, entry.path, entry.mtime);
                let language = languages.select_language(&path).cloned();
                let response = rpc
                    .request(proto::OpenBuffer {
                        worktree_id: remote_worktree_id as u64,
                        path,
                    })
                    .await?;
                let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
                let buffer_id = remote_buffer.id;
                let buffer = cx.add_model(|cx| {
                    Buffer::from_proto(replica_id, remote_buffer, Some(file), language, cx).unwrap()
                });
                this.update(&mut cx, |this, _| {
                    let this = this.as_remote_mut().unwrap();
                    this.open_buffers
                        .insert(buffer_id as usize, buffer.downgrade());
                });
                Ok(buffer)
            }
        })
    }
1003
    /// Returns a clone of the current foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1007
    /// Records a newly-joined peer and its replica id, then notifies
    /// observers of the worktree.
    pub fn add_peer(
        &mut self,
        envelope: TypedEnvelope<proto::AddPeer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let peer = envelope.payload.peer.ok_or_else(|| anyhow!("empty peer"))?;
        self.peers
            .insert(PeerId(peer.peer_id), peer.replica_id as ReplicaId);
        cx.notify();
        Ok(())
    }
1019
1020 pub fn remove_peer(
1021 &mut self,
1022 envelope: TypedEnvelope<proto::RemovePeer>,
1023 cx: &mut ModelContext<Worktree>,
1024 ) -> Result<()> {
1025 let peer_id = PeerId(envelope.payload.peer_id);
1026 let replica_id = self
1027 .peers
1028 .remove(&peer_id)
1029 .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?;
1030 for (_, buffer) in &self.open_buffers {
1031 if let Some(buffer) = buffer.upgrade(&cx) {
1032 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1033 }
1034 }
1035 cx.notify();
1036 Ok(())
1037 }
1038}
1039
/// An immutable-in-spirit view of a worktree's entries at a point in time.
/// Cloned freely between the foreground model and background tasks.
#[derive(Clone)]
pub struct Snapshot {
    // The owning worktree model's id.
    id: usize,
    // Incremented on every (re)scan; entries record the scan that last
    // touched them so diffs can detect changes.
    scan_id: usize,
    abs_path: Arc<Path>,
    root_name: String,
    // Lower-cased character bag of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    // Parsed .gitignore per directory, tagged with the scan that loaded it.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Entries ordered by path; the primary tree.
    entries_by_path: SumTree<Entry>,
    // Entries ordered by id; maps ids back to paths.
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    next_entry_id: Arc<AtomicUsize>,
}
1053
1054impl Snapshot {
    /// Computes the diff from `other` (an older snapshot) to `self` as an
    /// `UpdateWorktree` message, by merge-joining the two id-ordered entry
    /// trees.
    pub fn build_update(&self, other: &Self, worktree_id: u64) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self.entries_by_id.cursor::<(), ()>().peekable();
        let mut other_entries = other.entries_by_id.cursor::<(), ()>().peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => match self_entry.id.cmp(&other_entry.id) {
                    Ordering::Less => {
                        // Present here but not in `other`: newly added.
                        let entry = self.entry_for_id(self_entry.id).unwrap().into();
                        updated_entries.push(entry);
                        self_entries.next();
                    }
                    Ordering::Equal => {
                        // Present in both: changed only if it was touched by
                        // a different scan.
                        if self_entry.scan_id != other_entry.scan_id {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                        }

                        self_entries.next();
                        other_entries.next();
                    }
                    Ordering::Greater => {
                        // Present only in `other`: removed.
                        removed_entries.push(other_entry.id as u64);
                        other_entries.next();
                    }
                },
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            updated_entries,
            removed_entries,
            worktree_id,
        }
    }
1101
    /// Applies an `UpdateWorktree` diff received from the host to this
    /// snapshot, keeping both entry trees consistent.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry was renamed, remove its old path entry first.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }
1135
    /// Total number of files in the worktree, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }
1139
    /// Number of files that are not excluded by ignore rules.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }
1143
    /// Iterates over all files, starting at the `start`-th file.
    pub fn files(&self, start: usize) -> FileIter {
        FileIter::all(self, start)
    }
1147
    /// Iterates over every entry path except the root entry's empty path.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<(), ()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| entry.path())
    }
1155
    /// Iterates over non-ignored file entries, starting at the `start`-th
    /// visible file.
    pub fn visible_files(&self, start: usize) -> FileIter {
        FileIter::visible(self, start)
    }
1159
    /// Iterates over the entries directly beneath `path` (each child's own
    /// subtree is skipped by the iterator).
    fn child_entries<'a>(&'a self, path: &'a Path) -> ChildEntriesIter<'a> {
        ChildEntriesIter::new(path, self)
    }
1163
    /// The worktree's root entry (stored under the empty relative path).
    ///
    /// Panics if the root has not been inserted yet.
    pub fn root_entry(&self) -> &Entry {
        self.entry_for_path("").unwrap()
    }
1167
    /// Returns the filename of the snapshot's root, plus a trailing slash if the snapshot's root is
    /// a directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
1173
1174 fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1175 let mut cursor = self.entries_by_path.cursor::<_, ()>();
1176 if cursor.seek(&PathSearch::Exact(path.as_ref()), Bias::Left, &()) {
1177 cursor.item()
1178 } else {
1179 None
1180 }
1181 }
1182
1183 fn entry_for_id(&self, id: usize) -> Option<&Entry> {
1184 let entry = self.entries_by_id.get(&id, &())?;
1185 self.entry_for_path(&entry.path)
1186 }
1187
1188 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1189 self.entry_for_path(path.as_ref()).map(|e| e.inode())
1190 }
1191
    /// Inserts `entry` into both trees, replacing any existing entry at the
    /// same path, and returns it (possibly with a reused id). If the entry is
    /// a `.gitignore` file, its rules are (re)parsed and recorded against the
    /// containing directory.
    fn insert_entry(&mut self, mut entry: Entry) -> Entry {
        if !entry.is_dir() && entry.path().file_name() == Some(&GITIGNORE) {
            // `Gitignore::new` returns a (possibly partial) ignore plus an
            // optional parse error; keep the ignore either way.
            let (ignore, err) = Gitignore::new(self.abs_path.join(entry.path()));
            if let Some(err) = err {
                log::error!("error in ignore file {:?} - {:?}", entry.path(), err);
            }

            // Tag the ignore with the current scan id so stale ignore data
            // can be detected later.
            let ignore_dir_path = entry.path().parent().unwrap();
            self.ignores
                .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }
1216
    /// Replaces the `PendingDir` entry at `parent_path` with a completed
    /// `Dir` entry and inserts the directory's children, optionally recording
    /// the directory's parsed `.gitignore`.
    ///
    /// Panics if `parent_path` has no entry or if its entry is not in the
    /// `PendingDir` state.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        // Only a pending directory may be populated; anything else indicates
        // a scanner bookkeeping bug.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }
1253
1254 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1255 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1256 entry.id = removed_entry_id;
1257 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1258 entry.id = existing_entry.id;
1259 }
1260 }
1261
    /// Removes the entry at `path` and all of its descendants from both
    /// trees, remembering the removed ids (keyed by inode) so a re-insert in
    /// the same event batch can reuse them (rename detection).
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entry_ids;
        {
            // Keep everything before `path`, slice out `path` plus its
            // descendants, then append everything after the subtree.
            let mut cursor = self.entries_by_path.cursor::<_, ()>();
            new_entries = cursor.slice(&PathSearch::Exact(path), Bias::Left, &());
            removed_entry_ids = cursor.slice(&PathSearch::Successor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entry_ids.cursor::<(), ()>() {
            // Keep the highest removed id seen per inode.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // If a .gitignore was removed, mark its directory's ignore entry as
        // touched in this scan so ignore statuses get refreshed.
        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }
1290
    /// Builds the stack of gitignores applying to `path` by walking its
    /// ancestor directories from the root downward. Returns
    /// `IgnoreStack::all()` when the path (or any ancestor directory) is
    /// itself ignored.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        // Collect ancestors (nearest first) together with their ignore
        // files, if any.
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        // Apply root-first; once a directory is ignored everything beneath
        // it is ignored, so we can stop early.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
1317}
1318
// Debug output lists one entry per line, indented by path depth.
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for entry in self.entries_by_path.cursor::<(), ()>() {
            // Indent proportionally to the number of path ancestors.
            for _ in entry.path().ancestors().skip(1) {
                write!(f, " ")?;
            }
            writeln!(f, "{:?} (inode: {})", entry.path(), entry.inode())?;
        }
        Ok(())
    }
}
1330
/// A handle to a file within a worktree, identified by its relative path.
#[derive(Clone, PartialEq)]
pub struct File {
    // `None` once the underlying worktree entry has been deleted.
    entry_id: Option<usize>,
    worktree: ModelHandle<Worktree>,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
}
1338
impl File {
    /// Creates a handle to the file at `path` (relative to the worktree
    /// root), backed by the worktree entry `entry_id`.
    pub fn new(
        entry_id: usize,
        worktree: ModelHandle<Worktree>,
        path: Arc<Path>,
        mtime: SystemTime,
    ) -> Self {
        Self {
            entry_id: Some(entry_id),
            worktree,
            path,
            mtime,
        }
    }

    /// Forwards a buffer operation over rpc when this file's worktree has a
    /// connection. Send failures are logged, not surfaced.
    pub fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            // `LocalWorktree::rpc` is an `Option` — presumably present only
            // while the worktree is shared with peers; remote worktrees
            // always have a connection. TODO(review): confirm.
            if let Some((rpc, remote_id)) = match worktree {
                Worktree::Local(worktree) => worktree.rpc.clone(),
                Worktree::Remote(worktree) => Some((worktree.rpc.clone(), worktree.remote_id)),
            } {
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::UpdateBuffer {
                                worktree_id: remote_id,
                                buffer_id,
                                operations: Some(operation).iter().map(Into::into).collect(),
                            })
                            .await
                        {
                            log::error!("error sending buffer operation: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    /// Notifies the remote host that this buffer was closed. Only meaningful
    /// for remote worktrees; local worktrees ignore the call.
    pub fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let worktree_id = worktree.remote_id;
                let rpc = worktree.rpc.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        };
                    })
                    .detach();
            }
        });
    }

    /// Returns this file's path relative to the root of its worktree.
    pub fn path(&self) -> Arc<Path> {
        self.path.clone()
    }

    /// Returns this file's absolute path on disk.
    pub fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree.read(cx).abs_path.join(&self.path)
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    pub fn file_name<'a>(&'a self, cx: &'a AppContext) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| Some(OsStr::new(self.worktree.read(cx).root_name())))
            .map(Into::into)
    }

    /// Whether the underlying worktree entry has been deleted.
    pub fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Whether the underlying worktree entry still exists.
    pub fn exists(&self) -> bool {
        !self.is_deleted()
    }

    /// Saves `text` to this file. For a local worktree this writes to disk
    /// (and notifies peers if shared); for a remote worktree it asks the
    /// host to save. Resolves to the saved version and new mtime.
    pub fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: time::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(time::Global, SystemTime)>> {
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.rpc.clone();
                let save = worktree.save(self.path.clone(), text, cx);
                cx.spawn(|_, _| async move {
                    let entry = save.await?;
                    // If this worktree is connected to peers, tell them the
                    // buffer was saved.
                    if let Some((rpc, worktree_id)) = rpc {
                        rpc.send(proto::BufferSaved {
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.rpc.clone();
                let worktree_id = worktree.remote_id;
                cx.spawn(|_, _| async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }

    /// The local model id of this file's worktree.
    pub fn worktree_id(&self) -> usize {
        self.worktree.id()
    }

    /// A (worktree id, path) pair identifying this file.
    pub fn entry_id(&self) -> (usize, Arc<Path>) {
        (self.worktree.id(), self.path())
    }
}
1480
/// A single file or directory within a worktree, as captured by a scan.
#[derive(Clone, Debug)]
pub struct Entry {
    // Stable identifier; reused across renames detected within one event
    // batch (see `reuse_entry_id`).
    id: usize,
    kind: EntryKind,
    // Path relative to the worktree root; empty for the root entry.
    path: Arc<Path>,
    inode: u64,
    mtime: SystemTime,
    is_symlink: bool,
    // True when a gitignore rule excludes this entry.
    is_ignored: bool,
}
1491
/// Distinguishes files from directories. `PendingDir` marks a directory
/// that has been discovered but whose children have not been scanned yet.
#[derive(Clone, Debug)]
pub enum EntryKind {
    PendingDir,
    Dir,
    // Files carry a char bag used for fuzzy path matching.
    File(CharBag),
}
1498
impl Entry {
    /// The entry's path relative to the worktree root.
    pub fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// The entry's file-system inode number.
    pub fn inode(&self) -> u64 {
        self.inode
    }

    /// Whether the entry is excluded by a gitignore rule.
    pub fn is_ignored(&self) -> bool {
        self.is_ignored
    }

    /// True for directories, whether fully scanned or still pending.
    fn is_dir(&self) -> bool {
        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
    }

    /// True for regular file entries.
    fn is_file(&self) -> bool {
        matches!(self.kind, EntryKind::File(_))
    }
}
1520
1521impl sum_tree::Item for Entry {
1522 type Summary = EntrySummary;
1523
1524 fn summary(&self) -> Self::Summary {
1525 let file_count;
1526 let visible_file_count;
1527 if self.is_file() {
1528 file_count = 1;
1529 if self.is_ignored {
1530 visible_file_count = 0;
1531 } else {
1532 visible_file_count = 1;
1533 }
1534 } else {
1535 file_count = 0;
1536 visible_file_count = 0;
1537 }
1538
1539 EntrySummary {
1540 max_path: self.path().clone(),
1541 file_count,
1542 visible_file_count,
1543 }
1544 }
1545}
1546
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and therefore ordered) by their relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path().clone())
    }
}
1554
/// Sum-tree summary for `Entry`: the greatest path in a subtree plus file
/// counts, enabling both path lookups and indexed file traversal.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    max_path: Arc<Path>,
    file_count: usize,
    // Count of files not excluded by gitignore rules.
    visible_file_count: usize,
}
1561
impl Default for EntrySummary {
    /// The empty summary: empty path, zero files.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
1571
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines an adjacent right-hand summary into this one: counts add,
    /// and the right side's `max_path` becomes the combined maximum.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
1581
/// Secondary index record mapping an entry id to its current path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    // Scan during which this record was last written.
    scan_id: usize,
}
1588
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// Summaries track the maximum id in a subtree for id-based seeks.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
1596
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    /// Path entries are keyed (and ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
1604
/// Sum-tree summary for `PathEntry`: the greatest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
1609
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The right-hand side's `max_id` becomes the combined maximum, since
    /// items are ordered by id.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
1617
// Lets cursors over `entries_by_id` seek directly by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
1623
/// Ordering key for `entries_by_path`: the entry's worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
1626
impl Default for PathKey {
    /// The empty path — the root entry's key, and the minimum ordering value.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
1632
// Lets cursors over `entries_by_path` seek directly by path key.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
1638
/// Seek target for path cursors: either an exact path, or the position
/// just past a path and all of its descendants (`Successor`).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum PathSearch<'a> {
    Exact(&'a Path),
    Successor(&'a Path),
}
1644
impl<'a> Ord for PathSearch<'a> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        match (self, other) {
            (Self::Exact(a), Self::Exact(b)) => a.cmp(b),
            (Self::Successor(a), Self::Exact(b)) => {
                // `Successor(a)` sorts after `a` itself and after every path
                // beneath `a`; otherwise it compares like `a`.
                if b.starts_with(a) {
                    cmp::Ordering::Greater
                } else {
                    a.cmp(b)
                }
            }
            // Tree summaries only ever produce `Exact` (see the Dimension
            // impl below), so the search value is always on the left.
            _ => unreachable!("not sure we need the other two cases"),
        }
    }
}
1660
// Delegates to the total order above.
impl<'a> PartialOrd for PathSearch<'a> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        Some(self.cmp(other))
    }
}
1666
1667impl<'a> Default for PathSearch<'a> {
1668 fn default() -> Self {
1669 Self::Exact(Path::new("").into())
1670 }
1671}
1672
// Lets cursors seek by `PathSearch`; accumulated summaries always appear
// as `Exact` positions.
impl<'a: 'b, 'b> sum_tree::Dimension<'a, EntrySummary> for PathSearch<'b> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        *self = Self::Exact(summary.max_path.as_ref());
    }
}
1678
/// Cursor dimension counting all file entries; used to seek to the n-th file.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct FileCount(usize);
1681
impl<'a> sum_tree::Dimension<'a, EntrySummary> for FileCount {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 += summary.file_count;
    }
}
1687
/// Cursor dimension counting non-ignored file entries; used to seek to the
/// n-th visible file.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct VisibleFileCount(usize);
1690
impl<'a> sum_tree::Dimension<'a, EntrySummary> for VisibleFileCount {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 += summary.visible_file_count;
    }
}
1696
/// Scans a worktree on background threads, keeping the shared snapshot up
/// to date and reporting scan progress over the `notify` channel.
struct BackgroundScanner {
    snapshot: Arc<Mutex<Snapshot>>,
    notify: Sender<ScanState>,
    thread_pool: scoped_pool::Pool,
}
1702
impl BackgroundScanner {
    /// Creates a scanner that writes into `snapshot` and reports progress on
    /// `notify`, with a dedicated 16-thread pool for directory scanning.
    fn new(snapshot: Arc<Mutex<Snapshot>>, notify: Sender<ScanState>, worktree_id: usize) -> Self {
        Self {
            snapshot,
            notify,
            thread_pool: scoped_pool::Pool::new(16, format!("worktree-{}-scanner", worktree_id)),
        }
    }

    /// The worktree's absolute root path.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// A point-in-time clone of the shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }

    /// Performs the initial scan, then processes batches of file-system
    /// events from `event_stream` until the notify channel closes. Each
    /// phase is bracketed by `Scanning`/`Idle` notifications.
    fn run(mut self, event_stream: fsevent::EventStream) {
        if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs() {
            // NOTE(review): after reporting `Err` we still fall through and
            // send `Idle` below, which may overwrite the error state for
            // observers — confirm this is intended.
            if smol::block_on(self.notify.send(ScanState::Err(Arc::new(err)))).is_err() {
                return;
            }
        }

        if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
            return;
        }

        // Returning `false` from the callback stops the event stream.
        event_stream.run(move |events| {
            if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
                return false;
            }

            if !self.process_events(events) {
                return false;
            }

            if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
                return false;
            }

            true
        });
    }

    /// Scans the worktree root: stats it, records the root name/char bag,
    /// inserts the root entry, and — for a directory root — scans the whole
    /// tree in parallel via a work-stealing queue of `ScanJob`s.
    fn scan_dirs(&mut self) -> io::Result<()> {
        self.snapshot.lock().scan_id += 1;

        let path: Arc<Path> = Arc::from(Path::new(""));
        let abs_path = self.abs_path();
        let metadata = fs::metadata(&abs_path)?;
        let inode = metadata.ino();
        let is_symlink = fs::symlink_metadata(&abs_path)?.file_type().is_symlink();
        let is_dir = metadata.file_type().is_dir();
        let mtime = metadata.modified()?;

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let mut root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        if is_dir {
            root_name.push('/');
        }

        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let next_entry_id;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.root_name = root_name;
            snapshot.root_char_bag = root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
        }

        if is_dir {
            self.snapshot.lock().insert_entry(Entry {
                id: next_entry_id.fetch_add(1, SeqCst),
                kind: EntryKind::PendingDir,
                path: path.clone(),
                inode,
                mtime,
                is_symlink,
                is_ignored: false,
            });

            // Seed the queue with the root job. Each job holds a clone of
            // the sender, so the channel closes only when all jobs (and
            // their descendants) have been processed.
            let (tx, rx) = crossbeam_channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .unwrap();
            drop(tx);

            self.thread_pool.scoped(|pool| {
                for _ in 0..self.thread_pool.thread_count() {
                    pool.execute(|| {
                        while let Ok(job) = rx.recv() {
                            if let Err(err) =
                                self.scan_dir(root_char_bag, next_entry_id.clone(), &job)
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            });
        } else {
            // Single-file worktree: the root entry is the only entry.
            self.snapshot.lock().insert_entry(Entry {
                id: next_entry_id.fetch_add(1, SeqCst),
                kind: EntryKind::File(char_bag_for_path(root_char_bag, &path)),
                path,
                inode,
                mtime,
                is_symlink,
                is_ignored: false,
            });
        }

        Ok(())
    }

    /// Scans one directory: reads its children, computes their ignore
    /// status, populates the snapshot, and queues child directories as new
    /// scan jobs.
    fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> io::Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        for child_entry in fs::read_dir(&job.abs_path)? {
            let child_entry = child_entry?;
            let child_name = child_entry.file_name();
            let child_abs_path = job.abs_path.join(&child_name);
            let child_path: Arc<Path> = job.path.join(&child_name).into();
            let child_is_symlink = child_entry.metadata()?.file_type().is_symlink();
            // Stat through symlinks; skip children that vanished mid-scan.
            let child_metadata = if let Ok(metadata) = fs::metadata(&child_abs_path) {
                metadata
            } else {
                log::error!("could not get metadata for path {:?}", child_abs_path);
                continue;
            };

            let child_inode = child_metadata.ino();
            let child_mtime = child_metadata.modified()?;

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                let (ignore, err) = Gitignore::new(&child_abs_path);
                if let Some(err) = err {
                    log::error!("error in ignore file {:?} - {:?}", child_path, err);
                }
                let ignore = Arc::new(ignore);
                ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                new_ignore = Some(ignore);

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in this directory.
                // Because `.gitignore` starts with a `.`, it tends to appear
                // early in the listing, so there should rarely be many such
                // entries. Also update the ignore stack of any scan jobs we've
                // already queued for child directories. This relies on
                // `new_jobs` containing exactly one job per directory entry
                // in `new_entries`, in the same order.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            if child_metadata.is_dir() {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                new_entries.push(Entry {
                    id: next_entry_id.fetch_add(1, SeqCst),
                    kind: EntryKind::PendingDir,
                    path: child_path.clone(),
                    inode: child_inode,
                    mtime: child_mtime,
                    is_symlink: child_is_symlink,
                    is_ignored,
                });
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(Entry {
                    id: next_entry_id.fetch_add(1, SeqCst),
                    kind: EntryKind::File(char_bag_for_path(root_char_bag, &child_path)),
                    path: child_path,
                    inode: child_inode,
                    mtime: child_mtime,
                    is_symlink: child_is_symlink,
                    is_ignored,
                });
            };
        }

        // Commit this directory's children, then queue the subdirectories.
        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).unwrap();
        }

        Ok(())
    }

    /// Applies one batch of FS events to a private copy of the snapshot,
    /// rescans any affected directories, publishes the result, and then
    /// refreshes ignore statuses. Returns `false` if the worktree root can
    /// no longer be resolved.
    fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = snapshot.abs_path.canonicalize() {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort by path, then collapse events inside a subtree into the event
        // for their ancestor (the whole subtree gets rescanned anyway).
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First pass: remove each affected path (and its descendants) from
        // the snapshot; removed ids are remembered for rename detection.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        // Second pass: re-stat each affected path and re-insert it, queueing
        // directories for a full rescan.
        let (scan_queue_tx, scan_queue_rx) = crossbeam_channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match fs_entry_for_path(
                snapshot.root_char_bag,
                &next_entry_id,
                path.clone(),
                &event.path,
            ) {
                Ok(Some(mut fs_entry)) => {
                    let is_dir = fs_entry.is_dir();
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, is_dir);
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry);
                    if is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .unwrap();
                    }
                }
                // The path no longer exists on disk; nothing to insert.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        // Publish the updated snapshot before the (parallel) rescan fills in
        // the queued directories.
        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.thread_pool.scoped(|pool| {
            for _ in 0..self.thread_pool.thread_count() {
                pool.execute(|| {
                    while let Ok(job) = scan_queue_rx.recv() {
                        if let Err(err) = self.scan_dir(root_char_bag, next_entry_id.clone(), &job)
                        {
                            log::error!("error scanning {:?}: {}", job.abs_path, err);
                        }
                    }
                });
            }
        });

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses();
        true
    }

    /// Recomputes ignore status for all subtrees whose `.gitignore` changed
    /// in this scan, and drops ignore records whose `.gitignore` file no
    /// longer exists.
    fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // A matching scan_id means this .gitignore was (re)loaded or
            // touched during the current scan.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        // Sort the dirty paths and skip any path nested beneath an earlier
        // one — the ancestor's job will recurse into it anyway.
        let (ignore_queue_tx, ignore_queue_rx) = crossbeam_channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .unwrap();
        }
        drop(ignore_queue_tx);

        self.thread_pool.scoped(|scope| {
            for _ in 0..self.thread_pool.thread_count() {
                scope.execute(|| {
                    while let Ok(job) = ignore_queue_rx.recv() {
                        self.update_ignore_status(job, &snapshot);
                    }
                });
            }
        });
    }

    /// Recomputes ignore status for the direct children of `job.path`,
    /// queueing a follow-up job for each child directory, and writes any
    /// changed entries back to the shared snapshot.
    fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(entry.path(), entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path().clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .unwrap();
            }

            // Only write back entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                edits.push(Edit::Insert(entry));
            }
        }
        self.snapshot.lock().entries_by_path.edit(edits, &());
    }
}
2112
/// Re-stats `path` on disk and inserts the resulting entry into the
/// snapshot, returning it. Fails if the file's metadata can no longer be
/// read (e.g. it was deleted right after being saved).
fn refresh_entry(snapshot: &Mutex<Snapshot>, path: Arc<Path>, abs_path: &Path) -> Result<Entry> {
    // Copy what we need out of the snapshot, then release the lock while
    // touching the file system.
    let root_char_bag;
    let next_entry_id;
    {
        let snapshot = snapshot.lock();
        root_char_bag = snapshot.root_char_bag;
        next_entry_id = snapshot.next_entry_id.clone();
    }
    let entry = fs_entry_for_path(root_char_bag, &next_entry_id, path, abs_path)?
        .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
    Ok(snapshot.lock().insert_entry(entry))
}
2125
2126fn fs_entry_for_path(
2127 root_char_bag: CharBag,
2128 next_entry_id: &AtomicUsize,
2129 path: Arc<Path>,
2130 abs_path: &Path,
2131) -> Result<Option<Entry>> {
2132 let metadata = match fs::metadata(&abs_path) {
2133 Err(err) => {
2134 return match (err.kind(), err.raw_os_error()) {
2135 (io::ErrorKind::NotFound, _) => Ok(None),
2136 (io::ErrorKind::Other, Some(libc::ENOTDIR)) => Ok(None),
2137 _ => Err(anyhow::Error::new(err)),
2138 }
2139 }
2140 Ok(metadata) => metadata,
2141 };
2142 let inode = metadata.ino();
2143 let mtime = metadata.modified()?;
2144 let is_symlink = fs::symlink_metadata(&abs_path)
2145 .context("failed to read symlink metadata")?
2146 .file_type()
2147 .is_symlink();
2148
2149 let entry = Entry {
2150 id: next_entry_id.fetch_add(1, SeqCst),
2151 kind: if metadata.file_type().is_dir() {
2152 EntryKind::PendingDir
2153 } else {
2154 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2155 },
2156 path: Arc::from(path),
2157 inode,
2158 mtime,
2159 is_symlink,
2160 is_ignored: false,
2161 };
2162
2163 Ok(Some(entry))
2164}
2165
2166fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2167 let mut result = root_char_bag;
2168 result.extend(
2169 path.to_string_lossy()
2170 .chars()
2171 .map(|c| c.to_ascii_lowercase()),
2172 );
2173 result
2174}
2175
/// Unit of work for the scanner's thread pool: scan the directory at
/// `abs_path` and enqueue its subdirectories.
struct ScanJob {
    abs_path: PathBuf,
    // Path relative to the worktree root.
    path: Arc<Path>,
    // Gitignores in effect for this directory's contents.
    ignore_stack: Arc<IgnoreStack>,
    // Each job holds a sender clone so the channel stays open until the
    // whole subtree has been processed.
    scan_queue: crossbeam_channel::Sender<ScanJob>,
}
2182
/// Unit of work for refreshing ignore statuses: recompute the children of
/// `path` under `ignore_stack`, queueing follow-up jobs for subdirectories.
struct UpdateIgnoreStatusJob {
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Sender clone keeps the queue open until the subtree is done.
    ignore_queue: crossbeam_channel::Sender<UpdateIgnoreStatusJob>,
}
2188
/// Test-only extension methods for `ModelHandle<Worktree>`.
pub trait WorktreeHandle {
    // Waits until all pending FS events for the worktree have been handled.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2196
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create and then delete a sentinel file, waiting for each
            // change to be observed by the worktree.
            fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2229
/// Cursor-backed iterator over a snapshot's file entries — either all files
/// or only those not excluded by gitignore rules.
pub enum FileIter<'a> {
    All(Cursor<'a, Entry, FileCount, ()>),
    Visible(Cursor<'a, Entry, VisibleFileCount, ()>),
}
2234
impl<'a> FileIter<'a> {
    /// Starts an iterator over all files, positioned at the `start`-th file.
    fn all(snapshot: &'a Snapshot, start: usize) -> Self {
        let mut cursor = snapshot.entries_by_path.cursor();
        cursor.seek(&FileCount(start), Bias::Right, &());
        Self::All(cursor)
    }

    /// Starts an iterator over non-ignored files, positioned at the
    /// `start`-th visible file.
    fn visible(snapshot: &'a Snapshot, start: usize) -> Self {
        let mut cursor = snapshot.entries_by_path.cursor();
        cursor.seek(&VisibleFileCount(start), Bias::Right, &());
        Self::Visible(cursor)
    }

    /// Advances to the next file by seeking one past the current position in
    /// the relevant file-count dimension.
    fn next_internal(&mut self) {
        match self {
            Self::All(cursor) => {
                let ix = *cursor.seek_start();
                cursor.seek_forward(&FileCount(ix.0 + 1), Bias::Right, &());
            }
            Self::Visible(cursor) => {
                let ix = *cursor.seek_start();
                cursor.seek_forward(&VisibleFileCount(ix.0 + 1), Bias::Right, &());
            }
        }
    }

    /// The entry the cursor is currently positioned at, if any.
    fn item(&self) -> Option<&'a Entry> {
        match self {
            Self::All(cursor) => cursor.item(),
            Self::Visible(cursor) => cursor.item(),
        }
    }
}
2268
2269impl<'a> Iterator for FileIter<'a> {
2270 type Item = &'a Entry;
2271
2272 fn next(&mut self) -> Option<Self::Item> {
2273 if let Some(entry) = self.item() {
2274 self.next_internal();
2275 Some(entry)
2276 } else {
2277 None
2278 }
2279 }
2280}
2281
/// Iterator over the entries directly beneath a parent path, skipping each
/// child's own subtree.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    cursor: Cursor<'a, Entry, PathSearch<'a>, ()>,
}
2286
impl<'a> ChildEntriesIter<'a> {
    /// Positions a cursor just past `parent_path` itself, ready to yield its
    /// children.
    fn new(parent_path: &'a Path, snapshot: &'a Snapshot) -> Self {
        let mut cursor = snapshot.entries_by_path.cursor();
        cursor.seek(&PathSearch::Exact(parent_path), Bias::Right, &());
        Self {
            parent_path,
            cursor,
        }
    }
}
2297
2298impl<'a> Iterator for ChildEntriesIter<'a> {
2299 type Item = &'a Entry;
2300
2301 fn next(&mut self) -> Option<Self::Item> {
2302 if let Some(item) = self.cursor.item() {
2303 if item.path().starts_with(self.parent_path) {
2304 self.cursor
2305 .seek_forward(&PathSearch::Successor(item.path()), Bias::Left, &());
2306 Some(item)
2307 } else {
2308 None
2309 }
2310 } else {
2311 None
2312 }
2313 }
2314}
2315
2316impl<'a> From<&'a Entry> for proto::Entry {
2317 fn from(entry: &'a Entry) -> Self {
2318 Self {
2319 id: entry.id as u64,
2320 is_dir: entry.is_dir(),
2321 path: entry.path.to_string_lossy().to_string(),
2322 inode: entry.inode,
2323 mtime: Some(entry.mtime.into()),
2324 is_symlink: entry.is_symlink,
2325 is_ignored: entry.is_ignored,
2326 }
2327 }
2328}
2329
2330impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2331 type Error = anyhow::Error;
2332
2333 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2334 if let Some(mtime) = entry.mtime {
2335 let kind = if entry.is_dir {
2336 EntryKind::Dir
2337 } else {
2338 let mut char_bag = root_char_bag.clone();
2339 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2340 EntryKind::File(char_bag)
2341 };
2342 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
2343 Ok(Entry {
2344 id: entry.id as usize,
2345 kind,
2346 path: path.clone(),
2347 inode: entry.inode,
2348 mtime: mtime.into(),
2349 is_symlink: entry.is_symlink,
2350 is_ignored: entry.is_ignored,
2351 })
2352 } else {
2353 Err(anyhow!(
2354 "missing mtime in remote worktree entry {:?}",
2355 entry.path
2356 ))
2357 }
2358 }
2359}
2360
/// Handlers for worktree-related RPC messages. Each handler resolves the
/// shared worktree named by the message's `worktree_id` from the client's
/// state and dispatches to the corresponding `Worktree` method. They are
/// registered with the RPC client in `init` at the top of this file.
mod remote {
    use super::*;

    /// A peer joined a shared worktree.
    pub async fn add_peer(
        envelope: TypedEnvelope<proto::AddPeer>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        rpc.state
            .read()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?
            .update(cx, |worktree, cx| worktree.add_peer(envelope, cx))
    }

    /// A peer left a shared worktree.
    pub async fn remove_peer(
        envelope: TypedEnvelope<proto::RemovePeer>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        rpc.state
            .read()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?
            .update(cx, |worktree, cx| worktree.remove_peer(envelope, cx))
    }

    /// A snapshot update for a remote worktree. The payload is forwarded
    /// into the remote worktree's update channel; it is an error for this
    /// message to target a local worktree.
    pub async fn update_worktree(
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        rpc.state
            .read()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?
            .update(cx, |worktree, _| {
                if let Some(worktree) = worktree.as_remote_mut() {
                    let mut tx = worktree.updates_tx.clone();
                    // Return a future from the update closure so the
                    // (possibly blocking) channel send is awaited outside
                    // the model update, below.
                    Ok(async move {
                        tx.send(envelope.payload)
                            .await
                            .expect("receiver runs to completion");
                    })
                } else {
                    Err(anyhow!(
                        "invalid update message for local worktree {}",
                        envelope.payload.worktree_id
                    ))
                }
            })?
            .await;

        Ok(())
    }

    /// A remote peer asked to open a buffer in one of our local worktrees;
    /// respond with the opened buffer's state.
    pub async fn open_buffer(
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        // Capture the receipt before the envelope is moved into the
        // worktree update below.
        let receipt = envelope.receipt();
        let worktree = rpc
            .state
            .read()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?;

        let response = worktree
            .update(cx, |worktree, cx| {
                worktree
                    .as_local_mut()
                    .unwrap()
                    .open_remote_buffer(envelope, cx)
            })
            .await?;

        rpc.respond(receipt, response).await?;

        Ok(())
    }

    /// A remote peer closed a buffer in one of our local worktrees.
    pub async fn close_buffer(
        envelope: TypedEnvelope<proto::CloseBuffer>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        let worktree = rpc
            .state
            .read()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?;

        worktree.update(cx, |worktree, cx| {
            worktree
                .as_local_mut()
                .unwrap()
                .close_remote_buffer(envelope, cx)
        })
    }

    /// Remote edits for a shared buffer; apply them to our copy.
    pub async fn update_buffer(
        envelope: TypedEnvelope<proto::UpdateBuffer>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        let message = envelope.payload;
        rpc.state
            .read()
            .await
            .shared_worktree(message.worktree_id, cx)?
            .update(cx, |tree, cx| tree.update_buffer(message, cx))?;
        Ok(())
    }

    /// A remote peer asked us to save a buffer we shared with them. Save
    /// it and respond with the saved version vector and mtime.
    pub async fn save_buffer(
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        let state = rpc.state.read().await;
        let worktree = state.shared_worktree(envelope.payload.worktree_id, cx)?;
        let sender_id = envelope.original_sender_id()?;
        // Only buffers previously shared with the original sender may be
        // saved on their behalf; anything else is an unknown buffer id.
        let buffer = worktree.read_with(cx, |tree, _| {
            tree.as_local()
                .unwrap()
                .shared_buffers
                .get(&sender_id)
                .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
                .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))
        })?;
        let (version, mtime) = buffer.update(cx, |buffer, cx| buffer.save(cx))?.await?;
        rpc.respond(
            envelope.receipt(),
            proto::BufferSaved {
                worktree_id: envelope.payload.worktree_id,
                buffer_id: envelope.payload.buffer_id,
                version: (&version).into(),
                mtime: Some(mtime.into()),
            },
        )
        .await?;
        Ok(())
    }

    /// Notification that a buffer we have open was saved elsewhere;
    /// forward the new version/mtime to the worktree.
    pub async fn buffer_saved(
        envelope: TypedEnvelope<proto::BufferSaved>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        rpc.state
            .read()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?
            .update(cx, |worktree, cx| {
                worktree.buffer_saved(envelope.payload, cx)
            })?;
        Ok(())
    }
}
2521
// Tests for worktree scanning, buffer save paths, remote-worktree sync, and
// a seeded randomized stress test of the background scanner.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test::*;
    use anyhow::Result;
    use rand::prelude::*;
    use serde_json::json;
    use std::time::UNIX_EPOCH;
    use std::{env, fmt::Write, os::unix, time::SystemTime};

    // Scans a tree reached through a symlinked root (with a symlinked
    // subdirectory inside it) and fuzzy-searches the resulting paths.
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let tree = cx.add_model(|cx| Worktree::local(root_link_path, Default::default(), cx));

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert_eq!(tree.file_count(), 5);

            // The symlinked directory resolves to the same underlying
            // files, so inodes must match across both paths.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );

            let results = match_paths(
                Some(tree.snapshot()).iter(),
                "bna",
                false,
                false,
                false,
                10,
                Default::default(),
                cx.thread_pool().clone(),
            )
            .into_iter()
            .map(|result| result.path)
            .collect::<Vec<Arc<Path>>>();
            assert_eq!(
                results,
                vec![
                    PathBuf::from("banana/carrot/date").into(),
                    PathBuf::from("banana/carrot/endive").into(),
                ]
            );
        })
    }

    // Opens a buffer, edits it, saves, and checks the file on disk matches
    // the buffer contents.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let app_state = cx.read(build_app_state);
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let tree = cx.add_model(|cx| Worktree::local(dir.path(), app_state.languages, cx));
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }

    // Same as above, but the worktree root is the file itself (a
    // single-file worktree), so the buffer is opened with an empty path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let app_state = cx.read(build_app_state);
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let tree = cx.add_model(|cx| Worktree::local(file_path.clone(), app_state.languages, cx));
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }

    // Renames/deletes files under a scanned worktree, then verifies that
    // entry ids and open buffers track the moves, and that a remote copy
    // of the worktree converges after applying the computed update.
    #[gpui::test]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let tree = cx.add_model(|cx| Worktree::local(dir.path(), Default::default(), cx));

        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap() }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let worktree_id = 1;
        let share_request = tree
            .update(&mut cx, |tree, cx| {
                tree.as_local().unwrap().share_request(cx)
            })
            .await;
        let remote = Worktree::remote(
            proto::OpenWorktreeResponse {
                worktree_id,
                worktree: share_request.worktree,
                replica_id: 1,
                peers: Vec::new(),
            },
            rpc::Client::new(Default::default()),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Entry ids survive renames, including a rename of a
            // containing directory.
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            // file5 was deleted, so its buffer keeps the old path and is
            // flagged as deleted below.
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message = tree
                .read(cx)
                .snapshot()
                .build_update(&initial_snapshot, worktree_id);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }

    // Verifies that gitignore rules are honored for existing entries and
    // for files created after the initial scan, and that `.git` itself is
    // treated as ignored.
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let tree = cx.add_model(|cx| Worktree::local(dir.path(), Default::default(), cx));
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored(), false);
            assert_eq!(ignored.is_ignored(), true);
        });

        fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored(), false);
            assert_eq!(ignored.is_ignored(), true);
            assert_eq!(dot_git.is_ignored(), true);
        });
    }

    // Randomized stress test: mutate a real directory tree while feeding
    // batches of (synthetic) fs events to a BackgroundScanner, checking
    // snapshot invariants after every batch, and finally comparing against
    // a fresh scan of the end state. Tunable via the ITERATIONS,
    // OPERATIONS, INITIAL_ENTRIES, and SEED environment variables.
    #[test]
    fn test_random() {
        let iterations = env::var("ITERATIONS")
            .map(|i| i.parse().unwrap())
            .unwrap_or(100);
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);
        let seeds = if let Ok(seed) = env::var("SEED").map(|s| s.parse().unwrap()) {
            seed..seed + 1
        } else {
            0..iterations
        };

        for seed in seeds {
            dbg!(seed);
            let mut rng = StdRng::seed_from_u64(seed);

            let root_dir = tempdir::TempDir::new(&format!("test-{}", seed)).unwrap();
            for _ in 0..initial_entries {
                randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
            }
            log::info!("Generated initial tree");

            let (notify_tx, _notify_rx) = smol::channel::unbounded();
            let mut scanner = BackgroundScanner::new(
                Arc::new(Mutex::new(Snapshot {
                    id: 0,
                    scan_id: 0,
                    abs_path: root_dir.path().into(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    removed_entry_ids: Default::default(),
                    ignores: Default::default(),
                    root_name: Default::default(),
                    root_char_bag: Default::default(),
                    next_entry_id: Default::default(),
                })),
                notify_tx,
                0,
            );
            scanner.scan_dirs().unwrap();
            scanner.snapshot().check_invariants();

            // Interleave further mutations with event delivery, so events
            // arrive in random-sized batches and sometimes out of step
            // with the mutations that produced them.
            let mut events = Vec::new();
            let mut mutations_len = operations;
            while mutations_len > 1 {
                if !events.is_empty() && rng.gen_bool(0.4) {
                    let len = rng.gen_range(0..=events.len());
                    let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                    log::info!("Delivering events: {:#?}", to_deliver);
                    scanner.process_events(to_deliver);
                    scanner.snapshot().check_invariants();
                } else {
                    events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                    mutations_len -= 1;
                }
            }
            log::info!("Quiescing: {:#?}", events);
            scanner.process_events(events);
            scanner.snapshot().check_invariants();

            // A fresh scan of the final tree must agree with the snapshot
            // built incrementally from events.
            let (notify_tx, _notify_rx) = smol::channel::unbounded();
            let mut new_scanner = BackgroundScanner::new(
                Arc::new(Mutex::new(Snapshot {
                    id: 0,
                    scan_id: 0,
                    abs_path: root_dir.path().into(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    removed_entry_ids: Default::default(),
                    ignores: Default::default(),
                    root_name: Default::default(),
                    root_char_bag: Default::default(),
                    next_entry_id: Default::default(),
                })),
                notify_tx,
                1,
            );
            new_scanner.scan_dirs().unwrap();
            assert_eq!(scanner.snapshot().to_vec(), new_scanner.snapshot().to_vec());
        }
    }

    // Applies one random mutation to the tree rooted at `root_path`
    // (create file/dir, write a .gitignore, rename, or delete) and returns
    // synthetic fsevents describing the paths that changed.
    // `insertion_probability` biases toward creation.
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        // Event ids are only used as opaque values here; wall-clock
        // seconds are good enough.
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Write a random .gitignore listing some existing files and
            // directories beneath the chosen directory.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                write!(
                    ignore_contents,
                    "{}\n",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Pick an existing file or non-root directory to rename or
            // delete (dirs[0] is the root itself — see read_dir_recursive).
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally replace an existing directory wholesale
                // instead of creating a fresh name under the parent.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                fs::remove_dir_all(&old_path).unwrap();
                // Emit an event for every path the recursive delete removed.
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }

    // Recursively lists (dirs, files) under `path`; the returned dirs
    // list always starts with `path` itself.
    fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
        let child_entries = fs::read_dir(&path).unwrap();
        let mut dirs = vec![path];
        let mut files = Vec::new();
        for child_entry in child_entries {
            let child_path = child_entry.unwrap().path();
            if child_path.is_dir() {
                let (child_dirs, child_files) = read_dir_recursive(child_path);
                dirs.extend(child_dirs);
                files.extend(child_files);
            } else {
                files.push(child_path);
            }
        }
        (dirs, files)
    }

    // Random six-character alphanumeric name for created entries.
    fn gen_name(rng: &mut impl Rng) -> String {
        (0..6)
            .map(|_| rng.sample(rand::distributions::Alphanumeric))
            .map(char::from)
            .collect()
    }

    impl Snapshot {
        // Consistency checks used by test_random after every event batch.
        fn check_invariants(&self) {
            // The file iterators must visit exactly the file entries of
            // the path-ordered tree, with the visible iterator skipping
            // ignored files.
            let mut files = self.files(0);
            let mut visible_files = self.visible_files(0);
            for entry in self.entries_by_path.cursor::<(), ()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode(), entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode(), entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walking via child_entries (breadth-first here, with children
            // inserted in order) must produce the same sequence as the
            // depth-first path-ordered cursor.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, child_entry.path());
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<(), ()>()
                .map(|e| e.path().as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every tracked ignore must correspond to an existing
            // directory entry containing an existing .gitignore entry.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        // Flattens the snapshot to sorted (path, inode, is_ignored)
        // tuples for comparison between scanners.
        fn to_vec(&self) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<(), ()>() {
                paths.push((entry.path().as_ref(), entry.inode(), entry.is_ignored()));
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
}