1mod char_bag;
2mod fuzzy;
3mod ignore;
4
5use self::{char_bag::CharBag, ignore::IgnoreStack};
6use crate::{
7 editor::{self, Buffer, History, Operation, Rope},
8 language::LanguageRegistry,
9 rpc::{self, proto},
10 sum_tree::{self, Cursor, Edit, SumTree},
11 time::ReplicaId,
12 util::Bias,
13};
14use ::ignore::gitignore::Gitignore;
15use anyhow::{anyhow, Context, Result};
16use atomic::Ordering::SeqCst;
17pub use fuzzy::{match_paths, PathMatch};
18use gpui::{
19 scoped_pool, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
20 Task, WeakModelHandle,
21};
22use lazy_static::lazy_static;
23use parking_lot::Mutex;
24use postage::{
25 prelude::{Sink, Stream},
26 watch,
27};
28use smol::{
29 channel::Sender,
30 io::{AsyncReadExt, AsyncWriteExt},
31};
32use std::{
33 cmp,
34 collections::HashMap,
35 convert::TryInto,
36 ffi::{OsStr, OsString},
37 fmt, fs,
38 future::Future,
39 io,
40 ops::Deref,
41 os::unix::fs::MetadataExt,
42 path::{Path, PathBuf},
43 sync::{
44 atomic::{self, AtomicUsize},
45 Arc,
46 },
47 time::{Duration, SystemTime},
48};
49use zed_rpc::{PeerId, TypedEnvelope};
50
lazy_static! {
    // Cached `OsStr` for ".gitignore" so file-name comparisons during scanning
    // don't rebuild the literal each time.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
54
/// Registers the worktree-related RPC message handlers on `rpc`.
///
/// Called once at startup so incoming guest and buffer messages from the
/// server are routed to the appropriate worktree model.
pub fn init(cx: &mut MutableAppContext, rpc: rpc::Client) {
    rpc.on_message(remote::add_guest, cx);
    rpc.on_message(remote::remove_guest, cx);
    rpc.on_message(remote::open_buffer, cx);
    rpc.on_message(remote::close_buffer, cx);
    rpc.on_message(remote::update_buffer, cx);
}
62
/// State of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan is currently in progress.
    Idle,
    /// A scan is running on the background scanner thread.
    Scanning,
    /// The scan failed; the error is `Arc`-wrapped so the state stays `Clone`.
    Err(Arc<io::Error>),
}
69
/// A directory tree being edited: either rooted on the local filesystem, or
/// hosted by a remote collaborator and mirrored over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
74
impl Entity for Worktree {
    type Event = ();

    // When the worktree model is dropped, notify the server (if this tree is
    // shared or remote) so server-side state can be released.
    fn release(&mut self, cx: &mut MutableAppContext) {
        // For a local tree, `rpc` is only `Some` once the tree has been
        // shared; remote trees always carry a client and server-assigned id.
        let rpc = match self {
            Self::Local(tree) => tree.rpc.clone(),
            Self::Remote(tree) => Some((tree.rpc.clone(), tree.remote_id)),
        };

        if let Some((rpc, worktree_id)) = rpc {
            cx.spawn(|_| async move {
                // Unregister locally before telling the server the worktree
                // is closed; errors are logged, not surfaced (we're dropping).
                rpc.state.lock().await.shared_worktrees.remove(&worktree_id);
                if let Err(err) = rpc.send(proto::CloseWorktree { worktree_id }).await {
                    log::error!("error closing worktree {}: {}", worktree_id, err);
                }
            })
            .detach();
        }
    }
}
95
96impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem.
    pub fn local(
        path: impl Into<Arc<Path>>,
        languages: Arc<LanguageRegistry>,
        cx: &mut ModelContext<Worktree>,
    ) -> Self {
        Worktree::Local(LocalWorktree::new(path, languages, cx))
    }
104
    /// Opens a worktree hosted by another collaborator.
    ///
    /// Requests the worktree state from the server, builds a
    /// [`RemoteWorktree`] model from it, and registers the model in the RPC
    /// client's shared-worktree registry so incoming messages can find it.
    pub async fn remote(
        rpc: rpc::Client,
        id: u64,
        access_token: String,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let open_worktree_response = rpc
            .request(proto::OpenWorktree {
                worktree_id: id,
                access_token,
            })
            .await?;
        let worktree_message = open_worktree_response
            .worktree
            .ok_or_else(|| anyhow!("empty worktree"))?;
        let replica_id = open_worktree_response.replica_id;
        let peers = open_worktree_response.peers;
        let worktree = cx.update(|cx| {
            cx.add_model(|cx| {
                Worktree::Remote(RemoteWorktree::new(
                    id,
                    replica_id as ReplicaId,
                    worktree_message,
                    peers,
                    rpc.clone(),
                    languages,
                    cx,
                ))
            })
        });
        // Register a weak handle so the registry doesn't keep the model alive.
        rpc.state
            .lock()
            .await
            .shared_worktrees
            .insert(id, worktree.downgrade());
        Ok(worktree)
    }
143
144 pub fn as_local(&self) -> Option<&LocalWorktree> {
145 if let Worktree::Local(worktree) = self {
146 Some(worktree)
147 } else {
148 None
149 }
150 }
151
152 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
153 if let Worktree::Local(worktree) = self {
154 Some(worktree)
155 } else {
156 None
157 }
158 }
159
160 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
161 if let Worktree::Remote(worktree) = self {
162 Some(worktree)
163 } else {
164 None
165 }
166 }
167
168 pub fn snapshot(&self) -> Snapshot {
169 match self {
170 Worktree::Local(worktree) => worktree.snapshot(),
171 Worktree::Remote(worktree) => worktree.snapshot.clone(),
172 }
173 }
174
    /// Dispatches an `AddGuest` message to the underlying worktree variant.
    pub fn add_guest(
        &mut self,
        envelope: TypedEnvelope<proto::AddGuest>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        match self {
            Worktree::Local(worktree) => worktree.add_guest(envelope, cx),
            Worktree::Remote(worktree) => worktree.add_guest(envelope, cx),
        }
    }
185
    /// Dispatches a `RemoveGuest` message to the underlying worktree variant.
    pub fn remove_guest(
        &mut self,
        envelope: TypedEnvelope<proto::RemoveGuest>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        match self {
            Worktree::Local(worktree) => worktree.remove_guest(envelope, cx),
            Worktree::Remote(worktree) => worktree.remove_guest(envelope, cx),
        }
    }
196
    /// Opens (or returns an already-open) buffer for the file at `path`,
    /// relative to the worktree root.
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        match self {
            Worktree::Local(worktree) => worktree.open_buffer(path.as_ref(), cx),
            Worktree::Remote(worktree) => worktree.open_buffer(path.as_ref(), cx),
        }
    }
207
208 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
209 let open_buffers = match self {
210 Worktree::Local(worktree) => &worktree.open_buffers,
211 Worktree::Remote(worktree) => &worktree.open_buffers,
212 };
213
214 let path = path.as_ref();
215 open_buffers
216 .values()
217 .find(|buffer| {
218 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
219 file.path.as_ref() == path
220 } else {
221 false
222 }
223 })
224 .is_some()
225 }
226
    /// Applies remote edit operations from an `UpdateBuffer` message to the
    /// corresponding open buffer.
    ///
    /// # Errors
    /// Fails if the buffer id is unknown (or the buffer model was dropped),
    /// if an operation cannot be deserialized, or if applying it fails.
    pub fn update_buffer(
        &mut self,
        envelope: proto::UpdateBuffer,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let open_buffers = match self {
            Worktree::Local(worktree) => &worktree.open_buffers,
            Worktree::Remote(worktree) => &worktree.open_buffers,
        };
        let buffer = open_buffers
            .get(&(envelope.buffer_id as usize))
            .and_then(|buf| buf.upgrade(&cx))
            .ok_or_else(|| {
                anyhow!(
                    "invalid buffer {} in update buffer message",
                    envelope.buffer_id
                )
            })?;
        // Deserialize every operation first so a malformed message is
        // rejected atomically rather than partially applied.
        let ops = envelope
            .operations
            .into_iter()
            .map(|op| op.try_into())
            .collect::<anyhow::Result<Vec<_>>>()?;
        buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
        Ok(())
    }
253
    /// Writes `text` to `path` on disk.
    ///
    /// Only implemented for local worktrees; saving through a remote worktree
    /// is not yet supported and currently panics (`todo!`).
    fn save(
        &self,
        path: &Path,
        text: Rope,
        cx: &mut ModelContext<Self>,
    ) -> impl Future<Output = Result<()>> {
        match self {
            Worktree::Local(worktree) => {
                let save = worktree.save(path, text, cx);
                // Discard the saved `Entry` so both arms share an output type.
                async move {
                    save.await?;
                    Ok(())
                }
            }
            Worktree::Remote(_) => todo!(),
        }
    }
271}
272
273impl Deref for Worktree {
274 type Target = Snapshot;
275
276 fn deref(&self) -> &Self::Target {
277 match self {
278 Worktree::Local(worktree) => &worktree.snapshot,
279 Worktree::Remote(worktree) => &worktree.snapshot,
280 }
281 }
282}
283
/// A worktree backed by a directory on the local filesystem.
pub struct LocalWorktree {
    // Foreground copy of the tree state, refreshed from `background_snapshot`.
    snapshot: Snapshot,
    // Snapshot owned by the background scanner; cloned into `snapshot`.
    background_snapshot: Arc<Mutex<Snapshot>>,
    // Broadcasts scanner progress (`Idle`/`Scanning`/`Err`) to observers.
    scan_state: (watch::Sender<ScanState>, watch::Receiver<ScanState>),
    // Keeps the filesystem event stream alive for the worktree's lifetime.
    _event_stream_handle: fsevent::Handle,
    // True while a deferred `poll_snapshot` is pending, to avoid re-scheduling.
    poll_scheduled: bool,
    // `Some((client, server_id))` once this worktree has been shared.
    rpc: Option<(rpc::Client, u64)>,
    // Weak handles to buffers opened from this tree, keyed by buffer id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles held on behalf of guests, keyed by peer then buffer id.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Replica ids of connected guests.
    peers: HashMap<PeerId, ReplicaId>,
    languages: Arc<LanguageRegistry>,
}
296
297impl LocalWorktree {
    /// Creates the worktree, spawns the background scanner thread, and starts
    /// a task that mirrors scanner state changes into the model.
    fn new(
        path: impl Into<Arc<Path>>,
        languages: Arc<LanguageRegistry>,
        cx: &mut ModelContext<Worktree>,
    ) -> Self {
        let abs_path = path.into();
        let (scan_state_tx, scan_state_rx) = smol::channel::unbounded();
        let id = cx.model_id();
        // Start from an empty snapshot; the scanner fills it in.
        let snapshot = Snapshot {
            id,
            scan_id: 0,
            abs_path,
            root_name: Default::default(),
            root_char_bag: Default::default(),
            ignores: Default::default(),
            entries: Default::default(),
            paths_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            next_entry_id: Default::default(),
        };
        // Coalesce filesystem events with a 100ms debounce window.
        let (event_stream, event_stream_handle) =
            fsevent::EventStream::new(&[snapshot.abs_path.as_ref()], Duration::from_millis(100));

        let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));

        let tree = Self {
            snapshot,
            background_snapshot: background_snapshot.clone(),
            scan_state: watch::channel_with(ScanState::Scanning),
            _event_stream_handle: event_stream_handle,
            poll_scheduled: false,
            open_buffers: Default::default(),
            shared_buffers: Default::default(),
            peers: Default::default(),
            rpc: None,
            languages,
        };

        // The scanner gets a dedicated OS thread: it does blocking filesystem
        // I/O and event-stream processing.
        std::thread::spawn(move || {
            let scanner = BackgroundScanner::new(background_snapshot, scan_state_tx, id);
            scanner.run(event_stream)
        });

        // Forward scan-state updates into the model. A weak handle is held so
        // this task doesn't keep the model alive; it stops once upgrade fails.
        cx.spawn(|this, mut cx| {
            let this = this.downgrade();
            async move {
                while let Ok(scan_state) = scan_state_rx.recv().await {
                    let alive = cx.update(|cx| {
                        if let Some(handle) = this.upgrade(&cx) {
                            handle.update(cx, |this, cx| {
                                if let Worktree::Local(worktree) = this {
                                    worktree.observe_scan_state(scan_state, cx)
                                } else {
                                    // This task is only ever spawned for local
                                    // worktrees, and the variant never changes.
                                    unreachable!()
                                }
                            });
                            true
                        } else {
                            false
                        }
                    });

                    if !alive {
                        break;
                    }
                }
            }
        })
        .detach();

        tree
    }
370
    /// Opens the buffer at worktree-relative `path`, reusing an existing live
    /// buffer for the same file when possible.
    pub fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let handle = cx.handle();

        // If there is already a buffer for the given path, then return it.
        // (`retain` doubles as a sweep of dead weak handles.)
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == handle.id() && file.path.as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });

        let languages = self.languages.clone();
        let path = Arc::from(path);
        cx.spawn(|this, mut cx| async move {
            if let Some(existing_buffer) = existing_buffer {
                Ok(existing_buffer)
            } else {
                // Load the file's contents plus an up-to-date `File` handle.
                let (file, contents) = this
                    .update(&mut cx, |this, cx| this.as_local().unwrap().load(&path, cx))
                    .await?;
                let language = languages.select_language(&path).cloned();
                let buffer = cx.add_model(|cx| {
                    Buffer::from_history(0, History::new(contents.into()), Some(file), language, cx)
                });
                // Register a weak handle so the buffer can be found/reused.
                this.update(&mut cx, |this, _| {
                    let this = this
                        .as_local_mut()
                        .ok_or_else(|| anyhow!("must be a local worktree"))?;
                    this.open_buffers.insert(buffer.id(), buffer.downgrade());
                    Ok(buffer)
                })
            }
        })
    }
416
    /// Handles a guest's `OpenBuffer` request: opens the buffer locally,
    /// retains a strong handle on the guest's behalf, and replies with the
    /// buffer's serialized state.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        let peer_id = envelope.original_sender_id();
        let path = Path::new(&envelope.payload.path);

        let buffer = self.open_buffer(path, cx);

        cx.spawn(|this, mut cx| async move {
            let buffer = buffer.await?;
            this.update(&mut cx, |this, cx| {
                // Hold the buffer strongly for this guest until they close it
                // (see `close_remote_buffer`).
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, cx| buffer.to_proto(cx))),
                })
            })
        })
    }
443
444 pub fn close_remote_buffer(
445 &mut self,
446 envelope: TypedEnvelope<proto::CloseBuffer>,
447 cx: &mut ModelContext<Worktree>,
448 ) -> Result<()> {
449 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
450 shared_buffers.remove(&envelope.payload.buffer_id);
451 }
452
453 Ok(())
454 }
455
456 pub fn add_guest(
457 &mut self,
458 envelope: TypedEnvelope<proto::AddGuest>,
459 cx: &mut ModelContext<Worktree>,
460 ) -> Result<()> {
461 let guest = envelope
462 .payload
463 .guest
464 .ok_or_else(|| anyhow!("empty peer"))?;
465 self.peers
466 .insert(PeerId(guest.peer_id), guest.replica_id as ReplicaId);
467 Ok(())
468 }
469
    /// Removes a departing guest: forgets their replica id, drops the buffers
    /// held on their behalf, and clears their state from every open buffer.
    pub fn remove_guest(
        &mut self,
        envelope: TypedEnvelope<proto::RemoveGuest>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let peer_id = PeerId(envelope.payload.peer_id);
        let replica_id = self
            .peers
            .remove(&peer_id)
            .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?;
        self.shared_buffers.remove(&peer_id);
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(&cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_guest(replica_id, cx));
            }
        }
        Ok(())
    }
488
489 pub fn scan_complete(&self) -> impl Future<Output = ()> {
490 let mut scan_state_rx = self.scan_state.1.clone();
491 async move {
492 let mut scan_state = Some(scan_state_rx.borrow().clone());
493 while let Some(ScanState::Scanning) = scan_state {
494 scan_state = scan_state_rx.recv().await;
495 }
496 }
497 }
498
    /// Records the scanner's latest state and refreshes the foreground
    /// snapshot from the background one.
    fn observe_scan_state(&mut self, scan_state: ScanState, cx: &mut ModelContext<Worktree>) {
        // Ignore send failures: they just mean nobody is watching the state.
        self.scan_state.0.blocking_send(scan_state).ok();
        self.poll_snapshot(cx);
    }
503
    /// Copies the background scanner's snapshot into the foreground and
    /// reconciles open buffers with it.
    ///
    /// While a scan is still running this just re-schedules itself (at most
    /// one pending poll at a time). Once scanning is done, each open buffer's
    /// `File` is updated to reflect renames, modifications, and deletions,
    /// and entries for dropped buffer models are pruned.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
        self.snapshot = self.background_snapshot.lock().clone();
        if self.is_scanning() {
            if !self.poll_scheduled {
                cx.spawn(|this, mut cx| async move {
                    // Debounce: re-poll 100ms later rather than per-event.
                    smol::Timer::after(Duration::from_millis(100)).await;
                    this.update(&mut cx, |this, cx| {
                        let worktree = this.as_local_mut().unwrap();
                        worktree.poll_scheduled = false;
                        worktree.poll_snapshot(cx);
                    })
                })
                .detach();
                self.poll_scheduled = true;
            }
        } else {
            let mut buffers_to_delete = Vec::new();
            for (buffer_id, buffer) in &self.open_buffers {
                if let Some(buffer) = buffer.upgrade(&cx) {
                    buffer.update(cx, |buffer, cx| {
                        let buffer_is_clean = !buffer.is_dirty();

                        if let Some(file) = buffer.file_mut() {
                            let mut file_changed = false;

                            if let Some(entry) = file
                                .entry_id
                                .and_then(|entry_id| self.entry_for_id(entry_id))
                            {
                                // Entry still exists: pick up renames…
                                if entry.path != file.path {
                                    file.path = entry.path.clone();
                                    file_changed = true;
                                }

                                // …and on-disk modifications. Clean buffers are
                                // reloaded from disk; dirty ones keep their edits.
                                if entry.mtime != file.mtime {
                                    file.mtime = entry.mtime;
                                    file_changed = true;
                                    if buffer_is_clean {
                                        let abs_path = self.absolutize(&file.path);
                                        refresh_buffer(abs_path, cx);
                                    }
                                }
                            } else if let Some(entry) = self.entry_for_path(&file.path) {
                                // The old entry is gone but a file exists at the
                                // same path again — re-bind to the new entry.
                                file.entry_id = Some(entry.id);
                                file.mtime = entry.mtime;
                                if buffer_is_clean {
                                    let abs_path = self.absolutize(&file.path);
                                    refresh_buffer(abs_path, cx);
                                }
                                file_changed = true;
                            } else if !file.is_deleted() {
                                // The file was deleted on disk. A clean buffer
                                // becomes dirty: its contents now exist only in
                                // memory.
                                if buffer_is_clean {
                                    cx.emit(editor::buffer::Event::Dirtied);
                                }
                                file.entry_id = None;
                                file_changed = true;
                            }

                            if file_changed {
                                cx.emit(editor::buffer::Event::FileHandleChanged);
                            }
                        }
                    });
                } else {
                    // The buffer model was dropped; forget its weak handle.
                    buffers_to_delete.push(*buffer_id);
                }
            }

            for buffer_id in buffers_to_delete {
                self.open_buffers.remove(&buffer_id);
            }
        }

        cx.notify();
    }
579
580 fn is_scanning(&self) -> bool {
581 if let ScanState::Scanning = *self.scan_state.1.borrow() {
582 true
583 } else {
584 false
585 }
586 }
587
    /// Returns an owned copy of the current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// The absolute path of the worktree root on disk.
    pub fn abs_path(&self) -> &Path {
        self.snapshot.abs_path.as_ref()
    }

    /// Whether the absolute `path` lies inside this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }

    /// Converts a worktree-relative `path` into an absolute path. A path with
    /// no file name (i.e. the empty path) refers to the worktree root itself.
    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.snapshot.abs_path.join(path)
        } else {
            self.snapshot.abs_path.to_path_buf()
        }
    }
607
    /// Reads the file at worktree-relative `path` from disk, returning a
    /// `File` handle plus its contents. The snapshot entry for the file is
    /// refreshed eagerly so the handle's metadata is current.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        cx.spawn(|this, mut cx| async move {
            let mut file = smol::fs::File::open(&abs_path).await?;
            let mut text = String::new();
            file.read_to_string(&mut text).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(&background_snapshot, path, &abs_path)?;
            // Propagate the refreshed background snapshot to the foreground.
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                this.poll_snapshot(cx);
            });
            Ok((File::new(entry.id, handle, entry.path, entry.mtime), text))
        })
    }
626
    /// Saves `text` to `path` and registers `buffer` as the open buffer for
    /// the resulting file, returning the new `File` handle.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                // Track the buffer weakly so later opens of this path reuse it.
                this.as_local_mut()
                    .unwrap()
                    .open_buffers
                    .insert(buffer.id(), buffer.downgrade());
                Ok(File::new(entry.id, cx.handle(), entry.path, entry.mtime))
            })
        })
    }
646
    /// Writes `text` to worktree-relative `path` on a background thread, then
    /// refreshes the snapshot entry for the saved file.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();

        let save = cx.background().spawn(async move {
            // Size the write buffer to the text, capped at 10 KiB.
            let buffer_size = text.summary().bytes.min(10 * 1024);
            let file = smol::fs::File::create(&abs_path).await?;
            let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);
            for chunk in text.chunks() {
                writer.write_all(chunk.as_bytes()).await?;
            }
            writer.flush().await?;
            refresh_entry(&background_snapshot, path.clone(), &abs_path)
        });

        // After the write completes, fold the refreshed background snapshot
        // into the foreground one.
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().poll_snapshot(cx);
            });
            Ok(entry)
        })
    }
676
    /// Shares this worktree with collaborators: serializes all entries,
    /// uploads them to the server, registers the worktree for incoming
    /// messages, and records the server-assigned id on the model.
    ///
    /// Resolves to `(worktree_id, access_token)` for inviting guests.
    pub fn share(
        &mut self,
        rpc: rpc::Client,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<(u64, String)>> {
        let root_name = self.root_name.clone();
        let snapshot = self.snapshot();
        let handle = cx.handle();
        cx.spawn(|this, mut cx| async move {
            // Serialize the entry tree on a background thread; it can be large.
            let entries = cx
                .background()
                .spawn(async move {
                    snapshot
                        .entries
                        .cursor::<(), ()>()
                        .map(|entry| proto::Entry {
                            id: entry.id as u64,
                            is_dir: entry.is_dir(),
                            path: entry.path.to_string_lossy().to_string(),
                            inode: entry.inode,
                            mtime: Some(entry.mtime.into()),
                            is_symlink: entry.is_symlink,
                            is_ignored: entry.is_ignored,
                        })
                        .collect()
                })
                .await;

            let share_response = rpc
                .request(proto::ShareWorktree {
                    worktree: Some(proto::Worktree { root_name, entries }),
                })
                .await?;

            // Register weakly: the registry must not keep the model alive.
            rpc.state
                .lock()
                .await
                .shared_worktrees
                .insert(share_response.worktree_id, handle.downgrade());

            log::info!("sharing worktree {:?}", share_response);

            this.update(&mut cx, |worktree, _| {
                worktree.as_local_mut().unwrap().rpc = Some((rpc, share_response.worktree_id));
            });
            Ok((share_response.worktree_id, share_response.access_token))
        })
    }
725}
726
/// Reloads a buffer's contents from `abs_path` after its file changed on
/// disk. The read happens on a background thread; failures are logged rather
/// than surfaced, since this refresh is best-effort.
pub fn refresh_buffer(abs_path: PathBuf, cx: &mut ModelContext<Buffer>) {
    cx.spawn(|buffer, mut cx| async move {
        let new_text = cx
            .background()
            .spawn(async move {
                let mut file = smol::fs::File::open(&abs_path).await?;
                let mut text = String::new();
                file.read_to_string(&mut text).await?;
                Ok::<_, anyhow::Error>(text.into())
            })
            .await;

        match new_text {
            Err(error) => log::error!("error refreshing buffer after file changed: {}", error),
            Ok(new_text) => {
                buffer
                    .update(&mut cx, |buffer, cx| {
                        buffer.set_text_from_disk(new_text, cx)
                    })
                    .await;
            }
        }
    })
    .detach()
}
752
impl Deref for LocalWorktree {
    type Target = Snapshot;

    // Expose the snapshot's read-only API directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
760
761impl fmt::Debug for LocalWorktree {
762 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
763 self.snapshot.fmt(f)
764 }
765}
766
/// A mirror of a worktree hosted on another collaborator's machine, kept in
/// sync over RPC.
pub struct RemoteWorktree {
    // Server-assigned id used to address this worktree in RPC messages.
    remote_id: u64,
    snapshot: Snapshot,
    rpc: rpc::Client,
    // Replica id assigned to the local user for edits in this tree.
    replica_id: ReplicaId,
    // Weak handles to open buffers, keyed by the host-assigned buffer id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Replica ids of the other connected guests.
    peers: HashMap<PeerId, ReplicaId>,
    languages: Arc<LanguageRegistry>,
}
776
777impl RemoteWorktree {
    /// Builds the local mirror of a remote worktree from the server-provided
    /// entry list and peer roster.
    fn new(
        remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        peers: Vec<proto::Peer>,
        rpc: rpc::Client,
        languages: Arc<LanguageRegistry>,
        cx: &mut ModelContext<Worktree>,
    ) -> Self {
        // Lower-cased character bag of the root name, used for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let mut entries = SumTree::new();
        let mut paths_by_id = rpds::HashTrieMapSync::default();
        for entry in worktree.entries {
            if let Some(mtime) = entry.mtime {
                let kind = if entry.is_dir {
                    EntryKind::Dir
                } else {
                    // Files carry their full lower-cased path's characters for
                    // fuzzy path matching.
                    let mut char_bag = root_char_bag.clone();
                    char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
                    EntryKind::File(char_bag)
                };
                let path: Arc<Path> = Arc::from(Path::new(&entry.path));
                entries.push(
                    Entry {
                        id: entry.id as usize,
                        kind,
                        path: path.clone(),
                        inode: entry.inode,
                        mtime: mtime.into(),
                        is_symlink: entry.is_symlink,
                        is_ignored: entry.is_ignored,
                    },
                    &(),
                );
                paths_by_id.insert_mut(entry.id as usize, path);
            } else {
                // Entries without an mtime can't be represented locally; skip.
                log::warn!("missing mtime in remote worktree entry {:?}", entry.path);
            }
        }
        // Remote snapshots have no on-disk location, hence the empty abs_path.
        let snapshot = Snapshot {
            id: cx.model_id(),
            scan_id: 0,
            abs_path: Path::new("").into(),
            root_name: worktree.root_name,
            root_char_bag,
            ignores: Default::default(),
            entries,
            paths_by_id,
            removed_entry_ids: Default::default(),
            next_entry_id: Default::default(),
        };
        Self {
            remote_id,
            snapshot,
            rpc,
            replica_id,
            open_buffers: Default::default(),
            peers: peers
                .into_iter()
                .map(|p| (PeerId(p.peer_id), p.replica_id as ReplicaId))
                .collect(),
            languages,
        }
    }
846
    /// Opens a buffer for `path`, reusing an existing live buffer for the
    /// same file when possible; otherwise requests the buffer's state from
    /// the host over RPC and reconstructs it locally.
    pub fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let handle = cx.handle();
        // Look for an existing buffer on this path, pruning dead weak handles
        // along the way.
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == handle.id() && file.path.as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });

        let rpc = self.rpc.clone();
        let languages = self.languages.clone();
        let replica_id = self.replica_id;
        let remote_worktree_id = self.remote_id;
        let path = path.to_string_lossy().to_string();
        cx.spawn(|this, mut cx| async move {
            if let Some(existing_buffer) = existing_buffer {
                Ok(existing_buffer)
            } else {
                let entry = this
                    .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                    .ok_or_else(|| anyhow!("file does not exist"))?;
                let file = File::new(entry.id, handle, entry.path, entry.mtime);
                let language = languages.select_language(&path).cloned();
                let response = rpc
                    .request(proto::OpenBuffer {
                        worktree_id: remote_worktree_id as u64,
                        path,
                    })
                    .await?;
                let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
                let buffer_id = remote_buffer.id;
                let buffer = cx.add_model(|cx| {
                    Buffer::from_proto(replica_id, remote_buffer, Some(file), language, cx).unwrap()
                });
                // Key the buffer by the host-assigned id so incoming updates
                // can be routed to it.
                this.update(&mut cx, |this, _| {
                    let this = this.as_remote_mut().unwrap();
                    this.open_buffers
                        .insert(buffer_id as usize, buffer.downgrade());
                });
                Ok(buffer)
            }
        })
    }
901
902 pub fn add_guest(
903 &mut self,
904 envelope: TypedEnvelope<proto::AddGuest>,
905 cx: &mut ModelContext<Worktree>,
906 ) -> Result<()> {
907 let guest = envelope
908 .payload
909 .guest
910 .ok_or_else(|| anyhow!("empty peer"))?;
911 self.peers
912 .insert(PeerId(guest.peer_id), guest.replica_id as ReplicaId);
913 Ok(())
914 }
915
    /// Removes a departing guest's replica id and clears their state from
    /// every open buffer.
    pub fn remove_guest(
        &mut self,
        envelope: TypedEnvelope<proto::RemoveGuest>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let peer_id = PeerId(envelope.payload.peer_id);
        let replica_id = self
            .peers
            .remove(&peer_id)
            .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?;
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(&cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_guest(replica_id, cx));
            }
        }
        Ok(())
    }
933}
934
/// A point-in-time view of a worktree's file entries.
///
/// Cloning is intended to be cheap: the entry tree and id→path index are
/// persistent structures that share their contents across clones.
#[derive(Clone)]
pub struct Snapshot {
    // Model id of the owning worktree.
    id: usize,
    // Bumped on each rescan; used to tag `.gitignore` state (see `ignores`).
    scan_id: usize,
    abs_path: Arc<Path>,
    root_name: String,
    // Lower-cased characters of the root name, used for fuzzy path matching.
    root_char_bag: CharBag,
    // Parsed `.gitignore` per directory, tagged with the scan that loaded it.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // All entries, ordered by path.
    entries: SumTree<Entry>,
    // Persistent index from entry id to its path.
    paths_by_id: rpds::HashTrieMapSync<usize, Arc<Path>>,
    // Maps inode → highest entry id removed for it, so ids can be reused when
    // a file reappears (see `reuse_entry_id`).
    removed_entry_ids: HashMap<u64, usize>,
    next_entry_id: Arc<AtomicUsize>,
}
948
949impl Snapshot {
    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries.summary().file_count
    }

    /// Number of file entries that are not ignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries.summary().visible_file_count
    }

    /// Iterates all files, beginning at the `start`-th file.
    pub fn files(&self, start: usize) -> FileIter {
        FileIter::all(self, start)
    }

    /// Iterates the paths of all entries except the root (the empty path).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries
            .cursor::<(), ()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| entry.path())
    }

    /// Iterates non-ignored files, beginning at the `start`-th visible file.
    pub fn visible_files(&self, start: usize) -> FileIter {
        FileIter::visible(self, start)
    }

    /// Iterates the immediate children of the directory at `path`.
    fn child_entries<'a>(&'a self, path: &'a Path) -> ChildEntriesIter<'a> {
        ChildEntriesIter::new(path, self)
    }

    /// The entry for the worktree root itself (the empty relative path).
    pub fn root_entry(&self) -> &Entry {
        self.entry_for_path("").unwrap()
    }

    /// Returns the filename of the snapshot's root, plus a trailing slash if the snapshot's root is
    /// a directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
987
    /// Finds the entry at exactly `path` by seeking the path-ordered entry
    /// tree.
    fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let mut cursor = self.entries.cursor::<_, ()>();
        // `seek` signals via its return value whether the target was found;
        // only then does `item()` point at the matching entry.
        if cursor.seek(&PathSearch::Exact(path.as_ref()), Bias::Left, &()) {
            cursor.item()
        } else {
            None
        }
    }
996
997 fn entry_for_id(&self, id: usize) -> Option<&Entry> {
998 let path = self.paths_by_id.get(&id)?;
999 self.entry_for_path(path)
1000 }
1001
1002 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1003 self.entry_for_path(path.as_ref()).map(|e| e.inode())
1004 }
1005
    /// Inserts (or replaces) `entry`, keeping the id→path index in sync.
    ///
    /// If the entry is a `.gitignore` file, its contents are (re)parsed and
    /// registered for the containing directory.
    fn insert_entry(&mut self, mut entry: Entry) -> Entry {
        if !entry.is_dir() && entry.path().file_name() == Some(&GITIGNORE) {
            let (ignore, err) = Gitignore::new(self.abs_path.join(entry.path()));
            if let Some(err) = err {
                // `Gitignore::new` reports problems alongside a (possibly
                // partial) ignore, so just log and keep the result.
                log::error!("error in ignore file {:?} - {:?}", entry.path(), err);
            }

            let ignore_dir_path = entry.path().parent().unwrap();
            self.ignores
                .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
        }

        self.reuse_entry_id(&mut entry);
        self.entries.insert_or_replace(entry.clone(), &());
        self.paths_by_id.insert_mut(entry.id, entry.path.clone());
        entry
    }
1023
    /// Fills in the children of a directory that was previously inserted as
    /// `PendingDir`, marking it `Dir` and applying all inserts as one batch
    /// edit.
    ///
    /// # Panics
    /// Panics if `parent_path` is absent or is not a pending directory.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut edits = Vec::new();

        let mut parent_entry = self
            .entries
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            // Callers only populate directories they inserted as pending.
            unreachable!();
        }
        edits.push(Edit::Insert(parent_entry));

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            self.paths_by_id.insert_mut(entry.id, entry.path.clone());
            edits.push(Edit::Insert(entry));
        }
        self.entries.edit(edits, &());
    }
1054
    /// Keeps entry ids stable across rescans: prefer the id of a removed
    /// entry with the same inode (e.g. a file that was re-created or moved),
    /// otherwise the id of the existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }
1062
    /// Removes the entry at `path` together with all entries beneath it.
    ///
    /// Removed ids are remembered per-inode so they can be reused if the same
    /// file reappears, and the id→path index is updated accordingly.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entry_ids;
        {
            // Split the tree into [before `path`] + [subtree at `path`] +
            // [after]; keep the outer parts, capture the middle as removed.
            let mut cursor = self.entries.cursor::<_, ()>();
            new_entries = cursor.slice(&PathSearch::Exact(path), Bias::Left, &());
            removed_entry_ids = cursor.slice(&PathSearch::Successor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries = new_entries;
        for entry in removed_entry_ids.cursor::<(), ()>() {
            // Keep the highest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            self.paths_by_id.remove_mut(&entry.id);
        }

        if path.file_name() == Some(&GITIGNORE) {
            // Record that the directory's ignore changed in this scan.
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }
1088
1089 fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1090 let mut new_ignores = Vec::new();
1091 for ancestor in path.ancestors().skip(1) {
1092 if let Some((ignore, _)) = self.ignores.get(ancestor) {
1093 new_ignores.push((ancestor, Some(ignore.clone())));
1094 } else {
1095 new_ignores.push((ancestor, None));
1096 }
1097 }
1098
1099 let mut ignore_stack = IgnoreStack::none();
1100 for (parent_path, ignore) in new_ignores.into_iter().rev() {
1101 if ignore_stack.is_path_ignored(&parent_path, true) {
1102 ignore_stack = IgnoreStack::all();
1103 break;
1104 } else if let Some(ignore) = ignore {
1105 ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
1106 }
1107 }
1108
1109 if ignore_stack.is_path_ignored(path, is_dir) {
1110 ignore_stack = IgnoreStack::all();
1111 }
1112
1113 ignore_stack
1114 }
1115}
1116
impl fmt::Debug for Snapshot {
    // Prints every entry's path and inode, indented one space per path level.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for entry in self.entries.cursor::<(), ()>() {
            for _ in entry.path().ancestors().skip(1) {
                write!(f, " ")?;
            }
            writeln!(f, "{:?} (inode: {})", entry.path(), entry.inode())?;
        }
        Ok(())
    }
}
1128
/// A handle to a file within some worktree, as held by an open buffer.
#[derive(Clone, PartialEq)]
pub struct File {
    // `None` once the underlying file has been deleted from the worktree.
    entry_id: Option<usize>,
    worktree: ModelHandle<Worktree>,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    // Modification time last observed for this file.
    pub mtime: SystemTime,
}
1136
1137impl File {
    /// Creates a handle for an existing worktree entry.
    pub fn new(
        entry_id: usize,
        worktree: ModelHandle<Worktree>,
        path: Arc<Path>,
        mtime: SystemTime,
    ) -> Self {
        Self {
            entry_id: Some(entry_id),
            worktree,
            path,
            mtime,
        }
    }
1151
    /// Broadcasts a local buffer edit to collaborators. No-op for a local
    /// worktree that hasn't been shared; send failures are only logged.
    pub fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Some((rpc, remote_id)) = match worktree {
                Worktree::Local(worktree) => worktree.rpc.clone(),
                Worktree::Remote(worktree) => Some((worktree.rpc.clone(), worktree.remote_id)),
            } {
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::UpdateBuffer {
                                worktree_id: remote_id,
                                buffer_id,
                                // Serialize the single operation into the
                                // repeated proto field via a one-item iterator.
                                operations: Some(operation).iter().map(Into::into).collect(),
                            })
                            .await
                        {
                            log::error!("error sending buffer operation: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }
1175
    /// Tells the host to release a remote buffer when the local model for it
    /// goes away. No-op for local worktrees.
    pub fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let worktree_id = worktree.remote_id;
                let rpc = worktree.rpc.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        };
                    })
                    .detach();
            }
        });
    }
1197
1198 /// Returns this file's path relative to the root of its worktree.
1199 pub fn path(&self) -> Arc<Path> {
1200 self.path.clone()
1201 }
1202
1203 pub fn abs_path(&self, cx: &AppContext) -> PathBuf {
1204 self.worktree.read(cx).abs_path.join(&self.path)
1205 }
1206
1207 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1208 /// of its worktree, then this method will return the name of the worktree itself.
1209 pub fn file_name<'a>(&'a self, cx: &'a AppContext) -> Option<OsString> {
1210 self.path
1211 .file_name()
1212 .or_else(|| Some(OsStr::new(self.worktree.read(cx).root_name())))
1213 .map(Into::into)
1214 }
1215
1216 pub fn is_deleted(&self) -> bool {
1217 self.entry_id.is_none()
1218 }
1219
1220 pub fn exists(&self) -> bool {
1221 !self.is_deleted()
1222 }
1223
1224 pub fn save(&self, text: Rope, cx: &mut MutableAppContext) -> impl Future<Output = Result<()>> {
1225 self.worktree
1226 .update(cx, |worktree, cx| worktree.save(&self.path, text, cx))
1227 }
1228
1229 pub fn worktree_id(&self) -> usize {
1230 self.worktree.id()
1231 }
1232
1233 pub fn entry_id(&self) -> (usize, Arc<Path>) {
1234 (self.worktree.id(), self.path())
1235 }
1236}
1237
/// A single file or directory recorded in a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    // Stable identifier for the entry (assigned from `next_entry_id`).
    id: usize,
    kind: EntryKind,
    // Path relative to the worktree root.
    path: Arc<Path>,
    inode: u64,
    mtime: SystemTime,
    is_symlink: bool,
    // True when the entry is matched by a `.gitignore` rule.
    is_ignored: bool,
}
1248
/// Discriminates files from directories in the entry tree.
#[derive(Clone, Debug)]
pub enum EntryKind {
    /// A directory that has been discovered but not yet scanned
    /// (presumably promoted to `Dir` once populated — set elsewhere).
    PendingDir,
    /// A scanned directory.
    Dir,
    /// A file, carrying the character bag used for fuzzy path matching.
    File(CharBag),
}
1255
1256impl Entry {
1257 pub fn path(&self) -> &Arc<Path> {
1258 &self.path
1259 }
1260
1261 pub fn inode(&self) -> u64 {
1262 self.inode
1263 }
1264
1265 pub fn is_ignored(&self) -> bool {
1266 self.is_ignored
1267 }
1268
1269 fn is_dir(&self) -> bool {
1270 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
1271 }
1272
1273 fn is_file(&self) -> bool {
1274 matches!(self.kind, EntryKind::File(_))
1275 }
1276}
1277
1278impl sum_tree::Item for Entry {
1279 type Summary = EntrySummary;
1280
1281 fn summary(&self) -> Self::Summary {
1282 let file_count;
1283 let visible_file_count;
1284 if self.is_file() {
1285 file_count = 1;
1286 if self.is_ignored {
1287 visible_file_count = 0;
1288 } else {
1289 visible_file_count = 1;
1290 }
1291 } else {
1292 file_count = 0;
1293 visible_file_count = 0;
1294 }
1295
1296 EntrySummary {
1297 max_path: self.path().clone(),
1298 file_count,
1299 visible_file_count,
1300 }
1301 }
1302}
1303
1304impl sum_tree::KeyedItem for Entry {
1305 type Key = PathKey;
1306
1307 fn key(&self) -> Self::Key {
1308 PathKey(self.path().clone())
1309 }
1310}
1311
/// Aggregated data for a run of entries in the `SumTree`.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest path (in sort order) covered by this summary.
    max_path: Arc<Path>,
    file_count: usize,
    // Count of files not excluded by any ignore rule.
    visible_file_count: usize,
}
1318
1319impl Default for EntrySummary {
1320 fn default() -> Self {
1321 Self {
1322 max_path: Arc::from(Path::new("")),
1323 file_count: 0,
1324 visible_file_count: 0,
1325 }
1326 }
1327}
1328
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // `rhs` covers entries to the right, so its max path supersedes ours.
        self.max_path = rhs.max_path.clone();
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
1338
/// Sort key for entries in the tree: the worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
1341
1342impl Default for PathKey {
1343 fn default() -> Self {
1344 Self(Path::new("").into())
1345 }
1346}
1347
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // This dimension tracks the rightmost path seen so far.
        self.0 = summary.max_path.clone();
    }
}
1353
/// Seek target within the entry tree: either an exact path, or the position
/// immediately past the entire subtree rooted at a path (`Successor`).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum PathSearch<'a> {
    Exact(&'a Path),
    Successor(&'a Path),
}
1359
impl<'a> Ord for PathSearch<'a> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        match (self, other) {
            (Self::Exact(a), Self::Exact(b)) => a.cmp(b),
            (Self::Successor(a), Self::Exact(b)) => {
                // `Successor(a)` sorts strictly after every path within the
                // subtree rooted at `a`; elsewhere it compares like `a` itself.
                if b.starts_with(a) {
                    cmp::Ordering::Greater
                } else {
                    a.cmp(b)
                }
            }
            // Seek targets are only ever compared against the `Exact` values
            // produced by `add_summary`, so the remaining combinations
            // (`Exact` vs `Successor`, `Successor` vs `Successor`) never occur.
            _ => unreachable!("not sure we need the other two cases"),
        }
    }
}
1375
impl<'a> PartialOrd for PathSearch<'a> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        // Delegate to the total order defined above.
        Some(self.cmp(other))
    }
}
1381
1382impl<'a> Default for PathSearch<'a> {
1383 fn default() -> Self {
1384 Self::Exact(Path::new("").into())
1385 }
1386}
1387
impl<'a: 'b, 'b> sum_tree::Dimension<'a, EntrySummary> for PathSearch<'b> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // Accumulated positions are always concrete paths, hence `Exact`.
        *self = Self::Exact(summary.max_path.as_ref());
    }
}
1393
/// Tree dimension counting every file entry (ignored or not).
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct FileCount(usize);
1396
impl<'a> sum_tree::Dimension<'a, EntrySummary> for FileCount {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // Accumulates the number of files to the left of the cursor.
        self.0 += summary.file_count;
    }
}
1402
/// Tree dimension counting only non-ignored file entries.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct VisibleFileCount(usize);
1405
impl<'a> sum_tree::Dimension<'a, EntrySummary> for VisibleFileCount {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // Accumulates the number of non-ignored files left of the cursor.
        self.0 += summary.visible_file_count;
    }
}
1411
/// Scans a worktree's directory tree on a pool of background threads, keeping
/// the shared snapshot up to date and reporting progress over `notify`.
struct BackgroundScanner {
    snapshot: Arc<Mutex<Snapshot>>,
    notify: Sender<ScanState>,
    thread_pool: scoped_pool::Pool,
}
1417
impl BackgroundScanner {
    /// Creates a scanner that maintains `snapshot` and reports scan state on
    /// `notify`. `worktree_id` only labels the scanner's threads.
    fn new(snapshot: Arc<Mutex<Snapshot>>, notify: Sender<ScanState>, worktree_id: usize) -> Self {
        Self {
            snapshot,
            notify,
            thread_pool: scoped_pool::Pool::new(16, format!("worktree-{}-scanner", worktree_id)),
        }
    }

    /// The absolute path of the worktree root on disk.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// A point-in-time copy of the shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }

    /// Performs the initial recursive scan, then processes batches of FS
    /// events from `event_stream`. Stops when the `notify` receiver is
    /// dropped (every failed send returns early / stops the stream).
    fn run(mut self, event_stream: fsevent::EventStream) {
        if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs() {
            if smol::block_on(self.notify.send(ScanState::Err(Arc::new(err)))).is_err() {
                return;
            }
        }

        if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
            return;
        }

        // Returning `false` from the callback terminates the event stream.
        event_stream.run(move |events| {
            if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
                return false;
            }

            if !self.process_events(events) {
                return false;
            }

            if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
                return false;
            }

            true
        });
    }

    /// Scans the whole worktree from the root, inserting an entry per file or
    /// directory. Directory traversal is parallelized over the thread pool
    /// via a work queue of `ScanJob`s.
    fn scan_dirs(&mut self) -> io::Result<()> {
        self.snapshot.lock().scan_id += 1;

        let path: Arc<Path> = Arc::from(Path::new(""));
        let abs_path = self.abs_path();
        let metadata = fs::metadata(&abs_path)?;
        let inode = metadata.ino();
        // `fs::metadata` follows symlinks, so query the link status separately.
        let is_symlink = fs::symlink_metadata(&abs_path)?.file_type().is_symlink();
        let is_dir = metadata.file_type().is_dir();
        let mtime = metadata.modified()?;

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let mut root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        if is_dir {
            root_name.push('/');
        }

        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let next_entry_id;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.root_name = root_name;
            snapshot.root_char_bag = root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
        }

        if is_dir {
            self.snapshot.lock().insert_entry(Entry {
                id: next_entry_id.fetch_add(1, SeqCst),
                kind: EntryKind::PendingDir,
                path: path.clone(),
                inode,
                mtime,
                is_symlink,
                is_ignored: false,
            });

            // Each job carries a clone of the sender so it can enqueue its
            // subdirectories; the queue drains once the last sender is dropped.
            let (tx, rx) = crossbeam_channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .unwrap();
            drop(tx);

            self.thread_pool.scoped(|pool| {
                for _ in 0..self.thread_pool.thread_count() {
                    pool.execute(|| {
                        while let Ok(job) = rx.recv() {
                            if let Err(err) =
                                self.scan_dir(root_char_bag, next_entry_id.clone(), &job)
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            });
        } else {
            // The worktree root is a single file.
            self.snapshot.lock().insert_entry(Entry {
                id: next_entry_id.fetch_add(1, SeqCst),
                kind: EntryKind::File(char_bag_for_path(root_char_bag, &path)),
                path,
                inode,
                mtime,
                is_symlink,
                is_ignored: false,
            });
        }

        Ok(())
    }

    /// Reads one directory, adding its children to the snapshot and queueing
    /// a `ScanJob` for each subdirectory found.
    fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> io::Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        for child_entry in fs::read_dir(&job.abs_path)? {
            let child_entry = child_entry?;
            let child_name = child_entry.file_name();
            let child_abs_path = job.abs_path.join(&child_name);
            let child_path: Arc<Path> = job.path.join(&child_name).into();
            let child_is_symlink = child_entry.metadata()?.file_type().is_symlink();
            // `fs::metadata` follows symlinks; skip children whose targets
            // cannot be resolved rather than failing the whole directory.
            let child_metadata = if let Ok(metadata) = fs::metadata(&child_abs_path) {
                metadata
            } else {
                log::error!("could not get metadata for path {:?}", child_abs_path);
                continue;
            };

            let child_inode = child_metadata.ino();
            let child_mtime = child_metadata.modified()?;

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                let (ignore, err) = Gitignore::new(&child_abs_path);
                if let Some(err) = err {
                    log::error!("error in ignore file {:?} - {:?}", child_path, err);
                }
                let ignore = Arc::new(ignore);
                ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                new_ignore = Some(ignore);

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // tends to appear early in the listing, so the entries processed before it should
                // rarely be numerous. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        // Jobs were pushed in the same order as directory
                        // entries, so this iterator stays in lock-step.
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            if child_metadata.is_dir() {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                new_entries.push(Entry {
                    id: next_entry_id.fetch_add(1, SeqCst),
                    kind: EntryKind::PendingDir,
                    path: child_path.clone(),
                    inode: child_inode,
                    mtime: child_mtime,
                    is_symlink: child_is_symlink,
                    is_ignored,
                });
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Everything below an ignored directory is ignored.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(Entry {
                    id: next_entry_id.fetch_add(1, SeqCst),
                    kind: EntryKind::File(char_bag_for_path(root_char_bag, &child_path)),
                    path: child_path,
                    inode: child_inode,
                    mtime: child_mtime,
                    is_symlink: child_is_symlink,
                    is_ignored,
                });
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).unwrap();
        }

        Ok(())
    }

    /// Applies one batch of FS events to the snapshot. Returns `false` if the
    /// worktree root can no longer be resolved, which stops the event stream.
    fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = snapshot.abs_path.canonicalize() {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort, then drop events for paths subsumed by an earlier ancestor's
        // event in the same batch.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First remove every affected path, then re-insert whatever still
        // exists on disk.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        let (scan_queue_tx, scan_queue_rx) = crossbeam_channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match fs_entry_for_path(
                snapshot.root_char_bag,
                &next_entry_id,
                path.clone(),
                &event.path,
            ) {
                Ok(Some(mut fs_entry)) => {
                    let is_dir = fs_entry.is_dir();
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, is_dir);
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry);
                    if is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .unwrap();
                    }
                }
                // `None`: the path disappeared again between the event and now.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.thread_pool.scoped(|pool| {
            for _ in 0..self.thread_pool.thread_count() {
                pool.execute(|| {
                    while let Ok(job) = scan_queue_rx.recv() {
                        if let Err(err) = self.scan_dir(root_char_bag, next_entry_id.clone(), &job)
                        {
                            log::error!("error scanning {:?}: {}", job.abs_path, err);
                        }
                    }
                });
            }
        });

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses();
        true
    }

    /// Recomputes ignore statuses beneath directories whose `.gitignore`
    /// changed during the current scan, and forgets ignores whose files no
    /// longer exist.
    fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = crossbeam_channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip descendants of `parent_path`: updating the ancestor
            // already recurses into them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .unwrap();
        }
        drop(ignore_queue_tx);

        self.thread_pool.scoped(|scope| {
            for _ in 0..self.thread_pool.thread_count() {
                scope.execute(|| {
                    while let Ok(job) = ignore_queue_rx.recv() {
                        self.update_ignore_status(job, &snapshot);
                    }
                });
            }
        });
    }

    /// Recomputes the ignore status of each direct child of `job.path`,
    /// queueing a follow-up job per subdirectory, and writes any changed
    /// entries back into the shared snapshot.
    fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(entry.path(), entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path().clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .unwrap();
            }

            // Only touch the tree for entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                edits.push(Edit::Insert(entry));
            }
        }
        self.snapshot.lock().entries.edit(edits, &());
    }
}
1827
1828fn refresh_entry(snapshot: &Mutex<Snapshot>, path: Arc<Path>, abs_path: &Path) -> Result<Entry> {
1829 let root_char_bag;
1830 let next_entry_id;
1831 {
1832 let snapshot = snapshot.lock();
1833 root_char_bag = snapshot.root_char_bag;
1834 next_entry_id = snapshot.next_entry_id.clone();
1835 }
1836 let entry = fs_entry_for_path(root_char_bag, &next_entry_id, path, abs_path)?
1837 .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
1838 Ok(snapshot.lock().insert_entry(entry))
1839}
1840
/// Builds an `Entry` from the file-system state at `abs_path`. Returns
/// `Ok(None)` when the path effectively no longer exists, and `Err` for any
/// other I/O failure.
fn fs_entry_for_path(
    root_char_bag: CharBag,
    next_entry_id: &AtomicUsize,
    path: Arc<Path>,
    abs_path: &Path,
) -> Result<Option<Entry>> {
    let metadata = match fs::metadata(&abs_path) {
        Err(err) => {
            return match (err.kind(), err.raw_os_error()) {
                (io::ErrorKind::NotFound, _) => Ok(None),
                // ENOTDIR: a parent component is not a directory, so the
                // path is gone for our purposes too.
                (io::ErrorKind::Other, Some(libc::ENOTDIR)) => Ok(None),
                _ => Err(anyhow::Error::new(err)),
            }
        }
        Ok(metadata) => metadata,
    };
    let inode = metadata.ino();
    let mtime = metadata.modified()?;
    // `fs::metadata` follows symlinks, so query the link itself separately.
    let is_symlink = fs::symlink_metadata(&abs_path)
        .context("failed to read symlink metadata")?
        .file_type()
        .is_symlink();

    let entry = Entry {
        id: next_entry_id.fetch_add(1, SeqCst),
        kind: if metadata.file_type().is_dir() {
            EntryKind::PendingDir
        } else {
            EntryKind::File(char_bag_for_path(root_char_bag, &path))
        },
        path: Arc::from(path),
        inode,
        mtime,
        is_symlink,
        is_ignored: false,
    };

    Ok(Some(entry))
}
1880
1881fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
1882 let mut result = root_char_bag;
1883 result.extend(
1884 path.to_string_lossy()
1885 .chars()
1886 .map(|c| c.to_ascii_lowercase()),
1887 );
1888 result
1889}
1890
/// Unit of work for the directory-scanning queue: one directory to read.
struct ScanJob {
    abs_path: PathBuf,
    // Path relative to the worktree root.
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Cloned into each job so jobs can enqueue their own subdirectories.
    scan_queue: crossbeam_channel::Sender<ScanJob>,
}
1897
/// Unit of work for recomputing ignore statuses: one directory whose direct
/// children should be re-evaluated.
struct UpdateIgnoreStatusJob {
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Cloned into each job so subdirectory jobs can be enqueued recursively.
    ignore_queue: crossbeam_channel::Sender<UpdateIgnoreStatusJob>,
}
1903
pub trait WorktreeHandle {
    /// Test-only helper that forces any pending FS events for the worktree to
    /// be delivered and fully processed before returning.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
1911
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create, then delete, a sentinel file; observing both changes
            // proves the event stream has caught up to the present.
            fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
1944
/// Iterator over a snapshot's file entries in path order, either including
/// ignored files (`All`) or restricted to non-ignored ones (`Visible`).
pub enum FileIter<'a> {
    All(Cursor<'a, Entry, FileCount, ()>),
    Visible(Cursor<'a, Entry, VisibleFileCount, ()>),
}
1949
impl<'a> FileIter<'a> {
    /// Iterates over every file entry, starting at the `start`-th file.
    fn all(snapshot: &'a Snapshot, start: usize) -> Self {
        let mut cursor = snapshot.entries.cursor();
        cursor.seek(&FileCount(start), Bias::Right, &());
        Self::All(cursor)
    }

    /// Iterates over non-ignored file entries, starting at the `start`-th one.
    fn visible(snapshot: &'a Snapshot, start: usize) -> Self {
        let mut cursor = snapshot.entries.cursor();
        cursor.seek(&VisibleFileCount(start), Bias::Right, &());
        Self::Visible(cursor)
    }

    /// Advances the cursor to the next file counted by this variant's dimension.
    fn next_internal(&mut self) {
        match self {
            Self::All(cursor) => {
                let ix = *cursor.seek_start();
                cursor.seek_forward(&FileCount(ix.0 + 1), Bias::Right, &());
            }
            Self::Visible(cursor) => {
                let ix = *cursor.seek_start();
                cursor.seek_forward(&VisibleFileCount(ix.0 + 1), Bias::Right, &());
            }
        }
    }

    /// The entry the cursor currently points at, if any.
    fn item(&self) -> Option<&'a Entry> {
        match self {
            Self::All(cursor) => cursor.item(),
            Self::Visible(cursor) => cursor.item(),
        }
    }
}
1983
1984impl<'a> Iterator for FileIter<'a> {
1985 type Item = &'a Entry;
1986
1987 fn next(&mut self) -> Option<Self::Item> {
1988 if let Some(entry) = self.item() {
1989 self.next_internal();
1990 Some(entry)
1991 } else {
1992 None
1993 }
1994 }
1995}
1996
/// Iterator over the direct children of a directory entry, in path order.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    cursor: Cursor<'a, Entry, PathSearch<'a>, ()>,
}
2001
impl<'a> ChildEntriesIter<'a> {
    /// Positions the cursor just past `parent_path`, i.e. on its first child
    /// (if it has any).
    fn new(parent_path: &'a Path, snapshot: &'a Snapshot) -> Self {
        let mut cursor = snapshot.entries.cursor();
        cursor.seek(&PathSearch::Exact(parent_path), Bias::Right, &());
        Self {
            parent_path,
            cursor,
        }
    }
}
2012
2013impl<'a> Iterator for ChildEntriesIter<'a> {
2014 type Item = &'a Entry;
2015
2016 fn next(&mut self) -> Option<Self::Item> {
2017 if let Some(item) = self.cursor.item() {
2018 if item.path().starts_with(self.parent_path) {
2019 self.cursor
2020 .seek_forward(&PathSearch::Successor(item.path()), Bias::Left, &());
2021 Some(item)
2022 } else {
2023 None
2024 }
2025 } else {
2026 None
2027 }
2028 }
2029}
2030
/// Handlers for worktree-related RPC messages received from remote peers.
mod remote {
    use super::*;

    /// Handles `AddGuest`: registers a new collaborator on a shared worktree.
    pub async fn add_guest(
        envelope: TypedEnvelope<proto::AddGuest>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        rpc.state
            .lock()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?
            .update(cx, |worktree, cx| worktree.add_guest(envelope, cx))
    }

    /// Handles `RemoveGuest`: drops a collaborator from a shared worktree.
    pub async fn remove_guest(
        envelope: TypedEnvelope<proto::RemoveGuest>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        rpc.state
            .lock()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?
            .update(cx, |worktree, cx| worktree.remove_guest(envelope, cx))
    }

    /// Handles `OpenBuffer`: opens the requested buffer on the local (host)
    /// worktree and responds to the requesting peer.
    pub async fn open_buffer(
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();
        let worktree = rpc
            .state
            .lock()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?;

        let response = worktree
            .update(cx, |worktree, cx| {
                worktree
                    .as_local_mut()
                    .unwrap()
                    .open_remote_buffer(envelope, cx)
            })
            .await?;

        rpc.respond(receipt, response).await?;

        Ok(())
    }

    /// Handles `CloseBuffer`: a guest stopped observing one of our buffers.
    pub async fn close_buffer(
        envelope: TypedEnvelope<proto::CloseBuffer>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        let worktree = rpc
            .state
            .lock()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?;

        worktree.update(cx, |worktree, cx| {
            worktree
                .as_local_mut()
                .unwrap()
                .close_remote_buffer(envelope, cx)
        })
    }

    /// Handles `UpdateBuffer`: applies remote edit operations to a buffer.
    pub async fn update_buffer(
        envelope: TypedEnvelope<proto::UpdateBuffer>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        let message = envelope.payload;
        let mut state = rpc.state.lock().await;
        let worktree = state.shared_worktree(message.worktree_id, cx)?;
        worktree.update(cx, |tree, cx| tree.update_buffer(message, cx))?;
        Ok(())
    }
}
2115
2116#[cfg(test)]
2117mod tests {
2118 use super::*;
2119 use crate::test::*;
2120 use anyhow::Result;
2121 use rand::prelude::*;
2122 use serde_json::json;
2123 use std::time::UNIX_EPOCH;
2124 use std::{env, fmt::Write, os::unix, time::SystemTime};
2125
    // Builds a worktree through a symlinked root and checks that scanning
    // resolves symlinks and that fuzzy path matching finds the right entries.
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let tree = cx.add_model(|cx| Worktree::local(root_link_path, Default::default(), cx));

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert_eq!(tree.file_count(), 5);

            // The symlinked directory shares inodes with its target.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );

            let results = match_paths(
                Some(tree.snapshot()).iter(),
                "bna",
                false,
                false,
                false,
                10,
                Default::default(),
                cx.thread_pool().clone(),
            )
            .into_iter()
            .map(|result| result.path)
            .collect::<Vec<Arc<Path>>>();
            assert_eq!(
                results,
                vec![
                    PathBuf::from("banana/carrot/date").into(),
                    PathBuf::from("banana/carrot/endive").into(),
                ]
            );
        })
    }
2186
    // Edits a buffer, saves it, and checks the file on disk matches.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let app_state = cx.read(build_app_state);
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let tree = cx.add_model(|cx| Worktree::local(dir.path(), app_state.languages, cx));
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
2207
    // Same as test_save_file, but the worktree root *is* the file, so the
    // buffer is opened with the empty relative path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let app_state = cx.read(build_app_state);
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let tree = cx.add_model(|cx| Worktree::local(file_path.clone(), app_state.languages, cx));
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
2234
    // Renames, moves, and deletes files, then verifies the worktree's rescan
    // preserves entry ids across renames and updates open buffers' file paths.
    #[gpui::test]
    async fn test_rescan_simple(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let tree = cx.add_model(|cx| Worktree::local(dir.path(), Default::default(), cx));

        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap() }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // After scanning, the worktree knows which files exist and which don't.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Perform a batch of FS mutations and wait for them to be observed.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                vec![
                    "a",
                    "a/file1",
                    "a/file2.new",
                    "b",
                    "d",
                    "d/file3",
                    "d/file4"
                ]
            );

            // Entry ids survive renames (rename detection within one batch).
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            // Buffers follow their files to the new paths.
            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });
    }
2336
2337 #[gpui::test]
2338 async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
2339 let dir = temp_tree(json!({
2340 ".git": {},
2341 ".gitignore": "ignored-dir\n",
2342 "tracked-dir": {
2343 "tracked-file1": "tracked contents",
2344 },
2345 "ignored-dir": {
2346 "ignored-file1": "ignored contents",
2347 }
2348 }));
2349
2350 let tree = cx.add_model(|cx| Worktree::local(dir.path(), Default::default(), cx));
2351 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2352 .await;
2353 tree.flush_fs_events(&cx).await;
2354 cx.read(|cx| {
2355 let tree = tree.read(cx);
2356 let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
2357 let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
2358 assert_eq!(tracked.is_ignored(), false);
2359 assert_eq!(ignored.is_ignored(), true);
2360 });
2361
2362 fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
2363 fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
2364 tree.flush_fs_events(&cx).await;
2365 cx.read(|cx| {
2366 let tree = tree.read(cx);
2367 let dot_git = tree.entry_for_path(".git").unwrap();
2368 let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
2369 let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
2370 assert_eq!(tracked.is_ignored(), false);
2371 assert_eq!(ignored.is_ignored(), true);
2372 assert_eq!(dot_git.is_ignored(), true);
2373 });
2374 }
2375
2376 #[test]
2377 fn test_random() {
2378 let iterations = env::var("ITERATIONS")
2379 .map(|i| i.parse().unwrap())
2380 .unwrap_or(100);
2381 let operations = env::var("OPERATIONS")
2382 .map(|o| o.parse().unwrap())
2383 .unwrap_or(40);
2384 let initial_entries = env::var("INITIAL_ENTRIES")
2385 .map(|o| o.parse().unwrap())
2386 .unwrap_or(20);
2387 let seeds = if let Ok(seed) = env::var("SEED").map(|s| s.parse().unwrap()) {
2388 seed..seed + 1
2389 } else {
2390 0..iterations
2391 };
2392
2393 for seed in seeds {
2394 dbg!(seed);
2395 let mut rng = StdRng::seed_from_u64(seed);
2396
2397 let root_dir = tempdir::TempDir::new(&format!("test-{}", seed)).unwrap();
2398 for _ in 0..initial_entries {
2399 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
2400 }
2401 log::info!("Generated initial tree");
2402
2403 let (notify_tx, _notify_rx) = smol::channel::unbounded();
2404 let mut scanner = BackgroundScanner::new(
2405 Arc::new(Mutex::new(Snapshot {
2406 id: 0,
2407 scan_id: 0,
2408 abs_path: root_dir.path().into(),
2409 entries: Default::default(),
2410 paths_by_id: Default::default(),
2411 removed_entry_ids: Default::default(),
2412 ignores: Default::default(),
2413 root_name: Default::default(),
2414 root_char_bag: Default::default(),
2415 next_entry_id: Default::default(),
2416 })),
2417 notify_tx,
2418 0,
2419 );
2420 scanner.scan_dirs().unwrap();
2421 scanner.snapshot().check_invariants();
2422
2423 let mut events = Vec::new();
2424 let mut mutations_len = operations;
2425 while mutations_len > 1 {
2426 if !events.is_empty() && rng.gen_bool(0.4) {
2427 let len = rng.gen_range(0..=events.len());
2428 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
2429 log::info!("Delivering events: {:#?}", to_deliver);
2430 scanner.process_events(to_deliver);
2431 scanner.snapshot().check_invariants();
2432 } else {
2433 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
2434 mutations_len -= 1;
2435 }
2436 }
2437 log::info!("Quiescing: {:#?}", events);
2438 scanner.process_events(events);
2439 scanner.snapshot().check_invariants();
2440
2441 let (notify_tx, _notify_rx) = smol::channel::unbounded();
2442 let mut new_scanner = BackgroundScanner::new(
2443 Arc::new(Mutex::new(Snapshot {
2444 id: 0,
2445 scan_id: 0,
2446 abs_path: root_dir.path().into(),
2447 entries: Default::default(),
2448 paths_by_id: Default::default(),
2449 removed_entry_ids: Default::default(),
2450 ignores: Default::default(),
2451 root_name: Default::default(),
2452 root_char_bag: Default::default(),
2453 next_entry_id: Default::default(),
2454 })),
2455 notify_tx,
2456 1,
2457 );
2458 new_scanner.scan_dirs().unwrap();
2459 assert_eq!(scanner.snapshot().to_vec(), new_scanner.snapshot().to_vec());
2460 }
2461 }
2462
    /// Applies one random mutation to the directory tree rooted at `root_path`
    /// and returns synthetic `fsevent::Event`s for the paths it touched, so a
    /// `BackgroundScanner` can be fed the same notifications a real watcher
    /// would produce. `insertion_probability` biases the choice toward
    /// creating new entries; otherwise a `.gitignore` is written, or an
    /// existing entry is renamed or deleted.
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        // Fabricates an event for `path`. The id is just the current UNIX
        // time in seconds — presumably any plausible id suffices for tests.
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        // Create a new entry when the tree is empty apart from its root
        // (`dirs` always contains the root itself), or with the caller-chosen
        // probability.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Occasionally (re)write a `.gitignore` in a random directory,
            // ignoring a random subset of the files and subdirs beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            // NOTE(review): unlike the inclusive range above, this half-open
            // range never selects all of `subdirs` (which includes
            // `ignore_dir_path` itself) — confirm the asymmetry is deliberate.
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                write!(
                    ignore_contents,
                    "{}\n",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Otherwise rename or delete an existing entry. `dirs[1..]` skips
            // the root so the tree root itself is never moved or removed.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Move under a directory that isn't inside the moved entry.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an entire existing directory with the
                // renamed entry instead of moving into it under a fresh name.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                // Deleting a directory: record an event for every path that
                // disappears, not just the directory itself.
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                fs::remove_dir_all(&old_path).unwrap();
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
2585
2586 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
2587 let child_entries = fs::read_dir(&path).unwrap();
2588 let mut dirs = vec![path];
2589 let mut files = Vec::new();
2590 for child_entry in child_entries {
2591 let child_path = child_entry.unwrap().path();
2592 if child_path.is_dir() {
2593 let (child_dirs, child_files) = read_dir_recursive(child_path);
2594 dirs.extend(child_dirs);
2595 files.extend(child_files);
2596 } else {
2597 files.push(child_path);
2598 }
2599 }
2600 (dirs, files)
2601 }
2602
2603 fn gen_name(rng: &mut impl Rng) -> String {
2604 (0..6)
2605 .map(|_| rng.sample(rand::distributions::Alphanumeric))
2606 .map(char::from)
2607 .collect()
2608 }
2609
2610 impl Snapshot {
2611 fn check_invariants(&self) {
2612 let mut files = self.files(0);
2613 let mut visible_files = self.visible_files(0);
2614 for entry in self.entries.cursor::<(), ()>() {
2615 if entry.is_file() {
2616 assert_eq!(files.next().unwrap().inode(), entry.inode);
2617 if !entry.is_ignored {
2618 assert_eq!(visible_files.next().unwrap().inode(), entry.inode);
2619 }
2620 }
2621 }
2622 assert!(files.next().is_none());
2623 assert!(visible_files.next().is_none());
2624
2625 let mut bfs_paths = Vec::new();
2626 let mut stack = vec![Path::new("")];
2627 while let Some(path) = stack.pop() {
2628 bfs_paths.push(path);
2629 let ix = stack.len();
2630 for child_entry in self.child_entries(path) {
2631 stack.insert(ix, child_entry.path());
2632 }
2633 }
2634
2635 let dfs_paths = self
2636 .entries
2637 .cursor::<(), ()>()
2638 .map(|e| e.path().as_ref())
2639 .collect::<Vec<_>>();
2640 assert_eq!(bfs_paths, dfs_paths);
2641
2642 for (ignore_parent_path, _) in &self.ignores {
2643 assert!(self.entry_for_path(ignore_parent_path).is_some());
2644 assert!(self
2645 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
2646 .is_some());
2647 }
2648 }
2649
2650 fn to_vec(&self) -> Vec<(&Path, u64, bool)> {
2651 let mut paths = Vec::new();
2652 for entry in self.entries.cursor::<(), ()>() {
2653 paths.push((entry.path().as_ref(), entry.inode(), entry.is_ignored()));
2654 }
2655 paths.sort_by(|a, b| a.0.cmp(&b.0));
2656 paths
2657 }
2658 }
2659}