1mod char_bag;
2mod fuzzy;
3mod ignore;
4
5use self::{char_bag::CharBag, ignore::IgnoreStack};
6use crate::{
7 editor::{self, Buffer, History, Operation, Rope},
8 language::LanguageRegistry,
9 rpc::{self, proto},
10 sum_tree::{self, Cursor, Edit, SumTree},
11 time::ReplicaId,
12 util::Bias,
13};
14use ::ignore::gitignore::Gitignore;
15use anyhow::{anyhow, Context, Result};
16use atomic::Ordering::SeqCst;
17pub use fuzzy::{match_paths, PathMatch};
18use gpui::{
19 scoped_pool, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
20 Task, WeakModelHandle,
21};
22use lazy_static::lazy_static;
23use parking_lot::Mutex;
24use postage::{
25 prelude::{Sink, Stream},
26 watch,
27};
28use smol::{
29 channel::Sender,
30 io::{AsyncReadExt, AsyncWriteExt},
31};
32use std::{
33 cmp,
34 collections::HashMap,
35 convert::TryInto,
36 ffi::{OsStr, OsString},
37 fmt, fs,
38 future::Future,
39 io,
40 ops::Deref,
41 os::unix::fs::MetadataExt,
42 path::{Path, PathBuf},
43 sync::{
44 atomic::{self, AtomicUsize},
45 Arc,
46 },
47 time::{Duration, SystemTime},
48};
49use zed_rpc::{PeerId, TypedEnvelope};
50
lazy_static! {
    // Cached `OsStr` for ".gitignore", so file-name comparisons avoid
    // re-converting the literal on every check.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
54
/// Registers the worktree-related RPC message handlers on the given client.
/// The `remote` handler functions are defined elsewhere in this module.
pub fn init(cx: &mut MutableAppContext, rpc: rpc::Client) {
    rpc.on_message(remote::remove_guest, cx);
    rpc.on_message(remote::open_buffer, cx);
    rpc.on_message(remote::close_buffer, cx);
    rpc.on_message(remote::update_buffer, cx);
}
61
/// State of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan is currently in progress.
    Idle,
    /// The background scanner is traversing the filesystem.
    Scanning,
    /// The scan failed; the error is `Arc`-wrapped so the state stays `Clone`.
    Err(Arc<io::Error>),
}
68
/// A directory tree open in the editor: either backed by the local
/// filesystem, or a mirror of a worktree hosted by a remote peer.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
73
impl Entity for Worktree {
    // Worktree models emit only payload-less notification events.
    type Event = ();
}
77
78impl Worktree {
79 pub fn local(
80 path: impl Into<Arc<Path>>,
81 languages: Arc<LanguageRegistry>,
82 cx: &mut ModelContext<Worktree>,
83 ) -> Self {
84 Worktree::Local(LocalWorktree::new(path, languages, cx))
85 }
86
87 pub async fn remote(
88 rpc: rpc::Client,
89 id: u64,
90 access_token: String,
91 languages: Arc<LanguageRegistry>,
92 cx: &mut AsyncAppContext,
93 ) -> Result<ModelHandle<Self>> {
94 let open_worktree_response = rpc
95 .request(proto::OpenWorktree {
96 worktree_id: id,
97 access_token,
98 })
99 .await?;
100 let worktree_message = open_worktree_response
101 .worktree
102 .ok_or_else(|| anyhow!("empty worktree"))?;
103 let replica_id = open_worktree_response
104 .replica_id
105 .ok_or_else(|| anyhow!("empty replica id"))?;
106 let worktree = cx.update(|cx| {
107 cx.add_model(|cx| {
108 Worktree::Remote(RemoteWorktree::new(
109 id,
110 worktree_message,
111 rpc.clone(),
112 replica_id as ReplicaId,
113 languages,
114 cx,
115 ))
116 })
117 });
118 rpc.state
119 .lock()
120 .await
121 .shared_worktrees
122 .insert(id, worktree.downgrade());
123 Ok(worktree)
124 }
125
126 pub fn as_local(&self) -> Option<&LocalWorktree> {
127 if let Worktree::Local(worktree) = self {
128 Some(worktree)
129 } else {
130 None
131 }
132 }
133
134 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
135 if let Worktree::Local(worktree) = self {
136 Some(worktree)
137 } else {
138 None
139 }
140 }
141
142 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
143 if let Worktree::Remote(worktree) = self {
144 Some(worktree)
145 } else {
146 None
147 }
148 }
149
150 pub fn snapshot(&self) -> Snapshot {
151 match self {
152 Worktree::Local(worktree) => worktree.snapshot(),
153 Worktree::Remote(worktree) => worktree.snapshot.clone(),
154 }
155 }
156
157 pub fn remove_guest(
158 &mut self,
159 envelope: TypedEnvelope<proto::RemoveGuest>,
160 cx: &mut ModelContext<Worktree>,
161 ) -> Result<()> {
162 match self {
163 Worktree::Local(worktree) => worktree.remove_guest(envelope, cx),
164 Worktree::Remote(_) => todo!(),
165 }
166 }
167
168 pub fn open_buffer(
169 &mut self,
170 path: impl AsRef<Path>,
171 cx: &mut ModelContext<Self>,
172 ) -> Task<Result<ModelHandle<Buffer>>> {
173 match self {
174 Worktree::Local(worktree) => worktree.open_buffer(path.as_ref(), cx),
175 Worktree::Remote(worktree) => worktree.open_buffer(path.as_ref(), cx),
176 }
177 }
178
179 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
180 let open_buffers = match self {
181 Worktree::Local(worktree) => &worktree.open_buffers,
182 Worktree::Remote(worktree) => &worktree.open_buffers,
183 };
184
185 let path = path.as_ref();
186 open_buffers
187 .values()
188 .find(|buffer| {
189 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
190 file.path.as_ref() == path
191 } else {
192 false
193 }
194 })
195 .is_some()
196 }
197
198 pub fn update_buffer(
199 &mut self,
200 envelope: proto::UpdateBuffer,
201 cx: &mut ModelContext<Self>,
202 ) -> Result<()> {
203 let open_buffers = match self {
204 Worktree::Local(worktree) => &worktree.open_buffers,
205 Worktree::Remote(worktree) => &worktree.open_buffers,
206 };
207 let buffer = open_buffers
208 .get(&(envelope.buffer_id as usize))
209 .and_then(|buf| buf.upgrade(&cx))
210 .ok_or_else(|| {
211 anyhow!(
212 "invalid buffer {} in update buffer message",
213 envelope.buffer_id
214 )
215 })?;
216 let ops = envelope
217 .operations
218 .into_iter()
219 .map(|op| op.try_into())
220 .collect::<anyhow::Result<Vec<_>>>()?;
221 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
222 Ok(())
223 }
224
225 fn save(
226 &self,
227 path: &Path,
228 text: Rope,
229 cx: &mut ModelContext<Self>,
230 ) -> impl Future<Output = Result<()>> {
231 match self {
232 Worktree::Local(worktree) => {
233 let save = worktree.save(path, text, cx);
234 async move {
235 save.await?;
236 Ok(())
237 }
238 }
239 Worktree::Remote(_) => todo!(),
240 }
241 }
242}
243
impl Deref for Worktree {
    type Target = Snapshot;

    // Both variants hold a `Snapshot`, so read-only snapshot methods can be
    // called directly on a `Worktree`.
    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
254
/// A worktree rooted in the local filesystem, kept up to date by a
/// background scanner thread.
pub struct LocalWorktree {
    // Foreground copy of the tree, refreshed from `background_snapshot`.
    snapshot: Snapshot,
    // Snapshot mutated by the background scanner thread.
    background_snapshot: Arc<Mutex<Snapshot>>,
    // Watch channel pair broadcasting the scanner's current `ScanState`.
    scan_state: (watch::Sender<ScanState>, watch::Receiver<ScanState>),
    // Keeps the filesystem event stream alive for the worktree's lifetime.
    _event_stream_handle: fsevent::Handle,
    // True while a deferred `poll_snapshot` is pending.
    poll_scheduled: bool,
    // Present once the worktree is shared: (client, remote worktree id).
    rpc: Option<(rpc::Client, u64)>,
    // Weak handles to buffers opened from this tree, keyed by buffer id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles retained on behalf of remote guests, keyed by peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    languages: Arc<LanguageRegistry>,
}
266
267impl LocalWorktree {
268 fn new(
269 path: impl Into<Arc<Path>>,
270 languages: Arc<LanguageRegistry>,
271 cx: &mut ModelContext<Worktree>,
272 ) -> Self {
273 let abs_path = path.into();
274 let (scan_state_tx, scan_state_rx) = smol::channel::unbounded();
275 let id = cx.model_id();
276 let snapshot = Snapshot {
277 id,
278 scan_id: 0,
279 abs_path,
280 root_name: Default::default(),
281 root_char_bag: Default::default(),
282 ignores: Default::default(),
283 entries: Default::default(),
284 paths_by_id: Default::default(),
285 removed_entry_ids: Default::default(),
286 next_entry_id: Default::default(),
287 };
288 let (event_stream, event_stream_handle) =
289 fsevent::EventStream::new(&[snapshot.abs_path.as_ref()], Duration::from_millis(100));
290
291 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
292
293 let tree = Self {
294 snapshot,
295 background_snapshot: background_snapshot.clone(),
296 scan_state: watch::channel_with(ScanState::Scanning),
297 _event_stream_handle: event_stream_handle,
298 poll_scheduled: false,
299 open_buffers: Default::default(),
300 shared_buffers: Default::default(),
301 rpc: None,
302 languages,
303 };
304
305 std::thread::spawn(move || {
306 let scanner = BackgroundScanner::new(background_snapshot, scan_state_tx, id);
307 scanner.run(event_stream)
308 });
309
310 cx.spawn(|this, mut cx| {
311 let this = this.downgrade();
312 async move {
313 while let Ok(scan_state) = scan_state_rx.recv().await {
314 let alive = cx.update(|cx| {
315 if let Some(handle) = this.upgrade(&cx) {
316 handle.update(cx, |this, cx| {
317 if let Worktree::Local(worktree) = this {
318 worktree.observe_scan_state(scan_state, cx)
319 } else {
320 unreachable!()
321 }
322 });
323 true
324 } else {
325 false
326 }
327 });
328
329 if !alive {
330 break;
331 }
332 }
333 }
334 })
335 .detach();
336
337 tree
338 }
339
340 pub fn open_buffer(
341 &mut self,
342 path: &Path,
343 cx: &mut ModelContext<Worktree>,
344 ) -> Task<Result<ModelHandle<Buffer>>> {
345 let handle = cx.handle();
346
347 // If there is already a buffer for the given path, then return it.
348 let mut existing_buffer = None;
349 self.open_buffers.retain(|_buffer_id, buffer| {
350 if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
351 if let Some(file) = buffer.read(cx.as_ref()).file() {
352 if file.worktree_id() == handle.id() && file.path.as_ref() == path {
353 existing_buffer = Some(buffer);
354 }
355 }
356 true
357 } else {
358 false
359 }
360 });
361
362 let languages = self.languages.clone();
363 let path = Arc::from(path);
364 cx.spawn(|this, mut cx| async move {
365 if let Some(existing_buffer) = existing_buffer {
366 Ok(existing_buffer)
367 } else {
368 let (file, contents) = this
369 .update(&mut cx, |this, cx| this.as_local().unwrap().load(&path, cx))
370 .await?;
371 let language = languages.select_language(&path).cloned();
372 let buffer = cx.add_model(|cx| {
373 Buffer::from_history(0, History::new(contents.into()), Some(file), language, cx)
374 });
375 this.update(&mut cx, |this, _| {
376 let this = this
377 .as_local_mut()
378 .ok_or_else(|| anyhow!("must be a local worktree"))?;
379 this.open_buffers.insert(buffer.id(), buffer.downgrade());
380 Ok(buffer)
381 })
382 }
383 })
384 }
385
386 pub fn open_remote_buffer(
387 &mut self,
388 envelope: TypedEnvelope<proto::OpenBuffer>,
389 cx: &mut ModelContext<Worktree>,
390 ) -> Task<Result<proto::OpenBufferResponse>> {
391 let peer_id = envelope.original_sender_id();
392 let path = Path::new(&envelope.payload.path);
393
394 let buffer = self.open_buffer(path, cx);
395
396 cx.spawn(|this, mut cx| async move {
397 let buffer = buffer.await?;
398 this.update(&mut cx, |this, cx| {
399 this.as_local_mut()
400 .unwrap()
401 .shared_buffers
402 .entry(peer_id?)
403 .or_default()
404 .insert(buffer.id() as u64, buffer.clone());
405
406 Ok(proto::OpenBufferResponse {
407 buffer: Some(buffer.update(cx.as_mut(), |buffer, cx| buffer.to_proto(cx))),
408 })
409 })
410 })
411 }
412
413 pub fn close_remote_buffer(
414 &mut self,
415 envelope: TypedEnvelope<proto::CloseBuffer>,
416 cx: &mut ModelContext<Worktree>,
417 ) -> Result<()> {
418 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
419 shared_buffers.remove(&envelope.payload.buffer_id);
420 }
421
422 Ok(())
423 }
424
425 pub fn remove_guest(
426 &mut self,
427 envelope: TypedEnvelope<proto::RemoveGuest>,
428 cx: &mut ModelContext<Worktree>,
429 ) -> Result<()> {
430 self.shared_buffers.remove(&envelope.original_sender_id()?);
431 Ok(())
432 }
433
434 pub fn scan_complete(&self) -> impl Future<Output = ()> {
435 let mut scan_state_rx = self.scan_state.1.clone();
436 async move {
437 let mut scan_state = Some(scan_state_rx.borrow().clone());
438 while let Some(ScanState::Scanning) = scan_state {
439 scan_state = scan_state_rx.recv().await;
440 }
441 }
442 }
443
444 fn observe_scan_state(&mut self, scan_state: ScanState, cx: &mut ModelContext<Worktree>) {
445 self.scan_state.0.blocking_send(scan_state).ok();
446 self.poll_snapshot(cx);
447 }
448
449 fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
450 self.snapshot = self.background_snapshot.lock().clone();
451 if self.is_scanning() {
452 if !self.poll_scheduled {
453 cx.spawn(|this, mut cx| async move {
454 smol::Timer::after(Duration::from_millis(100)).await;
455 this.update(&mut cx, |this, cx| {
456 let worktree = this.as_local_mut().unwrap();
457 worktree.poll_scheduled = false;
458 worktree.poll_snapshot(cx);
459 })
460 })
461 .detach();
462 self.poll_scheduled = true;
463 }
464 } else {
465 let mut buffers_to_delete = Vec::new();
466 for (buffer_id, buffer) in &self.open_buffers {
467 if let Some(buffer) = buffer.upgrade(&cx) {
468 buffer.update(cx, |buffer, cx| {
469 let buffer_is_clean = !buffer.is_dirty();
470
471 if let Some(file) = buffer.file_mut() {
472 let mut file_changed = false;
473
474 if let Some(entry) = file
475 .entry_id
476 .and_then(|entry_id| self.entry_for_id(entry_id))
477 {
478 if entry.path != file.path {
479 file.path = entry.path.clone();
480 file_changed = true;
481 }
482
483 if entry.mtime != file.mtime {
484 file.mtime = entry.mtime;
485 file_changed = true;
486 if buffer_is_clean {
487 let abs_path = self.absolutize(&file.path);
488 refresh_buffer(abs_path, cx);
489 }
490 }
491 } else if let Some(entry) = self.entry_for_path(&file.path) {
492 file.entry_id = Some(entry.id);
493 file.mtime = entry.mtime;
494 if buffer_is_clean {
495 let abs_path = self.absolutize(&file.path);
496 refresh_buffer(abs_path, cx);
497 }
498 file_changed = true;
499 } else if !file.is_deleted() {
500 if buffer_is_clean {
501 cx.emit(editor::buffer::Event::Dirtied);
502 }
503 file.entry_id = None;
504 file_changed = true;
505 }
506
507 if file_changed {
508 cx.emit(editor::buffer::Event::FileHandleChanged);
509 }
510 }
511 });
512 } else {
513 buffers_to_delete.push(*buffer_id);
514 }
515 }
516
517 for buffer_id in buffers_to_delete {
518 self.open_buffers.remove(&buffer_id);
519 }
520 }
521
522 cx.notify();
523 }
524
525 fn is_scanning(&self) -> bool {
526 if let ScanState::Scanning = *self.scan_state.1.borrow() {
527 true
528 } else {
529 false
530 }
531 }
532
533 pub fn snapshot(&self) -> Snapshot {
534 self.snapshot.clone()
535 }
536
537 pub fn abs_path(&self) -> &Path {
538 self.snapshot.abs_path.as_ref()
539 }
540
541 pub fn contains_abs_path(&self, path: &Path) -> bool {
542 path.starts_with(&self.snapshot.abs_path)
543 }
544
545 fn absolutize(&self, path: &Path) -> PathBuf {
546 if path.file_name().is_some() {
547 self.snapshot.abs_path.join(path)
548 } else {
549 self.snapshot.abs_path.to_path_buf()
550 }
551 }
552
553 fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
554 let handle = cx.handle();
555 let path = Arc::from(path);
556 let abs_path = self.absolutize(&path);
557 let background_snapshot = self.background_snapshot.clone();
558 cx.spawn(|this, mut cx| async move {
559 let mut file = smol::fs::File::open(&abs_path).await?;
560 let mut text = String::new();
561 file.read_to_string(&mut text).await?;
562 // Eagerly populate the snapshot with an updated entry for the loaded file
563 let entry = refresh_entry(&background_snapshot, path, &abs_path)?;
564 this.update(&mut cx, |this, cx| {
565 let this = this.as_local_mut().unwrap();
566 this.poll_snapshot(cx);
567 });
568 Ok((File::new(entry.id, handle, entry.path, entry.mtime), text))
569 })
570 }
571
572 pub fn save_buffer_as(
573 &self,
574 buffer: ModelHandle<Buffer>,
575 path: impl Into<Arc<Path>>,
576 text: Rope,
577 cx: &mut ModelContext<Worktree>,
578 ) -> Task<Result<File>> {
579 let save = self.save(path, text, cx);
580 cx.spawn(|this, mut cx| async move {
581 let entry = save.await?;
582 this.update(&mut cx, |this, cx| {
583 this.as_local_mut()
584 .unwrap()
585 .open_buffers
586 .insert(buffer.id(), buffer.downgrade());
587 Ok(File::new(entry.id, cx.handle(), entry.path, entry.mtime))
588 })
589 })
590 }
591
592 fn save(
593 &self,
594 path: impl Into<Arc<Path>>,
595 text: Rope,
596 cx: &mut ModelContext<Worktree>,
597 ) -> Task<Result<Entry>> {
598 let path = path.into();
599 let abs_path = self.absolutize(&path);
600 let background_snapshot = self.background_snapshot.clone();
601
602 let save = cx.background().spawn(async move {
603 let buffer_size = text.summary().bytes.min(10 * 1024);
604 let file = smol::fs::File::create(&abs_path).await?;
605 let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);
606 for chunk in text.chunks() {
607 writer.write_all(chunk.as_bytes()).await?;
608 }
609 writer.flush().await?;
610 refresh_entry(&background_snapshot, path.clone(), &abs_path)
611 });
612
613 cx.spawn(|this, mut cx| async move {
614 let entry = save.await?;
615 this.update(&mut cx, |this, cx| {
616 this.as_local_mut().unwrap().poll_snapshot(cx);
617 });
618 Ok(entry)
619 })
620 }
621
622 pub fn share(
623 &mut self,
624 rpc: rpc::Client,
625 cx: &mut ModelContext<Worktree>,
626 ) -> Task<anyhow::Result<(u64, String)>> {
627 let root_name = self.root_name.clone();
628 let snapshot = self.snapshot();
629 let handle = cx.handle();
630 cx.spawn(|this, mut cx| async move {
631 let entries = cx
632 .background()
633 .spawn(async move {
634 snapshot
635 .entries
636 .cursor::<(), ()>()
637 .map(|entry| proto::Entry {
638 id: entry.id as u64,
639 is_dir: entry.is_dir(),
640 path: entry.path.to_string_lossy().to_string(),
641 inode: entry.inode,
642 mtime: Some(entry.mtime.into()),
643 is_symlink: entry.is_symlink,
644 is_ignored: entry.is_ignored,
645 })
646 .collect()
647 })
648 .await;
649
650 let share_response = rpc
651 .request(proto::ShareWorktree {
652 worktree: Some(proto::Worktree { root_name, entries }),
653 })
654 .await?;
655
656 rpc.state
657 .lock()
658 .await
659 .shared_worktrees
660 .insert(share_response.worktree_id, handle.downgrade());
661
662 log::info!("sharing worktree {:?}", share_response);
663
664 this.update(&mut cx, |worktree, _| {
665 worktree.as_local_mut().unwrap().rpc = Some((rpc, share_response.worktree_id));
666 });
667 Ok((share_response.worktree_id, share_response.access_token))
668 })
669 }
670}
671
/// Reloads `abs_path` from disk on a background thread and replaces the
/// buffer's contents with the on-disk text. Read failures are logged rather
/// than propagated.
pub fn refresh_buffer(abs_path: PathBuf, cx: &mut ModelContext<Buffer>) {
    cx.spawn(|buffer, mut cx| async move {
        let new_text = cx
            .background()
            .spawn(async move {
                let mut file = smol::fs::File::open(&abs_path).await?;
                let mut text = String::new();
                file.read_to_string(&mut text).await?;
                Ok::<_, anyhow::Error>(text.into())
            })
            .await;

        match new_text {
            Err(error) => log::error!("error refreshing buffer after file changed: {}", error),
            Ok(new_text) => {
                buffer
                    .update(&mut cx, |buffer, cx| {
                        buffer.set_text_from_disk(new_text, cx)
                    })
                    .await;
            }
        }
    })
    .detach()
}
697
impl Deref for LocalWorktree {
    type Target = Snapshot;

    // Exposes the foreground snapshot's read-only API directly.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
705
impl fmt::Debug for LocalWorktree {
    // Delegates to the snapshot's entry-tree dump.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
711
/// A read-only mirror of a worktree hosted by a remote peer.
pub struct RemoteWorktree {
    // Id of the worktree on the host, used in RPC messages.
    remote_id: u64,
    snapshot: Snapshot,
    rpc: rpc::Client,
    // This client's replica id for collaborative buffer editing.
    replica_id: ReplicaId,
    // Weak handles to buffers opened from this tree, keyed by remote buffer id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    languages: Arc<LanguageRegistry>,
}
720
impl RemoteWorktree {
    /// Builds a mirror of a remote worktree from the `proto::Worktree` sent
    /// by the host, reconstructing the entry tree and the id-to-path index.
    fn new(
        remote_id: u64,
        worktree: proto::Worktree,
        rpc: rpc::Client,
        replica_id: ReplicaId,
        languages: Arc<LanguageRegistry>,
        cx: &mut ModelContext<Worktree>,
    ) -> Self {
        // Char bag of the lowercased root name, used for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let mut entries = SumTree::new();
        let mut paths_by_id = rpds::HashTrieMapSync::default();
        for entry in worktree.entries {
            if let Some(mtime) = entry.mtime {
                let kind = if entry.is_dir {
                    EntryKind::Dir
                } else {
                    // Files extend the root's char bag with their own
                    // lowercased path characters for fuzzy path matching.
                    let mut char_bag = root_char_bag.clone();
                    char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
                    EntryKind::File(char_bag)
                };
                let path: Arc<Path> = Arc::from(Path::new(&entry.path));
                entries.push(
                    Entry {
                        id: entry.id as usize,
                        kind,
                        path: path.clone(),
                        inode: entry.inode,
                        mtime: mtime.into(),
                        is_symlink: entry.is_symlink,
                        is_ignored: entry.is_ignored,
                    },
                    &(),
                );
                paths_by_id.insert_mut(entry.id as usize, path);
            } else {
                // Entries without an mtime cannot be represented; skip them.
                log::warn!("missing mtime in remote worktree entry {:?}", entry.path);
            }
        }
        let snapshot = Snapshot {
            id: cx.model_id(),
            scan_id: 0,
            // Remote worktrees have no local filesystem location.
            abs_path: Path::new("").into(),
            root_name: worktree.root_name,
            root_char_bag,
            ignores: Default::default(),
            entries,
            paths_by_id,
            removed_entry_ids: Default::default(),
            next_entry_id: Default::default(),
        };
        Self {
            remote_id,
            snapshot,
            rpc,
            replica_id,
            open_buffers: Default::default(),
            languages,
        }
    }

    /// Opens a buffer for `path`, reusing an existing live buffer when one is
    /// already open, otherwise requesting the buffer's contents from the host
    /// over RPC.
    pub fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let handle = cx.handle();
        // Look for an already-open buffer for this path, pruning dropped
        // buffers from the map as a side effect.
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == handle.id() && file.path.as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });

        let rpc = self.rpc.clone();
        let languages = self.languages.clone();
        let replica_id = self.replica_id;
        let remote_worktree_id = self.remote_id;
        let path = path.to_string_lossy().to_string();
        cx.spawn(|this, mut cx| async move {
            if let Some(existing_buffer) = existing_buffer {
                Ok(existing_buffer)
            } else {
                let entry = this
                    .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                    .ok_or_else(|| anyhow!("file does not exist"))?;
                let file = File::new(entry.id, handle, entry.path, entry.mtime);
                let language = languages.select_language(&path).cloned();
                let response = rpc
                    .request(proto::OpenBuffer {
                        worktree_id: remote_worktree_id as u64,
                        path,
                    })
                    .await?;
                let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
                let buffer_id = remote_buffer.id;
                let buffer = cx.add_model(|cx| {
                    Buffer::from_proto(replica_id, remote_buffer, Some(file), language, cx).unwrap()
                });
                this.update(&mut cx, |this, _| {
                    let this = this.as_remote_mut().unwrap();
                    this.open_buffers
                        .insert(buffer_id as usize, buffer.downgrade());
                });
                Ok(buffer)
            }
        })
    }
}
841
/// A point-in-time view of a worktree's entries. Cheap to clone and shared
/// between the foreground model and the background scanner.
#[derive(Clone)]
pub struct Snapshot {
    // Model id of the owning worktree.
    id: usize,
    // Incremented per background scan; used to tag cached gitignores.
    scan_id: usize,
    abs_path: Arc<Path>,
    root_name: String,
    // Character bag of the lowercased root name, for fuzzy matching.
    root_char_bag: CharBag,
    // Parsed `.gitignore` per directory, tagged with the scan id that read it.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // All entries, ordered by path.
    entries: SumTree<Entry>,
    // Persistent map from entry id to path, enabling id-based lookups.
    paths_by_id: rpds::HashTrieMapSync<usize, Arc<Path>>,
    // Ids of removed entries keyed by inode, kept so ids can be reused.
    removed_entry_ids: HashMap<u64, usize>,
    next_entry_id: Arc<AtomicUsize>,
}
855
impl Snapshot {
    /// Total number of file entries, including ignored files.
    pub fn file_count(&self) -> usize {
        self.entries.summary().file_count
    }

    /// Number of file entries that are not gitignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries.summary().visible_file_count
    }

    /// Iterates over all files, starting at the `start`-th file.
    pub fn files(&self, start: usize) -> FileIter {
        FileIter::all(self, start)
    }

    /// Iterates over the paths of all entries, excluding the root entry
    /// (whose path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries
            .cursor::<(), ()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| entry.path())
    }

    /// Iterates over non-ignored files, starting at the `start`-th visible file.
    pub fn visible_files(&self, start: usize) -> FileIter {
        FileIter::visible(self, start)
    }

    /// Iterates over the immediate children of the entry at `path`.
    fn child_entries<'a>(&'a self, path: &'a Path) -> ChildEntriesIter<'a> {
        ChildEntriesIter::new(path, self)
    }

    /// The root entry. Panics if the snapshot has no entries yet.
    pub fn root_entry(&self) -> &Entry {
        self.entry_for_path("").unwrap()
    }

    /// Returns the filename of the snapshot's root, plus a trailing slash if the snapshot's root is
    /// a directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Finds the entry whose path is exactly `path`, if any.
    fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let mut cursor = self.entries.cursor::<_, ()>();
        if cursor.seek(&PathSearch::Exact(path.as_ref()), Bias::Left, &()) {
            cursor.item()
        } else {
            None
        }
    }

    /// Finds an entry by its id via the id-to-path index.
    fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let path = self.paths_by_id.get(&id)?;
        self.entry_for_path(path)
    }

    /// The inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode())
    }

    /// Inserts (or replaces) a single entry, keeping the id-to-path index and
    /// the per-directory gitignore cache up to date.
    fn insert_entry(&mut self, mut entry: Entry) -> Entry {
        if !entry.is_dir() && entry.path().file_name() == Some(&GITIGNORE) {
            // A `.gitignore` file appeared or changed: reparse it and record
            // the current scan id so stale caches can be detected later.
            let (ignore, err) = Gitignore::new(self.abs_path.join(entry.path()));
            if let Some(err) = err {
                log::error!("error in ignore file {:?} - {:?}", entry.path(), err);
            }

            let ignore_dir_path = entry.path().parent().unwrap();
            self.ignores
                .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
        }

        self.reuse_entry_id(&mut entry);
        self.entries.insert_or_replace(entry.clone(), &());
        self.paths_by_id.insert_mut(entry.id, entry.path.clone());
        entry
    }

    /// Replaces a pending directory entry with a real one and inserts its
    /// children, optionally recording the directory's gitignore.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut edits = Vec::new();

        let mut parent_entry = self
            .entries
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        // The parent must have been inserted as `PendingDir` by the scanner.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }
        edits.push(Edit::Insert(parent_entry));

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            self.paths_by_id.insert_mut(entry.id, entry.path.clone());
            edits.push(Edit::Insert(entry));
        }
        self.entries.edit(edits, &());
    }

    /// Keeps entry ids stable across removal/re-creation (matched by inode)
    /// and across repeated scans of the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` along with all entries beneath it,
    /// remembering the removed ids by inode so they can be reused if the
    /// corresponding files reappear.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entry_ids;
        {
            // Everything between `path` and its successor is removed; the
            // prefix and suffix are stitched back together.
            let mut cursor = self.entries.cursor::<_, ()>();
            new_entries = cursor.slice(&PathSearch::Exact(path), Bias::Left, &());
            removed_entry_ids = cursor.slice(&PathSearch::Successor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries = new_entries;
        for entry in removed_entry_ids.cursor::<(), ()>() {
            // Keep the largest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            self.paths_by_id.remove_mut(&entry.id);
        }

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the parent directory's cached gitignore as touched this scan.
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of gitignores that applies to `path`, from the root
    /// downward. Collapses to `IgnoreStack::all()` as soon as any ancestor
    /// (or the path itself) is ignored.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        // Ancestors were collected leaf-to-root, so apply them reversed.
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1023
1024impl fmt::Debug for Snapshot {
1025 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1026 for entry in self.entries.cursor::<(), ()>() {
1027 for _ in entry.path().ancestors().skip(1) {
1028 write!(f, " ")?;
1029 }
1030 writeln!(f, "{:?} (inode: {})", entry.path(), entry.inode())?;
1031 }
1032 Ok(())
1033 }
1034}
1035
/// A handle to a file within a worktree, as held by an open buffer.
#[derive(Clone, PartialEq)]
pub struct File {
    // The worktree entry backing this file; `None` once the file is deleted.
    entry_id: Option<usize>,
    worktree: ModelHandle<Worktree>,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    // Modification time observed when the entry was last refreshed.
    pub mtime: SystemTime,
}
1043
impl File {
    /// Creates a file handle backed by an existing worktree entry.
    pub fn new(
        entry_id: usize,
        worktree: ModelHandle<Worktree>,
        path: Arc<Path>,
        mtime: SystemTime,
    ) -> Self {
        Self {
            entry_id: Some(entry_id),
            worktree,
            path,
            mtime,
        }
    }

    /// Forwards a buffer operation over RPC when the worktree is shared
    /// (local host) or remote (guest); otherwise does nothing. Send errors
    /// are logged, not returned.
    pub fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Some((rpc, remote_id)) = match worktree {
                Worktree::Local(worktree) => worktree.rpc.clone(),
                Worktree::Remote(worktree) => Some((worktree.rpc.clone(), worktree.remote_id)),
            } {
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::UpdateBuffer {
                                worktree_id: remote_id,
                                buffer_id,
                                operations: Some(operation).iter().map(Into::into).collect(),
                            })
                            .await
                        {
                            log::error!("error sending buffer operation: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    /// Notifies the host that this guest closed a remote buffer; a no-op for
    /// local worktrees. Send errors are logged, not returned.
    pub fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let worktree_id = worktree.remote_id;
                let rpc = worktree.rpc.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        };
                    })
                    .detach();
            }
        });
    }

    /// Returns this file's path relative to the root of its worktree.
    pub fn path(&self) -> Arc<Path> {
        self.path.clone()
    }

    /// The file's absolute path on disk, derived from the worktree root.
    pub fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree.read(cx).abs_path.join(&self.path)
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    pub fn file_name<'a>(&'a self, cx: &'a AppContext) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| Some(OsStr::new(self.worktree.read(cx).root_name())))
            .map(Into::into)
    }

    /// Whether the underlying entry has been removed from the worktree.
    pub fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Whether the underlying entry still exists in the worktree.
    pub fn exists(&self) -> bool {
        !self.is_deleted()
    }

    /// Writes `text` to this file's path via the owning worktree.
    pub fn save(&self, text: Rope, cx: &mut MutableAppContext) -> impl Future<Output = Result<()>> {
        self.worktree
            .update(cx, |worktree, cx| worktree.save(&self.path, text, cx))
    }

    /// The model id of the owning worktree.
    pub fn worktree_id(&self) -> usize {
        self.worktree.id()
    }

    /// A key identifying this file: (worktree model id, path within worktree).
    pub fn entry_id(&self) -> (usize, Arc<Path>) {
        (self.worktree.id(), self.path())
    }
}
1144
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    id: usize,
    kind: EntryKind,
    // Path relative to the worktree root.
    path: Arc<Path>,
    inode: u64,
    mtime: SystemTime,
    is_symlink: bool,
    // Whether a `.gitignore` rule excludes this entry.
    is_ignored: bool,
}
1155
#[derive(Clone, Debug)]
pub enum EntryKind {
    /// A directory whose children have not been scanned yet.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying its char bag for fuzzy path matching.
    File(CharBag),
}
1162
1163impl Entry {
1164 pub fn path(&self) -> &Arc<Path> {
1165 &self.path
1166 }
1167
1168 pub fn inode(&self) -> u64 {
1169 self.inode
1170 }
1171
1172 pub fn is_ignored(&self) -> bool {
1173 self.is_ignored
1174 }
1175
1176 fn is_dir(&self) -> bool {
1177 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
1178 }
1179
1180 fn is_file(&self) -> bool {
1181 matches!(self.kind, EntryKind::File(_))
1182 }
1183}
1184
1185impl sum_tree::Item for Entry {
1186 type Summary = EntrySummary;
1187
1188 fn summary(&self) -> Self::Summary {
1189 let file_count;
1190 let visible_file_count;
1191 if self.is_file() {
1192 file_count = 1;
1193 if self.is_ignored {
1194 visible_file_count = 0;
1195 } else {
1196 visible_file_count = 1;
1197 }
1198 } else {
1199 file_count = 0;
1200 visible_file_count = 0;
1201 }
1202
1203 EntrySummary {
1204 max_path: self.path().clone(),
1205 file_count,
1206 visible_file_count,
1207 }
1208 }
1209}
1210
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed — and therefore ordered within the tree — by path.
    fn key(&self) -> Self::Key {
        PathKey(self.path().clone())
    }
}
1218
/// Aggregate data maintained for each subtree of the entry sum tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest path contained in the summarized subtree.
    max_path: Arc<Path>,
    /// Total number of file entries in the subtree.
    file_count: usize,
    /// Number of file entries that are not ignored.
    visible_file_count: usize,
}
1225
impl Default for EntrySummary {
    /// The summary of an empty subtree: no files, and the empty path.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
1235
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // `rhs` summarizes entries to the right of ours, so its max path
        // supersedes ours, while the counts accumulate.
        self.max_path = rhs.max_path.clone();
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
1245
/// Key type ordering entries in the sum tree by their worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
1248
impl Default for PathKey {
    /// The empty path, which sorts before every other path.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
1254
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // The accumulated key is simply the greatest path seen so far.
        self.0 = summary.max_path.clone();
    }
}
1260
/// A seek target used when navigating the entry tree by path.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum PathSearch<'a> {
    /// Seek to the entry with exactly this path.
    Exact(&'a Path),
    /// Seek past this path and its entire subtree of descendants.
    Successor(&'a Path),
}
1266
impl<'a> Ord for PathSearch<'a> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        match (self, other) {
            (Self::Exact(a), Self::Exact(b)) => a.cmp(b),
            (Self::Successor(a), Self::Exact(b)) => {
                // `Successor(a)` sorts after `a` itself and after every path
                // within `a`'s subtree, so seeking to it skips all descendants.
                if b.starts_with(a) {
                    cmp::Ordering::Greater
                } else {
                    a.cmp(b)
                }
            }
            // The right-hand side always comes from the tree's `Dimension` impl,
            // which only ever produces `Exact` values, so the remaining two
            // combinations cannot occur.
            _ => unreachable!("not sure we need the other two cases"),
        }
    }
}
1282
impl<'a> PartialOrd for PathSearch<'a> {
    /// Delegates to the total order defined by `Ord`.
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        Some(self.cmp(other))
    }
}
1288
1289impl<'a> Default for PathSearch<'a> {
1290 fn default() -> Self {
1291 Self::Exact(Path::new("").into())
1292 }
1293}
1294
impl<'a: 'b, 'b> sum_tree::Dimension<'a, EntrySummary> for PathSearch<'b> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // Dimensions accumulated from summaries are always `Exact` max paths;
        // the `Ord` impl above relies on this invariant.
        *self = Self::Exact(summary.max_path.as_ref());
    }
}
1300
/// Tree dimension counting every file entry; used to seek to the n-th file.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct FileCount(usize);
1303
impl<'a> sum_tree::Dimension<'a, EntrySummary> for FileCount {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // Accumulate the total number of files across summarized subtrees.
        self.0 += summary.file_count;
    }
}
1309
/// Tree dimension counting only non-ignored file entries.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct VisibleFileCount(usize);
1312
impl<'a> sum_tree::Dimension<'a, EntrySummary> for VisibleFileCount {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // Accumulate only the files that are not matched by an ignore rule.
        self.0 += summary.visible_file_count;
    }
}
1318
/// Scans a worktree on background threads, keeping the shared snapshot up to
/// date and reporting scan progress through the `notify` channel.
struct BackgroundScanner {
    /// Snapshot shared with the worktree model; updated in place as scanning proceeds.
    snapshot: Arc<Mutex<Snapshot>>,
    /// Channel on which `ScanState` transitions are reported.
    notify: Sender<ScanState>,
    /// Pool of worker threads used for parallel directory traversal.
    thread_pool: scoped_pool::Pool,
}
1324
impl BackgroundScanner {
    /// Creates a scanner that populates `snapshot` and reports progress on
    /// `notify`. `worktree_id` is only used to label the scanner's thread pool.
    fn new(snapshot: Arc<Mutex<Snapshot>>, notify: Sender<ScanState>, worktree_id: usize) -> Self {
        Self {
            snapshot,
            notify,
            thread_pool: scoped_pool::Pool::new(16, format!("worktree-{}-scanner", worktree_id)),
        }
    }

    /// The absolute path of the worktree root, read from the shared snapshot.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// Returns a clone of the current snapshot, releasing the lock immediately.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }

    /// Performs the initial recursive scan, then processes batches of
    /// file-system events from `event_stream` until the receiving side of
    /// `notify` is dropped.
    fn run(mut self, event_stream: fsevent::EventStream) {
        // A failed send means the worktree has been dropped; stop scanning.
        if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs() {
            if smol::block_on(self.notify.send(ScanState::Err(Arc::new(err)))).is_err() {
                return;
            }
        }

        if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
            return;
        }

        // Returning `false` from the callback stops the event stream.
        event_stream.run(move |events| {
            if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
                return false;
            }

            if !self.process_events(events) {
                return false;
            }

            if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
                return false;
            }

            true
        });
    }

    /// Performs the initial full scan of the worktree root, fanning directory
    /// traversal out across the thread pool.
    fn scan_dirs(&mut self) -> io::Result<()> {
        self.snapshot.lock().scan_id += 1;

        // The root entry's path is the empty relative path.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let abs_path = self.abs_path();
        let metadata = fs::metadata(&abs_path)?;
        let inode = metadata.ino();
        let is_symlink = fs::symlink_metadata(&abs_path)?.file_type().is_symlink();
        let is_dir = metadata.file_type().is_dir();
        let mtime = metadata.modified()?;

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let mut root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        if is_dir {
            root_name.push('/');
        }

        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let next_entry_id;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.root_name = root_name;
            snapshot.root_char_bag = root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
        }

        if is_dir {
            self.snapshot.lock().insert_entry(Entry {
                id: next_entry_id.fetch_add(1, SeqCst),
                kind: EntryKind::PendingDir,
                path: path.clone(),
                inode,
                mtime,
                is_symlink,
                is_ignored: false,
            });

            // Seed the scan queue with the root directory. Each job carries a
            // clone of the sender so jobs for discovered subdirectories can be
            // enqueued; the channel closes once all senders are dropped.
            let (tx, rx) = crossbeam_channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .unwrap();
            drop(tx);

            self.thread_pool.scoped(|pool| {
                for _ in 0..self.thread_pool.thread_count() {
                    pool.execute(|| {
                        while let Ok(job) = rx.recv() {
                            if let Err(err) =
                                self.scan_dir(root_char_bag, next_entry_id.clone(), &job)
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            });
        } else {
            // The worktree root is a single file.
            self.snapshot.lock().insert_entry(Entry {
                id: next_entry_id.fetch_add(1, SeqCst),
                kind: EntryKind::File(char_bag_for_path(root_char_bag, &path)),
                path,
                inode,
                mtime,
                is_symlink,
                is_ignored: false,
            });
        }

        Ok(())
    }

    /// Scans a single directory, recording its children in the snapshot and
    /// enqueuing one new `ScanJob` per child directory.
    fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> io::Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        for child_entry in fs::read_dir(&job.abs_path)? {
            let child_entry = child_entry?;
            let child_name = child_entry.file_name();
            let child_abs_path = job.abs_path.join(&child_name);
            let child_path: Arc<Path> = job.path.join(&child_name).into();
            let child_is_symlink = child_entry.metadata()?.file_type().is_symlink();
            // `fs::metadata` follows symlinks; a broken link makes it fail, in
            // which case we skip the child rather than aborting the whole scan.
            let child_metadata = if let Ok(metadata) = fs::metadata(&child_abs_path) {
                metadata
            } else {
                log::error!("could not get metadata for path {:?}", child_abs_path);
                continue;
            };

            let child_inode = child_metadata.ino();
            let child_mtime = child_metadata.modified()?;

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                let (ignore, err) = Gitignore::new(&child_abs_path);
                if let Some(err) = err {
                    log::error!("error in ignore file {:?} - {:?}", child_path, err);
                }
                let ignore = Arc::new(ignore);
                ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                new_ignore = Some(ignore);

                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore`
                // sorts near the start of the directory listing (it starts with a `.`),
                // there should rarely be many such entries. Update the ignore stack
                // associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        // `new_jobs` parallels the directory entries in `new_entries`,
                        // so each directory entry consumes the next pending job.
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            if child_metadata.is_dir() {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                new_entries.push(Entry {
                    id: next_entry_id.fetch_add(1, SeqCst),
                    kind: EntryKind::PendingDir,
                    path: child_path.clone(),
                    inode: child_inode,
                    mtime: child_mtime,
                    is_symlink: child_is_symlink,
                    is_ignored,
                });
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Inside an ignored directory everything is ignored, so no
                    // further `.gitignore` evaluation is needed below it.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(Entry {
                    id: next_entry_id.fetch_add(1, SeqCst),
                    kind: EntryKind::File(char_bag_for_path(root_char_bag, &child_path)),
                    path: child_path,
                    inode: child_inode,
                    mtime: child_mtime,
                    is_symlink: child_is_symlink,
                    is_ignored,
                });
            };
        }

        // Commit this directory's children in one batch, then enqueue the
        // subdirectory jobs for other workers to pick up.
        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).unwrap();
        }

        Ok(())
    }

    /// Applies a batch of file-system events to a working copy of the snapshot,
    /// rescans any affected directories, and publishes the result. Returns
    /// `false` if the worktree root can no longer be resolved.
    fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = snapshot.abs_path.canonicalize() {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort by path and drop events for paths whose ancestor also has an
        // event, since rescanning the ancestor covers the descendant.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First remove all affected paths, so that renames registered as a
        // remove-plus-insert can reuse entry ids (see `removed_entry_ids`).
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        // Then re-insert whatever currently exists at each affected path,
        // queueing rescans for directories.
        let (scan_queue_tx, scan_queue_rx) = crossbeam_channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match fs_entry_for_path(
                snapshot.root_char_bag,
                &next_entry_id,
                path.clone(),
                &event.path,
            ) {
                Ok(Some(mut fs_entry)) => {
                    let is_dir = fs_entry.is_dir();
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, is_dir);
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry);
                    if is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .unwrap();
                    }
                }
                // The path no longer exists; the removal above already handled it.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        // Publish the updated snapshot before the rescan so `scan_dir` workers
        // (which lock `self.snapshot`) see the removals and insertions above.
        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.thread_pool.scoped(|pool| {
            for _ in 0..self.thread_pool.thread_count() {
                pool.execute(|| {
                    while let Ok(job) = scan_queue_rx.recv() {
                        if let Err(err) = self.scan_dir(root_char_bag, next_entry_id.clone(), &job)
                        {
                            log::error!("error scanning {:?}: {}", job.abs_path, err);
                        }
                    }
                });
            }
        });

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses();
        true
    }

    /// Re-evaluates ignore status for subtrees whose `.gitignore` files changed
    /// in the current scan, and drops ignore records whose files were deleted.
    fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // A matching scan id means this ignore file was (re)loaded during
            // the current scan, so its subtree needs re-evaluation.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = crossbeam_channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip descendants of `parent_path`: updating the ancestor's
            // subtree re-evaluates them anyway (jobs recurse downward).
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .unwrap();
        }
        drop(ignore_queue_tx);

        self.thread_pool.scoped(|scope| {
            for _ in 0..self.thread_pool.thread_count() {
                scope.execute(|| {
                    while let Ok(job) = ignore_queue_rx.recv() {
                        self.update_ignore_status(job, &snapshot);
                    }
                });
            }
        });
    }

    /// Recomputes the ignore status of the direct children of `job.path`,
    /// enqueuing a job for each child directory, and writes any status changes
    /// back into the shared snapshot.
    fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(entry.path(), entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path().clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .unwrap();
            }

            // Only write back entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                edits.push(Edit::Insert(entry));
            }
        }
        self.snapshot.lock().entries.edit(edits, &());
    }
}
1734
1735fn refresh_entry(snapshot: &Mutex<Snapshot>, path: Arc<Path>, abs_path: &Path) -> Result<Entry> {
1736 let root_char_bag;
1737 let next_entry_id;
1738 {
1739 let snapshot = snapshot.lock();
1740 root_char_bag = snapshot.root_char_bag;
1741 next_entry_id = snapshot.next_entry_id.clone();
1742 }
1743 let entry = fs_entry_for_path(root_char_bag, &next_entry_id, path, abs_path)?
1744 .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
1745 Ok(snapshot.lock().insert_entry(entry))
1746}
1747
1748fn fs_entry_for_path(
1749 root_char_bag: CharBag,
1750 next_entry_id: &AtomicUsize,
1751 path: Arc<Path>,
1752 abs_path: &Path,
1753) -> Result<Option<Entry>> {
1754 let metadata = match fs::metadata(&abs_path) {
1755 Err(err) => {
1756 return match (err.kind(), err.raw_os_error()) {
1757 (io::ErrorKind::NotFound, _) => Ok(None),
1758 (io::ErrorKind::Other, Some(libc::ENOTDIR)) => Ok(None),
1759 _ => Err(anyhow::Error::new(err)),
1760 }
1761 }
1762 Ok(metadata) => metadata,
1763 };
1764 let inode = metadata.ino();
1765 let mtime = metadata.modified()?;
1766 let is_symlink = fs::symlink_metadata(&abs_path)
1767 .context("failed to read symlink metadata")?
1768 .file_type()
1769 .is_symlink();
1770
1771 let entry = Entry {
1772 id: next_entry_id.fetch_add(1, SeqCst),
1773 kind: if metadata.file_type().is_dir() {
1774 EntryKind::PendingDir
1775 } else {
1776 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1777 },
1778 path: Arc::from(path),
1779 inode,
1780 mtime,
1781 is_symlink,
1782 is_ignored: false,
1783 };
1784
1785 Ok(Some(entry))
1786}
1787
1788fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
1789 let mut result = root_char_bag;
1790 result.extend(
1791 path.to_string_lossy()
1792 .chars()
1793 .map(|c| c.to_ascii_lowercase()),
1794 );
1795 result
1796}
1797
/// A unit of work for the directory-scanning thread pool.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: PathBuf,
    /// The directory's path relative to the worktree root.
    path: Arc<Path>,
    /// Ignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which jobs for discovered subdirectories are pushed.
    scan_queue: crossbeam_channel::Sender<ScanJob>,
}
1804
/// A unit of work for re-evaluating ignore status under a directory.
struct UpdateIgnoreStatusJob {
    /// Directory whose direct children should be re-evaluated.
    path: Arc<Path>,
    /// Ignore rules in effect above `path`.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which jobs for child directories are pushed.
    ignore_queue: crossbeam_channel::Sender<UpdateIgnoreStatusJob>,
}
1810
/// Test-only extension methods for worktree model handles.
pub trait WorktreeHandle {
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
1818
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create the sentinel file and wait for the scanner to observe it...
            fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for that to be observed as well.
            fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
1851
/// Iterates over the file entries of a snapshot — either every file, or only
/// those not matched by an ignore rule.
pub enum FileIter<'a> {
    All(Cursor<'a, Entry, FileCount, ()>),
    Visible(Cursor<'a, Entry, VisibleFileCount, ()>),
}
1856
impl<'a> FileIter<'a> {
    /// Begins iteration over all files, starting at the `start`-th file.
    fn all(snapshot: &'a Snapshot, start: usize) -> Self {
        let mut cursor = snapshot.entries.cursor();
        cursor.seek(&FileCount(start), Bias::Right, &());
        Self::All(cursor)
    }

    /// Begins iteration over non-ignored files, starting at the `start`-th one.
    fn visible(snapshot: &'a Snapshot, start: usize) -> Self {
        let mut cursor = snapshot.entries.cursor();
        cursor.seek(&VisibleFileCount(start), Bias::Right, &());
        Self::Visible(cursor)
    }

    /// Advances the cursor to the next file counted by this iterator's dimension.
    fn next_internal(&mut self) {
        match self {
            Self::All(cursor) => {
                let ix = *cursor.seek_start();
                cursor.seek_forward(&FileCount(ix.0 + 1), Bias::Right, &());
            }
            Self::Visible(cursor) => {
                let ix = *cursor.seek_start();
                cursor.seek_forward(&VisibleFileCount(ix.0 + 1), Bias::Right, &());
            }
        }
    }

    /// The entry currently under the cursor, if any.
    fn item(&self) -> Option<&'a Entry> {
        match self {
            Self::All(cursor) => cursor.item(),
            Self::Visible(cursor) => cursor.item(),
        }
    }
}
1890
1891impl<'a> Iterator for FileIter<'a> {
1892 type Item = &'a Entry;
1893
1894 fn next(&mut self) -> Option<Self::Item> {
1895 if let Some(entry) = self.item() {
1896 self.next_internal();
1897 Some(entry)
1898 } else {
1899 None
1900 }
1901 }
1902}
1903
/// Iterates over the direct children of a directory within a snapshot.
struct ChildEntriesIter<'a> {
    /// The directory whose children are being enumerated.
    parent_path: &'a Path,
    cursor: Cursor<'a, Entry, PathSearch<'a>, ()>,
}
1908
impl<'a> ChildEntriesIter<'a> {
    /// Positions a cursor just past `parent_path` itself, so iteration begins
    /// at its first child.
    fn new(parent_path: &'a Path, snapshot: &'a Snapshot) -> Self {
        let mut cursor = snapshot.entries.cursor();
        cursor.seek(&PathSearch::Exact(parent_path), Bias::Right, &());
        Self {
            parent_path,
            cursor,
        }
    }
}
1919
1920impl<'a> Iterator for ChildEntriesIter<'a> {
1921 type Item = &'a Entry;
1922
1923 fn next(&mut self) -> Option<Self::Item> {
1924 if let Some(item) = self.cursor.item() {
1925 if item.path().starts_with(self.parent_path) {
1926 self.cursor
1927 .seek_forward(&PathSearch::Successor(item.path()), Bias::Left, &());
1928 Some(item)
1929 } else {
1930 None
1931 }
1932 } else {
1933 None
1934 }
1935 }
1936}
1937
/// RPC message handlers for worktrees that are shared with remote guests.
/// Each handler looks up the shared worktree named in the message's payload
/// and delegates to the corresponding worktree method.
mod remote {
    use super::*;

    /// Handles a guest leaving a shared worktree.
    pub async fn remove_guest(
        envelope: TypedEnvelope<proto::RemoveGuest>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        rpc.state
            .lock()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?
            .update(cx, |worktree, cx| worktree.remove_guest(envelope, cx))
    }

    /// Handles a guest's request to open a buffer, responding with the result.
    pub async fn open_buffer(
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        // Capture the receipt first so we can respond to the exact request.
        let receipt = envelope.receipt();
        let worktree = rpc
            .state
            .lock()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?;

        let response = worktree
            .update(cx, |worktree, cx| {
                worktree
                    .as_local_mut()
                    .unwrap()
                    .open_remote_buffer(envelope, cx)
            })
            .await?;

        rpc.respond(receipt, response).await?;

        Ok(())
    }

    /// Handles a guest closing a buffer it previously opened.
    pub async fn close_buffer(
        envelope: TypedEnvelope<proto::CloseBuffer>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        let worktree = rpc
            .state
            .lock()
            .await
            .shared_worktree(envelope.payload.worktree_id, cx)?;

        worktree.update(cx, |worktree, cx| {
            worktree
                .as_local_mut()
                .unwrap()
                .close_remote_buffer(envelope, cx)
        })
    }

    /// Applies a guest's buffer edits to the host's copy of the buffer.
    pub async fn update_buffer(
        envelope: TypedEnvelope<proto::UpdateBuffer>,
        rpc: &rpc::Client,
        cx: &mut AsyncAppContext,
    ) -> anyhow::Result<()> {
        let message = envelope.payload;
        let mut state = rpc.state.lock().await;
        let worktree = state.shared_worktree(message.worktree_id, cx)?;
        worktree.update(cx, |tree, cx| tree.update_buffer(message, cx))?;
        Ok(())
    }
}
2010
2011#[cfg(test)]
2012mod tests {
2013 use super::*;
2014 use crate::test::*;
2015 use anyhow::Result;
2016 use rand::prelude::*;
2017 use serde_json::json;
2018 use std::time::UNIX_EPOCH;
2019 use std::{env, fmt::Write, os::unix, time::SystemTime};
2020
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        // Build a small tree behind symlinks and verify that scanning resolves
        // them and that fuzzy path matching finds the expected entries.
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        // The worktree root itself is reached through a symlink, and one
        // subdirectory is a symlink to another.
        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let tree = cx.add_model(|cx| Worktree::local(root_link_path, Default::default(), cx));

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert_eq!(tree.file_count(), 5);

            // Both sides of the directory symlink resolve to the same inode.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );

            let results = match_paths(
                Some(tree.snapshot()).iter(),
                "bna",
                false,
                false,
                false,
                10,
                Default::default(),
                cx.thread_pool().clone(),
            )
            .into_iter()
            .map(|result| result.path)
            .collect::<Vec<Arc<Path>>>();
            assert_eq!(
                results,
                vec![
                    PathBuf::from("banana/carrot/date").into(),
                    PathBuf::from("banana/carrot/endive").into(),
                ]
            );
        })
    }
2081
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        // Edit a buffer and save it; the file on disk must match the buffer.
        let app_state = cx.read(build_app_state);
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let tree = cx.add_model(|cx| Worktree::local(dir.path(), app_state.languages, cx));
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
2102
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        // Saving must also work when the worktree root is a single file rather
        // than a directory (the buffer is opened at the empty relative path).
        let app_state = cx.read(build_app_state);
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let tree = cx.add_model(|cx| Worktree::local(file_path.clone(), app_state.languages, cx));
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
2129
    #[gpui::test]
    async fn test_rescan_simple(mut cx: gpui::TestAppContext) {
        // Verify that renames, moves, and deletions detected by a rescan
        // preserve entry ids and keep open buffers pointing at the new paths.
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let tree = cx.add_model(|cx| Worktree::local(dir.path(), Default::default(), cx));

        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap() }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // After scanning, the worktree knows which files exist and which don't.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Mutate the tree in a single batch: a rename within a directory, a
        // file deletion, a directory rename, and a rename into another directory.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                vec![
                    "a",
                    "a/file1",
                    "a/file2.new",
                    "b",
                    "d",
                    "d/file3",
                    "d/file4"
                ]
            );

            // Renamed and moved entries keep their original ids.
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            // Open buffers follow their files to the new paths.
            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            // Only the deleted file's buffer is marked deleted.
            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });
    }
2231
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        // Verify that `.gitignore` rules are applied to both the initial scan
        // and to files created afterwards and picked up by a rescan.
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let tree = cx.add_model(|cx| Worktree::local(dir.path(), Default::default(), cx));
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored(), false);
            assert_eq!(ignored.is_ignored(), true);
        });

        // Files created after the initial scan must inherit the same statuses.
        fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored(), false);
            assert_eq!(ignored.is_ignored(), true);
            assert_eq!(dot_git.is_ignored(), true);
        });
    }
2270
    #[test]
    fn test_random() {
        // Randomized test: mutate a temp tree while feeding event batches to a
        // scanner, then check that its snapshot matches a fresh from-scratch scan.
        // Iteration counts and the seed are overridable via environment variables.
        let iterations = env::var("ITERATIONS")
            .map(|i| i.parse().unwrap())
            .unwrap_or(100);
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);
        let seeds = if let Ok(seed) = env::var("SEED").map(|s| s.parse().unwrap()) {
            seed..seed + 1
        } else {
            0..iterations
        };

        for seed in seeds {
            dbg!(seed);
            let mut rng = StdRng::seed_from_u64(seed);

            let root_dir = tempdir::TempDir::new(&format!("test-{}", seed)).unwrap();
            for _ in 0..initial_entries {
                randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
            }
            log::info!("Generated initial tree");

            let (notify_tx, _notify_rx) = smol::channel::unbounded();
            let mut scanner = BackgroundScanner::new(
                Arc::new(Mutex::new(Snapshot {
                    id: 0,
                    scan_id: 0,
                    abs_path: root_dir.path().into(),
                    entries: Default::default(),
                    paths_by_id: Default::default(),
                    removed_entry_ids: Default::default(),
                    ignores: Default::default(),
                    root_name: Default::default(),
                    root_char_bag: Default::default(),
                    next_entry_id: Default::default(),
                })),
                notify_tx,
                0,
            );
            scanner.scan_dirs().unwrap();
            scanner.snapshot().check_invariants();

            // Interleave mutations of the tree with deliveries of random-sized
            // prefixes of the accumulated event batches.
            let mut events = Vec::new();
            let mut mutations_len = operations;
            while mutations_len > 1 {
                if !events.is_empty() && rng.gen_bool(0.4) {
                    let len = rng.gen_range(0..=events.len());
                    let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                    log::info!("Delivering events: {:#?}", to_deliver);
                    scanner.process_events(to_deliver);
                    scanner.snapshot().check_invariants();
                } else {
                    events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                    mutations_len -= 1;
                }
            }
            // Deliver all remaining events so the scanner catches up fully.
            log::info!("Quiescing: {:#?}", events);
            scanner.process_events(events);
            scanner.snapshot().check_invariants();

            // A brand-new scan of the final tree must produce the same snapshot
            // as the incrementally-maintained one.
            let (notify_tx, _notify_rx) = smol::channel::unbounded();
            let mut new_scanner = BackgroundScanner::new(
                Arc::new(Mutex::new(Snapshot {
                    id: 0,
                    scan_id: 0,
                    abs_path: root_dir.path().into(),
                    entries: Default::default(),
                    paths_by_id: Default::default(),
                    removed_entry_ids: Default::default(),
                    ignores: Default::default(),
                    root_name: Default::default(),
                    root_char_bag: Default::default(),
                    next_entry_id: Default::default(),
                })),
                notify_tx,
                1,
            );
            new_scanner.scan_dirs().unwrap();
            assert_eq!(scanner.snapshot().to_vec(), new_scanner.snapshot().to_vec());
        }
    }
2357
2358 fn randomly_mutate_tree(
2359 root_path: &Path,
2360 insertion_probability: f64,
2361 rng: &mut impl Rng,
2362 ) -> Result<Vec<fsevent::Event>> {
2363 let root_path = root_path.canonicalize().unwrap();
2364 let (dirs, files) = read_dir_recursive(root_path.clone());
2365
2366 let mut events = Vec::new();
2367 let mut record_event = |path: PathBuf| {
2368 events.push(fsevent::Event {
2369 event_id: SystemTime::now()
2370 .duration_since(UNIX_EPOCH)
2371 .unwrap()
2372 .as_secs(),
2373 flags: fsevent::StreamFlags::empty(),
2374 path,
2375 });
2376 };
2377
2378 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
2379 let path = dirs.choose(rng).unwrap();
2380 let new_path = path.join(gen_name(rng));
2381
2382 if rng.gen() {
2383 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
2384 fs::create_dir(&new_path)?;
2385 } else {
2386 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
2387 fs::write(&new_path, "")?;
2388 }
2389 record_event(new_path);
2390 } else if rng.gen_bool(0.05) {
2391 let ignore_dir_path = dirs.choose(rng).unwrap();
2392 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
2393
2394 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
2395 let files_to_ignore = {
2396 let len = rng.gen_range(0..=subfiles.len());
2397 subfiles.choose_multiple(rng, len)
2398 };
2399 let dirs_to_ignore = {
2400 let len = rng.gen_range(0..subdirs.len());
2401 subdirs.choose_multiple(rng, len)
2402 };
2403
2404 let mut ignore_contents = String::new();
2405 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
2406 write!(
2407 ignore_contents,
2408 "{}\n",
2409 path_to_ignore
2410 .strip_prefix(&ignore_dir_path)?
2411 .to_str()
2412 .unwrap()
2413 )
2414 .unwrap();
2415 }
2416 log::info!(
2417 "Creating {:?} with contents:\n{}",
2418 ignore_path.strip_prefix(&root_path)?,
2419 ignore_contents
2420 );
2421 fs::write(&ignore_path, ignore_contents).unwrap();
2422 record_event(ignore_path);
2423 } else {
2424 let old_path = {
2425 let file_path = files.choose(rng);
2426 let dir_path = dirs[1..].choose(rng);
2427 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
2428 };
2429
2430 let is_rename = rng.gen();
2431 if is_rename {
2432 let new_path_parent = dirs
2433 .iter()
2434 .filter(|d| !d.starts_with(old_path))
2435 .choose(rng)
2436 .unwrap();
2437
2438 let overwrite_existing_dir =
2439 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
2440 let new_path = if overwrite_existing_dir {
2441 fs::remove_dir_all(&new_path_parent).ok();
2442 new_path_parent.to_path_buf()
2443 } else {
2444 new_path_parent.join(gen_name(rng))
2445 };
2446
2447 log::info!(
2448 "Renaming {:?} to {}{:?}",
2449 old_path.strip_prefix(&root_path)?,
2450 if overwrite_existing_dir {
2451 "overwrite "
2452 } else {
2453 ""
2454 },
2455 new_path.strip_prefix(&root_path)?
2456 );
2457 fs::rename(&old_path, &new_path)?;
2458 record_event(old_path.clone());
2459 record_event(new_path);
2460 } else if old_path.is_dir() {
2461 let (dirs, files) = read_dir_recursive(old_path.clone());
2462
2463 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
2464 fs::remove_dir_all(&old_path).unwrap();
2465 for file in files {
2466 record_event(file);
2467 }
2468 for dir in dirs {
2469 record_event(dir);
2470 }
2471 } else {
2472 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
2473 fs::remove_file(old_path).unwrap();
2474 record_event(old_path.clone());
2475 }
2476 }
2477
2478 Ok(events)
2479 }
2480
2481 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
2482 let child_entries = fs::read_dir(&path).unwrap();
2483 let mut dirs = vec![path];
2484 let mut files = Vec::new();
2485 for child_entry in child_entries {
2486 let child_path = child_entry.unwrap().path();
2487 if child_path.is_dir() {
2488 let (child_dirs, child_files) = read_dir_recursive(child_path);
2489 dirs.extend(child_dirs);
2490 files.extend(child_files);
2491 } else {
2492 files.push(child_path);
2493 }
2494 }
2495 (dirs, files)
2496 }
2497
2498 fn gen_name(rng: &mut impl Rng) -> String {
2499 (0..6)
2500 .map(|_| rng.sample(rand::distributions::Alphanumeric))
2501 .map(char::from)
2502 .collect()
2503 }
2504
2505 impl Snapshot {
2506 fn check_invariants(&self) {
2507 let mut files = self.files(0);
2508 let mut visible_files = self.visible_files(0);
2509 for entry in self.entries.cursor::<(), ()>() {
2510 if entry.is_file() {
2511 assert_eq!(files.next().unwrap().inode(), entry.inode);
2512 if !entry.is_ignored {
2513 assert_eq!(visible_files.next().unwrap().inode(), entry.inode);
2514 }
2515 }
2516 }
2517 assert!(files.next().is_none());
2518 assert!(visible_files.next().is_none());
2519
2520 let mut bfs_paths = Vec::new();
2521 let mut stack = vec![Path::new("")];
2522 while let Some(path) = stack.pop() {
2523 bfs_paths.push(path);
2524 let ix = stack.len();
2525 for child_entry in self.child_entries(path) {
2526 stack.insert(ix, child_entry.path());
2527 }
2528 }
2529
2530 let dfs_paths = self
2531 .entries
2532 .cursor::<(), ()>()
2533 .map(|e| e.path().as_ref())
2534 .collect::<Vec<_>>();
2535 assert_eq!(bfs_paths, dfs_paths);
2536
2537 for (ignore_parent_path, _) in &self.ignores {
2538 assert!(self.entry_for_path(ignore_parent_path).is_some());
2539 assert!(self
2540 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
2541 .is_some());
2542 }
2543 }
2544
2545 fn to_vec(&self) -> Vec<(&Path, u64, bool)> {
2546 let mut paths = Vec::new();
2547 for entry in self.entries.cursor::<(), ()>() {
2548 paths.push((entry.path().as_ref(), entry.inode(), entry.is_ignored()));
2549 }
2550 paths.sort_by(|a, b| a.0.cmp(&b.0));
2551 paths
2552 }
2553 }
2554}