1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
9use clock::ReplicaId;
10use collections::{hash_map, HashMap, HashSet};
11use futures::{Stream, StreamExt};
12use fuzzy::CharBag;
13use gpui::{
14 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
15 Task, UpgradeModelHandle, WeakModelHandle,
16};
17use language::{
18 range_from_lsp, Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language,
19 LanguageRegistry, Operation, PointUtf16, Rope,
20};
21use lazy_static::lazy_static;
22use lsp::LanguageServer;
23use parking_lot::Mutex;
24use postage::{
25 prelude::{Sink as _, Stream as _},
26 watch,
27};
28use serde::Deserialize;
29use smol::channel::{self, Sender};
30use std::{
31 any::Any,
32 cmp::{self, Ordering},
33 convert::{TryFrom, TryInto},
34 ffi::{OsStr, OsString},
35 fmt,
36 future::Future,
37 ops::Deref,
38 path::{Path, PathBuf},
39 sync::{
40 atomic::{AtomicUsize, Ordering::SeqCst},
41 Arc,
42 },
43 time::{Duration, SystemTime},
44};
45use sum_tree::{Bias, TreeMap};
46use sum_tree::{Edit, SeekTarget, SumTree};
47use util::{post_inc, ResultExt, TryFutureExt};
48
lazy_static! {
    // Interned file name compared against directory entries during scanning
    // to decide when a `.gitignore` file must be parsed.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
52
/// Progress of the background filesystem scan of a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan is currently running.
    Idle,
    /// The background scanner is traversing the worktree.
    Scanning,
    /// The scan failed; the error is `Arc`-wrapped so the state stays `Clone`.
    Err(Arc<anyhow::Error>),
}
59
/// Identifier of a worktree. For local worktrees this wraps the GPUI model id
/// (see `LocalWorktree::new`); it round-trips over the wire as a `u64` via
/// `from_proto`/`to_proto`.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A tree of files being edited: either rooted on this machine's filesystem
/// (`Local`) or mirrored from a collaborator over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// Events emitted by a `Worktree` model.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Event {
    /// A disk-based diagnostics pass (e.g. a compiler run) has started.
    DiskBasedDiagnosticsUpdating,
    /// A disk-based diagnostics pass has finished.
    DiskBasedDiagnosticsUpdated,
    /// Diagnostics changed for the given worktree-relative path.
    DiagnosticsUpdated(Arc<Path>),
}
74
75impl Entity for Worktree {
76 type Event = Event;
77
78 fn app_will_quit(
79 &mut self,
80 _: &mut MutableAppContext,
81 ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
82 use futures::FutureExt;
83
84 if let Self::Local(worktree) = self {
85 let shutdown_futures = worktree
86 .language_servers
87 .drain()
88 .filter_map(|(_, server)| server.shutdown())
89 .collect::<Vec<_>>();
90 Some(
91 async move {
92 futures::future::join_all(shutdown_futures).await;
93 }
94 .boxed(),
95 )
96 } else {
97 None
98 }
99 }
100}
101
102impl Worktree {
    /// Create a worktree for a directory on the local filesystem and start
    /// its background scanner.
    ///
    /// The scanner runs on the background executor, watches `abs_path` for
    /// filesystem events (batched every 100ms), and keeps
    /// `background_snapshot` up to date; scan progress flows back to the
    /// model through `scan_states_tx` (wired up inside `LocalWorktree::new`).
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // Holding the task keeps the scanner alive for the worktree's
            // lifetime; dropping the worktree cancels it.
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
127
    /// Create a read-only mirror of a collaborator's worktree from its wire
    /// representation.
    ///
    /// The entry trees are deserialized on the background executor, then two
    /// tasks are spawned: one folds incoming `UpdateWorktree` messages into a
    /// shared snapshot, the other re-polls the model whenever that snapshot
    /// changes.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        // Building both SumTrees is linear in the number of entries, so do it
        // off the main thread.
        let (entries_by_path, entries_by_id, diagnostic_summaries) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // A malformed entry is skipped rather than failing the
                        // whole worktree.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());

                let diagnostic_summaries = TreeMap::from_ordered_entries(
                    worktree.diagnostic_summaries.into_iter().map(|summary| {
                        (
                            PathKey(PathBuf::from(summary.path).into()),
                            DiagnosticSummary {
                                error_count: summary.error_count as usize,
                                warning_count: summary.warning_count as usize,
                                info_count: summary.info_count as usize,
                                hint_count: summary.hint_count as usize,
                            },
                        )
                    }),
                );

                (entries_by_path, entries_by_id, diagnostic_summaries)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    // Remote worktrees have no local filesystem root.
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Task 1: apply each remote update to the latest snapshot and
                // publish the result on the watch channel. Ends when
                // `updates_tx` is dropped with the worktree.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                // Task 2: whenever a new snapshot is published, ask the model
                // to re-poll; stops once the model has been dropped (weak
                // handle fails to upgrade).
                {
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                    diagnostic_summaries,
                })
            })
        });

        Ok(worktree)
    }
250
251 pub fn as_local(&self) -> Option<&LocalWorktree> {
252 if let Worktree::Local(worktree) = self {
253 Some(worktree)
254 } else {
255 None
256 }
257 }
258
259 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
260 if let Worktree::Remote(worktree) = self {
261 Some(worktree)
262 } else {
263 None
264 }
265 }
266
267 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
268 if let Worktree::Local(worktree) = self {
269 Some(worktree)
270 } else {
271 None
272 }
273 }
274
275 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
276 if let Worktree::Remote(worktree) = self {
277 Some(worktree)
278 } else {
279 None
280 }
281 }
282
283 pub fn snapshot(&self) -> Snapshot {
284 match self {
285 Worktree::Local(worktree) => worktree.snapshot(),
286 Worktree::Remote(worktree) => worktree.snapshot(),
287 }
288 }
289
290 pub fn replica_id(&self) -> ReplicaId {
291 match self {
292 Worktree::Local(_) => 0,
293 Worktree::Remote(worktree) => worktree.replica_id,
294 }
295 }
296
297 pub fn remove_collaborator(
298 &mut self,
299 peer_id: PeerId,
300 replica_id: ReplicaId,
301 cx: &mut ModelContext<Self>,
302 ) {
303 match self {
304 Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
305 Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
306 }
307 }
308
309 pub fn languages(&self) -> &Arc<LanguageRegistry> {
310 match self {
311 Worktree::Local(worktree) => &worktree.language_registry,
312 Worktree::Remote(worktree) => &worktree.languages,
313 }
314 }
315
316 pub fn user_store(&self) -> &ModelHandle<UserStore> {
317 match self {
318 Worktree::Local(worktree) => &worktree.user_store,
319 Worktree::Remote(worktree) => &worktree.user_store,
320 }
321 }
322
323 pub fn handle_open_buffer(
324 &mut self,
325 envelope: TypedEnvelope<proto::OpenBuffer>,
326 rpc: Arc<Client>,
327 cx: &mut ModelContext<Self>,
328 ) -> anyhow::Result<()> {
329 let receipt = envelope.receipt();
330
331 let response = self
332 .as_local_mut()
333 .unwrap()
334 .open_remote_buffer(envelope, cx);
335
336 cx.background()
337 .spawn(
338 async move {
339 rpc.respond(receipt, response.await?).await?;
340 Ok(())
341 }
342 .log_err(),
343 )
344 .detach();
345
346 Ok(())
347 }
348
349 pub fn handle_close_buffer(
350 &mut self,
351 envelope: TypedEnvelope<proto::CloseBuffer>,
352 _: Arc<Client>,
353 cx: &mut ModelContext<Self>,
354 ) -> anyhow::Result<()> {
355 self.as_local_mut()
356 .unwrap()
357 .close_remote_buffer(envelope, cx)
358 }
359
360 pub fn diagnostic_summaries<'a>(
361 &'a self,
362 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
363 match self {
364 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
365 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
366 }
367 .iter()
368 .map(|(path, summary)| (path.0.clone(), summary.clone()))
369 }
370
371 pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
372 match self {
373 Worktree::Local(worktree) => &mut worktree.loading_buffers,
374 Worktree::Remote(worktree) => &mut worktree.loading_buffers,
375 }
376 }
377
    /// Open (or return an already-open) buffer for `path`.
    ///
    /// Ensures at most one buffer per path: an already-open buffer is reused,
    /// and concurrent opens of the same path share a single in-flight load
    /// via a watch channel rather than loading twice.
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
        }

        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    // Wrap the error in an Arc so every waiter can clone it.
                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
                })
                .detach();
                rx
            }
        };

        // Wait until the loading task publishes a result on the watch.
        cx.spawn(|_, _| async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return result.clone().map_err(|e| anyhow!("{}", e));
                }
                loading_watch.recv().await;
            }
        })
    }
431
432 #[cfg(feature = "test-support")]
433 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
434 let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
435 Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
436 Worktree::Remote(worktree) => {
437 Box::new(worktree.open_buffers.values().filter_map(|buf| {
438 if let RemoteBuffer::Loaded(buf) = buf {
439 Some(buf)
440 } else {
441 None
442 }
443 }))
444 }
445 };
446
447 let path = path.as_ref();
448 open_buffers
449 .find(|buffer| {
450 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
451 file.path().as_ref() == path
452 } else {
453 false
454 }
455 })
456 .is_some()
457 }
458
    /// RPC handler: apply buffer operations sent by a peer.
    ///
    /// On the host, the target buffer must already exist. On a guest,
    /// operations that arrive before the buffer has finished opening (or
    /// after it was dropped) are queued in `RemoteBuffer::Operations` so they
    /// can be applied later.
    pub fn handle_update_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateBuffer>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let buffer_id = payload.buffer_id as usize;
        let ops = payload
            .operations
            .into_iter()
            .map(|op| language::proto::deserialize_operation(op))
            .collect::<Result<Vec<_>, _>>()?;

        match self {
            Worktree::Local(worktree) => {
                let buffer = worktree
                    .open_buffers
                    .get(&buffer_id)
                    .and_then(|buf| buf.upgrade(cx))
                    .ok_or_else(|| {
                        anyhow!("invalid buffer {} in update buffer message", buffer_id)
                    })?;
                buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
            }
            Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
                // Still loading: append to the pending operations.
                Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
                Some(RemoteBuffer::Loaded(buffer)) => {
                    if let Some(buffer) = buffer.upgrade(cx) {
                        buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
                    } else {
                        // Buffer model was dropped; start queueing again.
                        worktree
                            .open_buffers
                            .insert(buffer_id, RemoteBuffer::Operations(ops));
                    }
                }
                None => {
                    worktree
                        .open_buffers
                        .insert(buffer_id, RemoteBuffer::Operations(ops));
                }
            },
        }

        Ok(())
    }
504
    /// RPC handler: a guest asked the host to save a buffer.
    ///
    /// Saves asynchronously, then replies with the saved version vector and
    /// mtime so the guest can mark its replica as saved. Save/send errors are
    /// logged rather than propagated.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        // Saving on behalf of a guest requires an active share.
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        // Only buffers this guest previously opened can be saved by it.
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        // The save must run on the foreground (it touches the model) …
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        // … while the response is sent from the background.
        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
557
    /// RPC handler on guests: the host saved a buffer. Record the new version
    /// and mtime on the corresponding open buffer, if it is still alive;
    /// otherwise the message is silently dropped.
    pub fn handle_buffer_saved(
        &mut self,
        envelope: TypedEnvelope<proto::BufferSaved>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let worktree = self.as_remote_mut().unwrap();
        if let Some(buffer) = worktree
            .open_buffers
            .get(&(payload.buffer_id as usize))
            .and_then(|buf| buf.upgrade(cx))
        {
            buffer.update(cx, |buffer, cx| {
                let version = payload.version.try_into()?;
                let mtime = payload
                    .mtime
                    .ok_or_else(|| anyhow!("missing mtime"))?
                    .into();
                buffer.did_save(version, mtime, None, cx);
                Result::<_, anyhow::Error>::Ok(())
            })?;
        }
        Ok(())
    }
582
583 pub fn handle_format_buffer(
584 &mut self,
585 envelope: TypedEnvelope<proto::FormatBuffer>,
586 rpc: Arc<Client>,
587 cx: &mut ModelContext<Self>,
588 ) -> Result<()> {
589 let sender_id = envelope.original_sender_id()?;
590 let this = self.as_local().unwrap();
591 let buffer = this
592 .shared_buffers
593 .get(&sender_id)
594 .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
595 .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
596
597 let receipt = envelope.receipt();
598 cx.spawn(|_, mut cx| async move {
599 let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
600 // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
601 // associated with formatting.
602 cx.spawn(|_| async move {
603 dbg!("responding");
604 match format {
605 Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
606 Err(error) => {
607 rpc.respond_with_error(
608 receipt,
609 proto::Error {
610 message: error.to_string(),
611 },
612 )
613 .await?
614 }
615 }
616 Ok::<_, anyhow::Error>(())
617 })
618 .await
619 .log_err();
620 })
621 .detach();
622
623 Ok(())
624 }
625
    /// Refresh `self.snapshot` from its authoritative source (the background
    /// scanner's snapshot locally, the watch channel remotely), then keep
    /// open buffers' file metadata in sync once the tree is stable.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    // While scanning, re-poll on a timer instead of touching
                    // open buffers on every intermediate snapshot.
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                // Fake fs is used by tests; just yield so they
                                // stay fast and deterministic.
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
658
    /// After the snapshot changes, re-derive each open buffer's `File` from
    /// the latest entries, and drop map slots for buffers whose models have
    /// been released.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                // Only buffers that finished loading carry a handle.
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        // Resolve the file by entry id first, then by its old
                        // path; if neither is found the entry is gone, so the
                        // file keeps its old metadata but loses its entry id.
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                // Model was dropped; can't mutate the map while iterating it,
                // so collect the id and remove below.
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
734
735 pub fn update_diagnostics(
736 &mut self,
737 params: lsp::PublishDiagnosticsParams,
738 disk_based_sources: &HashSet<String>,
739 cx: &mut ModelContext<Worktree>,
740 ) -> Result<()> {
741 let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
742 let abs_path = params
743 .uri
744 .to_file_path()
745 .map_err(|_| anyhow!("URI is not a file"))?;
746 let worktree_path = Arc::from(
747 abs_path
748 .strip_prefix(&this.abs_path)
749 .context("path is not within worktree")?,
750 );
751
752 let mut next_group_id = 0;
753 let mut diagnostics = Vec::default();
754 let mut primary_diagnostic_group_ids = HashMap::default();
755 let mut sources_by_group_id = HashMap::default();
756 let mut supporting_diagnostic_severities = HashMap::default();
757 for diagnostic in ¶ms.diagnostics {
758 let source = diagnostic.source.as_ref();
759 let code = diagnostic.code.as_ref().map(|code| match code {
760 lsp::NumberOrString::Number(code) => code.to_string(),
761 lsp::NumberOrString::String(code) => code.clone(),
762 });
763 let range = range_from_lsp(diagnostic.range);
764 let is_supporting = diagnostic
765 .related_information
766 .as_ref()
767 .map_or(false, |infos| {
768 infos.iter().any(|info| {
769 primary_diagnostic_group_ids.contains_key(&(
770 source,
771 code.clone(),
772 range_from_lsp(info.location.range),
773 ))
774 })
775 });
776
777 if is_supporting {
778 if let Some(severity) = diagnostic.severity {
779 supporting_diagnostic_severities
780 .insert((source, code.clone(), range), severity);
781 }
782 } else {
783 let group_id = post_inc(&mut next_group_id);
784 let is_disk_based =
785 source.map_or(false, |source| disk_based_sources.contains(source));
786
787 sources_by_group_id.insert(group_id, source);
788 primary_diagnostic_group_ids
789 .insert((source, code.clone(), range.clone()), group_id);
790
791 diagnostics.push(DiagnosticEntry {
792 range,
793 diagnostic: Diagnostic {
794 code: code.clone(),
795 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
796 message: diagnostic.message.clone(),
797 group_id,
798 is_primary: true,
799 is_valid: true,
800 is_disk_based,
801 },
802 });
803 if let Some(infos) = &diagnostic.related_information {
804 for info in infos {
805 if info.location.uri == params.uri {
806 let range = range_from_lsp(info.location.range);
807 diagnostics.push(DiagnosticEntry {
808 range,
809 diagnostic: Diagnostic {
810 code: code.clone(),
811 severity: DiagnosticSeverity::INFORMATION,
812 message: info.message.clone(),
813 group_id,
814 is_primary: false,
815 is_valid: true,
816 is_disk_based,
817 },
818 });
819 }
820 }
821 }
822 }
823 }
824
825 for entry in &mut diagnostics {
826 let diagnostic = &mut entry.diagnostic;
827 if !diagnostic.is_primary {
828 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
829 if let Some(&severity) = supporting_diagnostic_severities.get(&(
830 source,
831 diagnostic.code.clone(),
832 entry.range.clone(),
833 )) {
834 diagnostic.severity = severity;
835 }
836 }
837 }
838
839 self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
840 Ok(())
841 }
842
    /// Store new diagnostics for `worktree_path` on a local worktree.
    ///
    /// Pushes them into the matching open buffer (if any), records the
    /// per-path summary, emits `DiagnosticsUpdated`, and — when the worktree
    /// is shared — broadcasts the summary to the collaboration server.
    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let this = self.as_local_mut().unwrap();
        // At most one open buffer can match the path, hence the `break`.
        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        // Re-borrow: `send_buffer_update` above needed `&mut self`.
        let this = self.as_local_mut().unwrap();
        let summary = DiagnosticSummary::new(&diagnostics);
        this.diagnostic_summaries
            .insert(PathKey(worktree_path.clone()), summary.clone());
        this.diagnostics.insert(worktree_path.clone(), diagnostics);

        cx.emit(Event::DiagnosticsUpdated(worktree_path.clone()));

        // Only broadcast the summary while the worktree is shared.
        if let Some(share) = this.share.as_ref() {
            cx.foreground()
                .spawn({
                    let client = this.client.clone();
                    let project_id = share.project_id;
                    let worktree_id = this.id().to_proto();
                    let path = worktree_path.to_string_lossy().to_string();
                    async move {
                        client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path,
                                    error_count: summary.error_count as u32,
                                    warning_count: summary.warning_count as u32,
                                    info_count: summary.info_count as u32,
                                    hint_count: summary.hint_count as u32,
                                }),
                            })
                            .await
                            .log_err()
                    }
                })
                .detach();
        }

        Ok(())
    }
907
908 fn send_buffer_update(
909 &mut self,
910 buffer_id: u64,
911 operation: Operation,
912 cx: &mut ModelContext<Self>,
913 ) {
914 if let Some((project_id, worktree_id, rpc)) = match self {
915 Worktree::Local(worktree) => worktree
916 .share
917 .as_ref()
918 .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
919 Worktree::Remote(worktree) => Some((
920 worktree.project_id,
921 worktree.snapshot.id(),
922 worktree.client.clone(),
923 )),
924 } {
925 cx.spawn(|worktree, mut cx| async move {
926 dbg!(&operation);
927 if let Err(error) = rpc
928 .request(proto::UpdateBuffer {
929 project_id,
930 worktree_id: worktree_id.0 as u64,
931 buffer_id,
932 operations: vec![language::proto::serialize_operation(&operation)],
933 })
934 .await
935 {
936 worktree.update(&mut cx, |worktree, _| {
937 log::error!("error sending buffer operation: {}", error);
938 match worktree {
939 Worktree::Local(t) => &mut t.queued_operations,
940 Worktree::Remote(t) => &mut t.queued_operations,
941 }
942 .push((buffer_id, operation));
943 });
944 }
945 })
946 .detach();
947 }
948 }
949}
950
951impl WorktreeId {
952 pub fn from_usize(handle_id: usize) -> Self {
953 Self(handle_id)
954 }
955
956 pub(crate) fn from_proto(id: u64) -> Self {
957 Self(id as usize)
958 }
959
960 pub fn to_proto(&self) -> u64 {
961 self.0 as u64
962 }
963
964 pub fn to_usize(&self) -> usize {
965 self.0
966 }
967}
968
969impl fmt::Display for WorktreeId {
970 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
971 self.0.fmt(f)
972 }
973}
974
/// A point-in-time view of a worktree's entries; cheap to clone and shared
/// between the foreground model and background tasks.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Monotonic scan counter; 0 when the snapshot is first created.
    scan_id: usize,
    // Absolute root path on disk; the empty path for remote worktrees.
    abs_path: Arc<Path>,
    // Root directory name, used as the display/fuzzy-match prefix.
    root_name: String,
    // Lowercased characters of `root_name`, precomputed for fuzzy matching.
    root_char_bag: CharBag,
    // Parsed gitignore files with the scan_id at which each was loaded;
    // presumably keyed by the containing directory — TODO confirm.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Entries ordered by path, plus a parallel index ordered by entry id.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    next_entry_id: Arc<AtomicUsize>,
}
988
/// A worktree hosted on this machine's filesystem.
pub struct LocalWorktree {
    // Foreground copy of the tree, refreshed from `background_snapshot`
    // by `poll_snapshot`.
    snapshot: Snapshot,
    // Parsed `.zed.toml` settings for this worktree.
    config: WorktreeConfig,
    // Snapshot kept current by the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    // Latest scan state reported by the background scanner.
    last_scan_state_rx: watch::Receiver<ScanState>,
    // Keeps the background scanner alive; dropped with the worktree.
    _background_scanner_task: Option<Task<()>>,
    // In-flight re-poll timer while a scan is running (see `poll_snapshot`).
    poll_task: Option<Task<()>>,
    // Present only while this worktree is shared with guests.
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    // Weak handles to every open buffer, keyed by local buffer id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles to buffers opened on behalf of each guest peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Latest diagnostics and their per-path summaries.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    // Buffer operations that failed to send and await retransmission.
    queued_operations: Vec<(u64, Operation)>,
    language_registry: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Languages observed in this worktree (see `register_language`).
    languages: Vec<Arc<Language>>,
    // Running language servers keyed by language name.
    language_servers: HashMap<String, Arc<LanguageServer>>,
}
1010
/// State that exists only while a local worktree is shared with guests.
struct ShareState {
    project_id: u64,
    // Completed snapshots are sent here once a scan finishes
    // (see the scan-state loop in `LocalWorktree::new`).
    snapshots_tx: Sender<Snapshot>,
}
1015
/// A mirror of a collaborator's worktree, kept in sync over RPC.
pub struct RemoteWorktree {
    project_id: u64,
    // Foreground copy, refreshed from `snapshot_rx` by `poll_snapshot`.
    snapshot: Snapshot,
    // Latest snapshot published by the background update-applying task.
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Incoming `UpdateWorktree` messages are queued here for the background
    // task created in `Worktree::remote`.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    // Open buffers keyed by remote buffer id; may hold queued operations for
    // buffers that are not (yet) loaded.
    open_buffers: HashMap<usize, RemoteBuffer>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    // Buffer operations that failed to send and await retransmission.
    queued_operations: Vec<(u64, Operation)>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
}
1030
/// Buffer opens currently in flight, keyed by worktree-relative path. Each
/// value is a watch receiver that yields the loaded buffer — or the shared,
/// `Arc`-wrapped load error — once the open completes.
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;
1035
/// Contents of the optional `.zed.toml` file at the worktree root.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins authorized to collaborate on this worktree
    // (exposed via `LocalWorktree::authorized_logins`).
    collaborators: Vec<String>,
}
1040
1041impl LocalWorktree {
    /// Build the `LocalWorktree` model for `path` without starting the
    /// background scanner (the caller wires that up — see
    /// `Worktree::open_local`). Returns the model together with the sender
    /// the scanner will report its `ScanState` progress on.
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // The root entry's worktree-relative path is empty.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // Optional `.zed.toml`; parse failures silently fall back to the
        // default (empty) config.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            // Seed the snapshot with the root entry, if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                language_registry: languages,
                client,
                user_store,
                fs,
                languages: Default::default(),
                language_servers: Default::default(),
            };

            // Forward each scan-state change into the model: re-poll the
            // snapshot and, once scanning has settled and the worktree is
            // shared, queue the finished snapshot for transmission to guests.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // Model dropped; stop listening.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
1150
1151 pub fn authorized_logins(&self) -> Vec<String> {
1152 self.config.collaborators.clone()
1153 }
1154
    /// The registry used to look up languages for files in this worktree.
    pub fn language_registry(&self) -> &LanguageRegistry {
        &self.language_registry
    }
1158
    /// The languages that have been registered with this worktree so far.
    pub fn languages(&self) -> &[Arc<Language>] {
        &self.languages
    }
1162
    /// Registers `language` with this worktree and returns a language server
    /// for it, starting one if necessary.
    ///
    /// If a server for the language's name is already running, that instance
    /// is returned. Returns `None` when the server fails to start (the error
    /// is logged). Starting a server also wires up three background tasks:
    /// one applying `textDocument/publishDiagnostics` batches, one tracking
    /// the server's disk-based-diagnostics progress, and one broadcasting
    /// Updating/Updated events (and RPC messages, when shared) to observers.
    pub fn register_language(
        &mut self,
        language: &Arc<Language>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        // Track the language itself, deduplicated by pointer identity.
        if !self.languages.iter().any(|l| Arc::ptr_eq(l, language)) {
            self.languages.push(language.clone());
        }

        // Reuse an already-running server for this language, if any.
        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            // Marks the start/end of one round of disk-based diagnostics.
            enum DiagnosticProgress {
                Updating,
                Updated,
            }

            let disk_based_sources = language
                .disk_based_diagnostic_sources()
                .cloned()
                .unwrap_or_default();
            let disk_based_diagnostics_progress_token =
                language.disk_based_diagnostics_progress_token().cloned();
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            let (disk_based_diagnostics_done_tx, disk_based_diagnostics_done_rx) =
                smol::channel::unbounded();
            // Forward publishDiagnostics notifications onto a channel so they
            // can be processed on the app's event loop below.
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            cx.spawn_weak(|this, mut cx| {
                let has_disk_based_diagnostic_progress_token =
                    disk_based_diagnostics_progress_token.is_some();
                let disk_based_diagnostics_done_tx = disk_based_diagnostics_done_tx.clone();
                async move {
                    // Apply each diagnostics batch to the worktree. When the
                    // server exposes no progress token, synthesize
                    // Updating/Updated around every batch instead.
                    while let Ok(diagnostics) = diagnostics_rx.recv().await {
                        if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                            handle.update(&mut cx, |this, cx| {
                                if !has_disk_based_diagnostic_progress_token {
                                    smol::block_on(
                                        disk_based_diagnostics_done_tx
                                            .send(DiagnosticProgress::Updating),
                                    )
                                    .ok();
                                }
                                this.update_diagnostics(diagnostics, &disk_based_sources, cx)
                                    .log_err();
                                if !has_disk_based_diagnostic_progress_token {
                                    smol::block_on(
                                        disk_based_diagnostics_done_tx
                                            .send(DiagnosticProgress::Updated),
                                    )
                                    .ok();
                                }
                            })
                        } else {
                            // The worktree was dropped; stop listening.
                            break;
                        }
                    }
                }
            })
            .detach();

            // Counts nested Begin/End progress reports so that only the
            // outermost pair emits Updating/Updated.
            let mut pending_disk_based_diagnostics: i32 = 0;
            language_server
                .on_notification::<lsp::notification::Progress, _>(move |params| {
                    let token = match params.token {
                        lsp::NumberOrString::Number(_) => None,
                        lsp::NumberOrString::String(token) => Some(token),
                    };

                    // Only the language's designated progress token is
                    // interpreted as disk-based diagnostics activity.
                    if token == disk_based_diagnostics_progress_token {
                        match params.value {
                            lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                                lsp::WorkDoneProgress::Begin(_) => {
                                    if pending_disk_based_diagnostics == 0 {
                                        smol::block_on(
                                            disk_based_diagnostics_done_tx
                                                .send(DiagnosticProgress::Updating),
                                        )
                                        .ok();
                                    }
                                    pending_disk_based_diagnostics += 1;
                                }
                                lsp::WorkDoneProgress::End(_) => {
                                    pending_disk_based_diagnostics -= 1;
                                    if pending_disk_based_diagnostics == 0 {
                                        smol::block_on(
                                            disk_based_diagnostics_done_tx
                                                .send(DiagnosticProgress::Updated),
                                        )
                                        .ok();
                                    }
                                }
                                _ => {}
                            },
                        }
                    }
                })
                .detach();
            let rpc = self.client.clone();
            cx.spawn_weak(|this, mut cx| async move {
                // Translate progress transitions into gpui events and, when
                // the worktree is shared, into RPC broadcasts.
                while let Ok(progress) = disk_based_diagnostics_done_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        match progress {
                            DiagnosticProgress::Updating => {
                                let message = handle.update(&mut cx, |this, cx| {
                                    cx.emit(Event::DiskBasedDiagnosticsUpdating);
                                    let this = this.as_local().unwrap();
                                    // `share` is None until the worktree is
                                    // shared, in which case nothing is sent.
                                    this.share.as_ref().map(|share| {
                                        proto::DiskBasedDiagnosticsUpdating {
                                            project_id: share.project_id,
                                            worktree_id: this.id().to_proto(),
                                        }
                                    })
                                });

                                if let Some(message) = message {
                                    rpc.send(message).await.log_err();
                                }
                            }
                            DiagnosticProgress::Updated => {
                                let message = handle.update(&mut cx, |this, cx| {
                                    cx.emit(Event::DiskBasedDiagnosticsUpdated);
                                    let this = this.as_local().unwrap();
                                    this.share.as_ref().map(|share| {
                                        proto::DiskBasedDiagnosticsUpdated {
                                            project_id: share.project_id,
                                            worktree_id: this.id().to_proto(),
                                        }
                                    })
                                });

                                if let Some(message) = message {
                                    rpc.send(message).await.log_err();
                                }
                            }
                        }
                    } else {
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }
1322
    /// Returns the already-open buffer for `path`, if any.
    ///
    /// As a side effect, prunes entries from `open_buffers` whose weak buffer
    /// handles can no longer be upgraded.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut result = None;
        // `retain` doubles as garbage collection: dropped buffers are removed
        // while we search for a live one matching this worktree and path.
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == handle && file.path().as_ref() == path {
                        result = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        result
    }
1344
    /// Loads the file at `path` from disk and materializes it as a new buffer
    /// model, applying cached diagnostics and assigning a language (plus
    /// language server) selected from the registry.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Diagnostics may have arrived for this path before the
                // buffer was opened; pick them up now.
                let diagnostics = this.diagnostics.get(&path).cloned();
                let language = this
                    .language_registry
                    .select_language(file.full_path())
                    .cloned();
                let server = language
                    .as_ref()
                    .and_then(|language| this.register_language(language, cx));
                (diagnostics, language, server)
            });

            let mut buffer_operations = Vec::new();
            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                if let Some(diagnostics) = diagnostics {
                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    buffer_operations.push(op);
                }
                buffer
            });

            this.update(&mut cx, |this, cx| {
                // Broadcast the diagnostic operations to collaborators, then
                // register the buffer so future opens return the same model.
                for op in buffer_operations {
                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
                }
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
1391
    /// Handles a guest's request to open a buffer: opens it locally, records
    /// it as shared with that peer, and returns its serialized state.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                // Keep a strong handle per peer so the buffer outlives local
                // closes while the guest still has it open.
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }
1417
    /// Handles a guest closing a buffer: drops our strong handle that was
    /// held on their behalf.
    pub fn close_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
            shared_buffers.remove(&envelope.payload.buffer_id);
            cx.notify();
        }

        Ok(())
    }
1430
1431 pub fn remove_collaborator(
1432 &mut self,
1433 peer_id: PeerId,
1434 replica_id: ReplicaId,
1435 cx: &mut ModelContext<Worktree>,
1436 ) {
1437 self.shared_buffers.remove(&peer_id);
1438 for (_, buffer) in &self.open_buffers {
1439 if let Some(buffer) = buffer.upgrade(cx) {
1440 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1441 }
1442 }
1443 cx.notify();
1444 }
1445
    /// Returns a future that resolves once the background scanner is no
    /// longer in the `Scanning` state (i.e. it reached `Idle` or `Err`, or
    /// the sender was dropped).
    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut scan_state_rx = self.last_scan_state_rx.clone();
        async move {
            // Check the current value first, then await subsequent updates.
            let mut scan_state = Some(scan_state_rx.borrow().clone());
            while let Some(ScanState::Scanning) = scan_state {
                scan_state = scan_state_rx.recv().await;
            }
        }
    }
1455
1456 fn is_scanning(&self) -> bool {
1457 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1458 true
1459 } else {
1460 false
1461 }
1462 }
1463
    /// A clone of the foreground snapshot of this worktree's entries.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1467
    /// The absolute path of the worktree's root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }
1471
    /// Whether `path` lies inside this worktree's root directory.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1475
    /// Converts a worktree-relative path into an absolute one.
    ///
    /// A path with no final component (the root entry's empty path) maps to
    /// the worktree root itself rather than being joined.
    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.snapshot.abs_path.join(path)
        } else {
            self.snapshot.abs_path.to_path_buf()
        }
    }
1483
    /// Reads the file at the worktree-relative `path` from disk, returning a
    /// `File` handle plus the file's text.
    ///
    /// Also refreshes the background snapshot's entry for the file so its
    /// metadata (id, mtime) is current before the buffer is created.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1509
    /// Saves `text` to `path` ("save as"), registers `buffer` as an open
    /// buffer of this worktree, and returns a `File` handle for the newly
    /// written entry.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Track the buffer under its (possibly new) path.
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }
1534
    /// Writes `text` to disk at the worktree-relative `path` on a background
    /// thread, then refreshes the snapshot's entry for it and returns the
    /// updated entry.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Pull the refreshed entry into the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1556
    /// Starts sharing this worktree under `project_id`.
    ///
    /// Sends the full snapshot to the server, then streams incremental
    /// snapshot diffs to collaborators whenever the scanner settles (see the
    /// task set up at construction, which feeds `snapshots_tx`). Idempotent:
    /// returns immediately if already shared.
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
        });

        cx.background()
            .spawn({
                let rpc = rpc.clone();
                let snapshot = snapshot.clone();
                async move {
                    // Diff each incoming snapshot against the last one that
                    // was successfully sent, so failed sends are retried as
                    // part of the next diff.
                    let mut prev_snapshot = snapshot;
                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                        let message =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                        match rpc.send(message).await {
                            Ok(()) => prev_snapshot = snapshot,
                            Err(err) => log::error!("error sending snapshot diff {}", err),
                        }
                    }
                }
            })
            .detach();

        // Serializing the full snapshot can be expensive; do it off the main
        // thread.
        let diagnostic_summaries = self.diagnostic_summaries.clone();
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto(&diagnostic_summaries)),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1606}
1607
1608fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1609 let contents = smol::block_on(fs.load(&abs_path))?;
1610 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1611 let mut builder = GitignoreBuilder::new(parent);
1612 for line in contents.lines() {
1613 builder.add_line(Some(abs_path.into()), line)?;
1614 }
1615 Ok(builder.build()?)
1616}
1617
/// Both worktree flavors expose their `Snapshot` directly via deref.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
1628
/// Allows snapshot methods to be called directly on a `LocalWorktree`.
impl Deref for LocalWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1636
/// Allows snapshot methods to be called directly on a `RemoteWorktree`.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1644
/// Debug output delegates to the snapshot's entry listing.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1650
impl RemoteWorktree {
    /// Returns the already-open buffer for `path`, if any, pruning entries
    /// for buffers that have since been dropped.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        // `retain` doubles as garbage collection of dead weak handles.
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == handle && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        existing_buffer
    }

    /// Opens `path` by requesting its contents from the host over RPC and
    /// reconstructing the buffer locally under this replica's id.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.id();
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        cx.spawn_weak(move |this, mut cx| async move {
            // Fail fast if the path isn't in our replicated snapshot.
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id.to_proto(),
                    path: path_string,
                })
                .await?;

            // Re-upgrade after the await: the worktree may have been dropped
            // while the request was in flight.
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let language = this.read_with(&cx, |this, _| {
                use language::File;
                this.languages().select_language(file.full_path()).cloned()
            });
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
                    .unwrap()
                    .with_language(language, None, cx)
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Operations that arrived while the buffer was loading were
                // queued under `RemoteBuffer::Operations`; apply them now.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }

    /// Closes every open buffer, e.g. when the remote worktree goes away.
    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
        for (_, buffer) in self.open_buffers.drain() {
            if let RemoteBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| buffer.close(cx))
                }
            }
        }
    }

    /// A clone of the replicated snapshot of this worktree's entries.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Queues a worktree update received from the host. Updates are applied
    /// in order by the task that drains the `updates_tx` channel.
    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }

    /// Records a per-path diagnostic summary sent by the host and notifies
    /// observers.
    pub fn update_diagnostic_summary(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
        cx: &mut ModelContext<Worktree>,
    ) {
        if let Some(summary) = envelope.payload.summary {
            let path: Arc<Path> = Path::new(&summary.path).into();
            self.diagnostic_summaries.insert(
                PathKey(path.clone()),
                DiagnosticSummary {
                    error_count: summary.error_count as usize,
                    warning_count: summary.warning_count as usize,
                    info_count: summary.info_count as usize,
                    hint_count: summary.hint_count as usize,
                },
            );
            cx.emit(Event::DiagnosticsUpdated(path));
        }
    }

    /// Re-emits the host's "disk-based diagnostics started" notification.
    pub fn disk_based_diagnostics_updating(&self, cx: &mut ModelContext<Worktree>) {
        cx.emit(Event::DiskBasedDiagnosticsUpdating);
    }

    /// Re-emits the host's "disk-based diagnostics finished" notification.
    pub fn disk_based_diagnostics_updated(&self, cx: &mut ModelContext<Worktree>) {
        cx.emit(Event::DiskBasedDiagnosticsUpdated);
    }

    /// Removes a departed collaborator's replica from all open buffers.
    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }
}
1802
/// State of a buffer on a guest: either still loading (with operations
/// queued to apply once it arrives) or loaded and tracked weakly.
enum RemoteBuffer {
    Operations(Vec<Operation>),
    Loaded(WeakModelHandle<Buffer>),
}
1807
1808impl RemoteBuffer {
1809 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1810 match self {
1811 Self::Operations(_) => None,
1812 Self::Loaded(buffer) => buffer.upgrade(cx),
1813 }
1814 }
1815}
1816
impl Snapshot {
    /// The stable identifier of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Serializes this snapshot (ignored entries excluded) plus the given
    /// diagnostic summaries into the wire form used when sharing a worktree.
    pub fn to_proto(
        &self,
        diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
    ) -> proto::Worktree {
        let root_name = self.root_name.clone();
        proto::Worktree {
            id: self.id.0 as u64,
            root_name,
            entries: self
                .entries_by_path
                .iter()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
            diagnostic_summaries: diagnostic_summaries
                .iter()
                .map(|(path, summary)| summary.to_proto(path.0.clone()))
                .collect(),
        }
    }

    /// Computes the delta between this snapshot and `other` as an
    /// `UpdateWorktree` message: entries that were added or rescanned since
    /// `other`, plus the ids of entries that disappeared.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        // Walk both id-ordered entry sequences in lockstep, merge-style.
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Only in `self`: newly added entry.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // In both: include only if rescanned since `other`.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Only in `other`: removed entry.
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }

    /// Applies a remote `UpdateWorktree` delta to this snapshot, editing the
    /// by-path and by-id indices together so they stay consistent.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // An updated entry may have moved; drop its old path first so the
            // by-path index doesn't keep a stale record.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of file entries that are not ignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal positioned at the `start_offset`-th entry, counting
    /// only the kinds selected by the two `include_*` flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal positioned at `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at file index `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries (files and directories) from the beginning.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, skipping the root entry
    /// (whose path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the immediate children of the entry at `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The worktree's root entry, if it has been scanned yet.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up the entry at exactly `path`, if any.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal positions at-or-after `path`; require an
                // exact match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id, resolving its current path through the
    /// by-id index.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    /// Inserts (or replaces) a single entry, keeping both indices in sync.
    /// If the entry is a `.gitignore` file, its rules are (re)loaded into the
    /// ignore map for its containing directory.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // Best-effort: a malformed ignore file shouldn't abort
                    // the scan.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    /// Records the scanned contents of a directory, transitioning the parent
    /// from `PendingDir` to `Dir` and registering its `.gitignore`, if any.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        // Invariant: only directories awaiting a scan may be populated.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: reuses the id of a previously
    /// removed entry with the same inode, or of an existing entry at the
    /// same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` and everything beneath it, remembering
    /// removed ids by inode so they can be reused if the files reappear.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into [before `path`], [subtree at `path`], and
            // [after], then stitch the outer two back together.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a .gitignore invalidates the cached rules for its parent.
        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of gitignore rules applying to `path` by walking its
    /// ancestors from the root downward.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            // Once any ancestor directory is ignored, everything below it is.
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
2184
2185impl fmt::Debug for Snapshot {
2186 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2187 for entry in self.entries_by_path.cursor::<()>() {
2188 for _ in entry.path.ancestors().skip(1) {
2189 write!(f, " ")?;
2190 }
2191 writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
2192 }
2193 Ok(())
2194 }
2195}
2196
/// A handle to a file within a worktree, used as a buffer's backing file.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The snapshot entry id; `None` once the file has been deleted.
    entry_id: Option<usize>,
    /// The worktree containing this file.
    worktree: ModelHandle<Worktree>,
    /// Absolute path of the worktree root this file belongs to.
    worktree_path: Arc<Path>,
    /// Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    /// Modification time recorded when the entry was last refreshed.
    pub mtime: SystemTime,
    /// Whether the file exists on this machine (vs. a remote host).
    is_local: bool,
}
2206
2207impl language::File for File {
    /// The file's recorded modification time.
    fn mtime(&self) -> SystemTime {
        self.mtime
    }
2211
    /// The file's path relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }
2215
2216 fn abs_path(&self) -> Option<PathBuf> {
2217 if self.is_local {
2218 Some(self.worktree_path.join(&self.path))
2219 } else {
2220 None
2221 }
2222 }
2223
2224 fn full_path(&self) -> PathBuf {
2225 let mut full_path = PathBuf::new();
2226 if let Some(worktree_name) = self.worktree_path.file_name() {
2227 full_path.push(worktree_name);
2228 }
2229 full_path.push(&self.path);
2230 full_path
2231 }
2232
    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    /// (The root's relative path is empty, so its `file_name()` is `None` and
    /// the worktree path's final component is used instead.)
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }
2241
    /// Whether the underlying entry has been removed from the worktree.
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }
2245
    /// Persists `text` as this file's contents, returning the saved version
    /// vector and the file's new modification time.
    ///
    /// For local worktrees the write happens on disk (and, when shared, a
    /// `BufferSaved` message is broadcast); for remote worktrees the host is
    /// asked to perform the save over RPC.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.read(cx).id().to_proto();
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                // `share` is None for unshared worktrees, in which case no
                // BufferSaved message is sent.
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }
2295
2296 fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
2297 let worktree = self.worktree.read(cx).as_local()?;
2298 let abs_path = worktree.absolutize(&self.path);
2299 let fs = worktree.fs.clone();
2300 Some(
2301 cx.background()
2302 .spawn(async move { fs.load(&abs_path).await }),
2303 )
2304 }
2305
2306 fn format_remote(
2307 &self,
2308 buffer_id: u64,
2309 cx: &mut MutableAppContext,
2310 ) -> Option<Task<Result<()>>> {
2311 let worktree = self.worktree.read(cx);
2312 let worktree_id = worktree.id().to_proto();
2313 let worktree = worktree.as_remote()?;
2314 let rpc = worktree.client.clone();
2315 let project_id = worktree.project_id;
2316 Some(cx.foreground().spawn(async move {
2317 rpc.request(proto::FormatBuffer {
2318 project_id,
2319 worktree_id,
2320 buffer_id,
2321 })
2322 .await?;
2323 Ok(())
2324 }))
2325 }
2326
2327 fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
2328 self.worktree.update(cx, |worktree, cx| {
2329 worktree.send_buffer_update(buffer_id, operation, cx);
2330 });
2331 }
2332
2333 fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
2334 self.worktree.update(cx, |worktree, cx| {
2335 if let Worktree::Remote(worktree) = worktree {
2336 let project_id = worktree.project_id;
2337 let worktree_id = worktree.id().to_proto();
2338 let rpc = worktree.client.clone();
2339 cx.background()
2340 .spawn(async move {
2341 if let Err(error) = rpc
2342 .send(proto::CloseBuffer {
2343 project_id,
2344 worktree_id,
2345 buffer_id,
2346 })
2347 .await
2348 {
2349 log::error!("error closing remote buffer: {}", error);
2350 }
2351 })
2352 .detach();
2353 }
2354 });
2355 }
2356
2357 fn as_any(&self) -> &dyn Any {
2358 self
2359 }
2360}
2361
2362impl File {
2363 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2364 file.and_then(|f| f.as_any().downcast_ref())
2365 }
2366
2367 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2368 self.worktree.read(cx).id()
2369 }
2370}
2371
/// A single file or directory within a worktree, as observed on disk.
#[derive(Clone, Debug)]
pub struct Entry {
    // Unique id, allocated from the worktree's shared `AtomicUsize` counter.
    pub id: usize,
    // Whether this is a file or a (possibly not-yet-scanned) directory.
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // True when some `.gitignore` in scope excludes this path.
    pub is_ignored: bool,
}
2382
#[derive(Clone, Debug)]
pub enum EntryKind {
    // A directory whose children have not been scanned yet.
    PendingDir,
    Dir,
    // A file, carrying the precomputed character bag used for fuzzy matching.
    File(CharBag),
}
2389
impl Entry {
    /// Creates an entry from filesystem metadata, allocating a fresh unique
    /// id from `next_entry_id`. Directories start out as `PendingDir`
    /// (presumably promoted to `Dir` once their contents are scanned —
    /// see `populate_dir`'s callers).
    fn new(
        path: Arc<Path>,
        metadata: &fs::Metadata,
        next_entry_id: &AtomicUsize,
        root_char_bag: CharBag,
    ) -> Self {
        Self {
            id: next_entry_id.fetch_add(1, SeqCst),
            kind: if metadata.is_dir {
                EntryKind::PendingDir
            } else {
                EntryKind::File(char_bag_for_path(root_char_bag, &path))
            },
            path,
            inode: metadata.inode,
            mtime: metadata.mtime,
            is_symlink: metadata.is_symlink,
            // Ignore status is computed later, once the ignore stack for the
            // entry's parent directory is known.
            is_ignored: false,
        }
    }

    pub fn is_dir(&self) -> bool {
        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
    }

    pub fn is_file(&self) -> bool {
        matches!(self.kind, EntryKind::File(_))
    }
}
2420
impl sum_tree::Item for Entry {
    type Summary = EntrySummary;

    /// Computes the single-entry summary aggregated by the worktree's entry
    /// tree. Ignored entries are excluded from the "visible" counts, and
    /// only files contribute to the file counts.
    fn summary(&self) -> Self::Summary {
        let visible_count = if self.is_ignored { 0 } else { 1 };
        let file_count;
        let visible_file_count;
        if self.is_file() {
            file_count = 1;
            visible_file_count = visible_count;
        } else {
            file_count = 0;
            visible_file_count = 0;
        }

        EntrySummary {
            max_path: self.path.clone(),
            count: 1,
            visible_count,
            file_count,
            visible_file_count,
        }
    }
}
2445
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and therefore ordered) by their worktree-relative
    /// path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2453
/// Aggregated statistics for a subtree of `Entry` items.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest path in the subtree (entries are ordered by path).
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that are not gitignored.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are files and not gitignored.
    visible_file_count: usize,
}
2462
2463impl Default for EntrySummary {
2464 fn default() -> Self {
2465 Self {
2466 max_path: Arc::from(Path::new("")),
2467 count: 0,
2468 visible_count: 0,
2469 file_count: 0,
2470 visible_file_count: 0,
2471 }
2472 }
2473}
2474
2475impl sum_tree::Summary for EntrySummary {
2476 type Context = ();
2477
2478 fn add_summary(&mut self, rhs: &Self, _: &()) {
2479 self.max_path = rhs.max_path.clone();
2480 self.visible_count += rhs.visible_count;
2481 self.file_count += rhs.file_count;
2482 self.visible_file_count += rhs.visible_file_count;
2483 }
2484}
2485
/// An id-keyed record of an entry, kept in a secondary tree so entries can be
/// looked up by id (e.g. to track files across renames).
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this record was last updated.
    scan_id: usize,
}
2493
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// The summary of a single record is simply its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2501
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    /// Records are keyed (and ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2509
/// Summary for the id-keyed entry tree: the greatest id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2514
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// Items are ordered by id, so the right-hand summary's `max_id`
    /// replaces ours.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2522
/// Lets a `usize` (an entry id) be used as a seek dimension over the
/// id-keyed tree.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2528
/// Ordering key for entries: their worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2531
2532impl Default for PathKey {
2533 fn default() -> Self {
2534 Self(Path::new("").into())
2535 }
2536}
2537
/// Lets a `PathKey` be used as a seek dimension over the path-keyed
/// entry tree.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2543
/// Scans a worktree's directory contents on background threads, keeping the
/// shared `Snapshot` up to date and reporting progress through `notify`.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Snapshot shared with the owning worktree; updated in place as scanning
    // proceeds.
    snapshot: Arc<Mutex<Snapshot>>,
    // Channel over which Scanning/Idle/Err state transitions are reported.
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2550
impl BackgroundScanner {
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }

    /// Absolute path of the worktree root being scanned.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// A clone of the current snapshot, taken under the lock.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }

    /// Main loop: performs the initial scan, then processes batches of
    /// filesystem events until the notify channel or event stream closes.
    /// Each phase is bracketed by `Scanning`/`Idle` notifications.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        // NOTE(review): if the initial scan fails, an `Err` state is sent but
        // the loop still continues and reports `Idle` below — presumably
        // intentional so the worktree keeps receiving events; confirm.
        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }

    /// Performs the initial recursive scan of the worktree root, fanning
    /// directory jobs out to one task per CPU over an unbounded channel.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        // Single-file worktrees have nothing to recurse into.
        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Drop our sender so the channel closes once all jobs (each of
            // which holds a clone) are finished.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }

    /// Scans a single directory: reads its children, computes their ignore
    /// status, records them in the snapshot, and enqueues sub-directories as
    /// further `ScanJob`s.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            // Skip unreadable children rather than aborting the whole scan.
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed, so they reflect the ignore file we just found in
                // this directory. Because `.gitignore` starts with a `.`, it
                // sorts near the beginning of the listing, so there should
                // rarely be many such entries. Update the ignore stack
                // associated with any already-queued jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    // Jobs were pushed in the same order as directory entries,
                    // so each directory entry pairs with the next queued job.
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Once a directory is ignored, everything beneath it is.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        // Commit this directory's children to the snapshot before enqueuing
        // the sub-directory jobs.
        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }

    /// Applies a batch of filesystem events to the snapshot: removes the
    /// affected paths, re-inserts whatever currently exists on disk, rescans
    /// any (re)created directories, and refreshes ignore statuses.
    /// Returns `false` if the worktree root can no longer be resolved.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        // Work on a private copy of the snapshot and publish it atomically
        // once the event batch has been applied.
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Collapse events for paths contained within other event paths,
        // since re-scanning the ancestor covers them.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First remove all affected paths, then re-add what still exists;
        // this lets renames be detected via inode reuse.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    // Directories need their contents (re)scanned below.
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // The path no longer exists; it was already removed above.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }

    /// Recomputes ignore status for subtrees whose `.gitignore` files changed
    /// in the current scan, and drops cached gitignores whose files no
    /// longer exist.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // A matching scan id means this gitignore was touched this scan.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip descendants of a path we're already going to process —
            // the recursive update below will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }

    /// Recomputes ignore status for the direct children of `job.path`,
    /// recursing into child directories by enqueuing further jobs. Changed
    /// entries are written back to the shared snapshot in a single edit.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
}
2954
2955async fn refresh_entry(
2956 fs: &dyn Fs,
2957 snapshot: &Mutex<Snapshot>,
2958 path: Arc<Path>,
2959 abs_path: &Path,
2960) -> Result<Entry> {
2961 let root_char_bag;
2962 let next_entry_id;
2963 {
2964 let snapshot = snapshot.lock();
2965 root_char_bag = snapshot.root_char_bag;
2966 next_entry_id = snapshot.next_entry_id.clone();
2967 }
2968 let entry = Entry::new(
2969 path,
2970 &fs.metadata(abs_path)
2971 .await?
2972 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2973 &next_entry_id,
2974 root_char_bag,
2975 );
2976 Ok(snapshot.lock().insert_entry(entry, fs))
2977}
2978
2979fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2980 let mut result = root_char_bag;
2981 result.extend(
2982 path.to_string_lossy()
2983 .chars()
2984 .map(|c| c.to_ascii_lowercase()),
2985 );
2986 result
2987}
2988
/// A unit of work for the parallel directory scan: one directory to read.
struct ScanJob {
    abs_path: PathBuf,
    // Path relative to the worktree root.
    path: Arc<Path>,
    // Gitignores in effect for this directory's parent chain.
    ignore_stack: Arc<IgnoreStack>,
    // Cloned sender so scanned sub-directories can enqueue themselves; the
    // queue closes once every outstanding job (and its sender) is dropped.
    scan_queue: Sender<ScanJob>,
}
2995
/// A unit of work for recomputing ignore status: one directory whose
/// children should be re-evaluated against `ignore_stack`.
struct UpdateIgnoreStatusJob {
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Cloned sender for enqueuing child directories, mirroring `ScanJob`.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3001
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    // Waits until all pending filesystem events for the worktree have been
    // processed (see the impl below for details).
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3009
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create a sentinel file and wait for the scanner to see it...
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for that to be observed as well.
            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3042
/// Seek dimension tracking how many entries (of each kind) have been passed
/// while traversing the entry tree, along with the greatest path seen.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
3051
3052impl<'a> TraversalProgress<'a> {
3053 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3054 match (include_ignored, include_dirs) {
3055 (true, true) => self.count,
3056 (true, false) => self.file_count,
3057 (false, true) => self.visible_count,
3058 (false, false) => self.visible_file_count,
3059 }
3060 }
3061}
3062
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Accumulates the counts of a passed-over subtree and advances
    /// `max_path` to the subtree's greatest path.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3072
impl<'a> Default for TraversalProgress<'a> {
    /// Progress at the very start of a traversal: nothing passed yet.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
3084
/// An iterator-like cursor over a snapshot's entries, optionally filtering
/// out directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3090
impl<'a> Traversal<'a> {
    /// Advances to the next entry included by this traversal's filters.
    /// Returns the result of the underlying cursor seek.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` filtered entries have been passed.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, stopping at the next
    /// entry that is not a descendant of it and that passes the filters.
    /// Returns false when no such entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// How many filtered entries precede the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3136
3137impl<'a> Iterator for Traversal<'a> {
3138 type Item = &'a Entry;
3139
3140 fn next(&mut self) -> Option<Self::Item> {
3141 if let Some(item) = self.entry() {
3142 self.advance();
3143 Some(item)
3144 } else {
3145 None
3146 }
3147 }
3148}
3149
/// Seek targets for positioning a `Traversal` cursor.
#[derive(Debug)]
enum TraversalTarget {
    // Seek to the entry with exactly this path.
    Path(&'a Path),
    // Seek to the first entry that is NOT a descendant of this path.
    PathSuccessor(&'a Path),
    // Seek to the position after `count` entries, filtered per the flags.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3160
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    /// Compares this target against the cursor's accumulated progress; the
    /// cursor keeps seeking while the target is `Greater` than its position.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // While still inside `path`'s subtree report `Greater` so the
                // seek continues; the first position outside it is `Equal`.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3183
/// Iterates over the direct children of `parent_path` by repeatedly skipping
/// each child's subtree via `Traversal::advance_to_sibling`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3188
3189impl<'a> Iterator for ChildEntriesIter<'a> {
3190 type Item = &'a Entry;
3191
3192 fn next(&mut self) -> Option<Self::Item> {
3193 if let Some(item) = self.traversal.entry() {
3194 if item.path.starts_with(&self.parent_path) {
3195 self.traversal.advance_to_sibling();
3196 return Some(item);
3197 }
3198 }
3199 None
3200 }
3201}
3202
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a local entry into its protobuf representation for sending
    /// to remote collaborators. The path is lossily converted to a string.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id as u64,
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().to_string(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3216
3217impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3218 type Error = anyhow::Error;
3219
3220 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3221 if let Some(mtime) = entry.mtime {
3222 let kind = if entry.is_dir {
3223 EntryKind::Dir
3224 } else {
3225 let mut char_bag = root_char_bag.clone();
3226 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3227 EntryKind::File(char_bag)
3228 };
3229 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3230 Ok(Entry {
3231 id: entry.id as usize,
3232 kind,
3233 path: path.clone(),
3234 inode: entry.inode,
3235 mtime: mtime.into(),
3236 is_symlink: entry.is_symlink,
3237 is_ignored: entry.is_ignored,
3238 })
3239 } else {
3240 Err(anyhow!(
3241 "missing mtime in remote worktree entry {:?}",
3242 entry.path
3243 ))
3244 }
3245 }
3246}
3247
3248#[cfg(test)]
3249mod tests {
3250 use super::*;
3251 use crate::fs::FakeFs;
3252 use anyhow::Result;
3253 use client::test::{FakeHttpClient, FakeServer};
3254 use fs::RealFs;
3255 use gpui::test::subscribe;
3256 use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3257 use language::{Diagnostic, LanguageConfig};
3258 use lsp::Url;
3259 use rand::prelude::*;
3260 use serde_json::json;
3261 use std::{cell::RefCell, rc::Rc};
3262 use std::{
3263 env,
3264 fmt::Write,
3265 time::{SystemTime, UNIX_EPOCH},
3266 };
3267 use text::Point;
3268 use unindent::Unindent as _;
3269 use util::test::temp_tree;
3270
    // Verifies that a default (non-ignored) traversal skips paths matched by
    // a `.gitignore`: `a/b` is ignored, `a/c` is not.
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            Arc::new(fs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    // "a/b" is absent: it is gitignored.
                    Path::new("a/c"),
                ]
            );
        })
    }
3317
    // Edits a buffer backed by a real file and verifies that saving writes
    // the buffer's contents back to disk.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            // Large insertion so the save involves a non-trivial amount of text.
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3351
    // Like `test_save_file`, but the worktree root is the file itself rather
    // than a directory, so the buffer is opened with an empty relative path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        // A single-file worktree contains exactly one file.
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3390
3391 #[gpui::test]
3392 async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
3393 let dir = temp_tree(json!({
3394 "a": {
3395 "file1": "",
3396 "file2": "",
3397 "file3": "",
3398 },
3399 "b": {
3400 "c": {
3401 "file4": "",
3402 "file5": "",
3403 }
3404 }
3405 }));
3406
3407 let user_id = 5;
3408 let http_client = FakeHttpClient::with_404_response();
3409 let mut client = Client::new(http_client.clone());
3410 let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3411 let user_store = server.build_user_store(client.clone(), &mut cx).await;
3412 let tree = Worktree::open_local(
3413 client,
3414 user_store.clone(),
3415 dir.path(),
3416 Arc::new(RealFs),
3417 Default::default(),
3418 &mut cx.to_async(),
3419 )
3420 .await
3421 .unwrap();
3422
3423 let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
3424 let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
3425 async move { buffer.await.unwrap() }
3426 };
3427 let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
3428 tree.read_with(cx, |tree, _| {
3429 tree.entry_for_path(path)
3430 .expect(&format!("no entry for path {}", path))
3431 .id
3432 })
3433 };
3434
3435 let buffer2 = buffer_for_path("a/file2", &mut cx).await;
3436 let buffer3 = buffer_for_path("a/file3", &mut cx).await;
3437 let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
3438 let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
3439
3440 let file2_id = id_for_path("a/file2", &cx);
3441 let file3_id = id_for_path("a/file3", &cx);
3442 let file4_id = id_for_path("b/c/file4", &cx);
3443
3444 // Wait for the initial scan.
3445 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3446 .await;
3447
3448 // Create a remote copy of this worktree.
3449 let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
3450 let remote = Worktree::remote(
3451 1,
3452 1,
3453 initial_snapshot.to_proto(&Default::default()),
3454 Client::new(http_client.clone()),
3455 user_store,
3456 Default::default(),
3457 &mut cx.to_async(),
3458 )
3459 .await
3460 .unwrap();
3461
3462 cx.read(|cx| {
3463 assert!(!buffer2.read(cx).is_dirty());
3464 assert!(!buffer3.read(cx).is_dirty());
3465 assert!(!buffer4.read(cx).is_dirty());
3466 assert!(!buffer5.read(cx).is_dirty());
3467 });
3468
3469 // Rename and delete files and directories.
3470 tree.flush_fs_events(&cx).await;
3471 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
3472 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
3473 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
3474 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
3475 tree.flush_fs_events(&cx).await;
3476
3477 let expected_paths = vec![
3478 "a",
3479 "a/file1",
3480 "a/file2.new",
3481 "b",
3482 "d",
3483 "d/file3",
3484 "d/file4",
3485 ];
3486
3487 cx.read(|app| {
3488 assert_eq!(
3489 tree.read(app)
3490 .paths()
3491 .map(|p| p.to_str().unwrap())
3492 .collect::<Vec<_>>(),
3493 expected_paths
3494 );
3495
3496 assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
3497 assert_eq!(id_for_path("d/file3", &cx), file3_id);
3498 assert_eq!(id_for_path("d/file4", &cx), file4_id);
3499
3500 assert_eq!(
3501 buffer2.read(app).file().unwrap().path().as_ref(),
3502 Path::new("a/file2.new")
3503 );
3504 assert_eq!(
3505 buffer3.read(app).file().unwrap().path().as_ref(),
3506 Path::new("d/file3")
3507 );
3508 assert_eq!(
3509 buffer4.read(app).file().unwrap().path().as_ref(),
3510 Path::new("d/file4")
3511 );
3512 assert_eq!(
3513 buffer5.read(app).file().unwrap().path().as_ref(),
3514 Path::new("b/c/file5")
3515 );
3516
3517 assert!(!buffer2.read(app).file().unwrap().is_deleted());
3518 assert!(!buffer3.read(app).file().unwrap().is_deleted());
3519 assert!(!buffer4.read(app).file().unwrap().is_deleted());
3520 assert!(buffer5.read(app).file().unwrap().is_deleted());
3521 });
3522
3523 // Update the remote worktree. Check that it becomes consistent with the
3524 // local worktree.
3525 remote.update(&mut cx, |remote, cx| {
3526 let update_message =
3527 tree.read(cx)
3528 .snapshot()
3529 .build_update(&initial_snapshot, 1, 1, true);
3530 remote
3531 .as_remote_mut()
3532 .unwrap()
3533 .snapshot
3534 .apply_update(update_message)
3535 .unwrap();
3536
3537 assert_eq!(
3538 remote
3539 .paths()
3540 .map(|p| p.to_str().unwrap())
3541 .collect::<Vec<_>>(),
3542 expected_paths
3543 );
3544 });
3545 }
3546
3547 #[gpui::test]
3548 async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
3549 let dir = temp_tree(json!({
3550 ".git": {},
3551 ".gitignore": "ignored-dir\n",
3552 "tracked-dir": {
3553 "tracked-file1": "tracked contents",
3554 },
3555 "ignored-dir": {
3556 "ignored-file1": "ignored contents",
3557 }
3558 }));
3559
3560 let http_client = FakeHttpClient::with_404_response();
3561 let client = Client::new(http_client.clone());
3562 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3563
3564 let tree = Worktree::open_local(
3565 client,
3566 user_store,
3567 dir.path(),
3568 Arc::new(RealFs),
3569 Default::default(),
3570 &mut cx.to_async(),
3571 )
3572 .await
3573 .unwrap();
3574 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3575 .await;
3576 tree.flush_fs_events(&cx).await;
3577 cx.read(|cx| {
3578 let tree = tree.read(cx);
3579 let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
3580 let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
3581 assert_eq!(tracked.is_ignored, false);
3582 assert_eq!(ignored.is_ignored, true);
3583 });
3584
3585 std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
3586 std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
3587 tree.flush_fs_events(&cx).await;
3588 cx.read(|cx| {
3589 let tree = tree.read(cx);
3590 let dot_git = tree.entry_for_path(".git").unwrap();
3591 let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
3592 let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
3593 assert_eq!(tracked.is_ignored, false);
3594 assert_eq!(ignored.is_ignored, true);
3595 assert_eq!(dot_git.is_ignored, true);
3596 });
3597 }
3598
    #[gpui::test]
    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
        // Verifies that a worktree hands out at most one buffer per path,
        // whether open requests are issued concurrently or re-issued while a
        // previously-returned handle is still alive.
        let user_id = 100;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client);
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;

        // Use the in-memory fake filesystem; no real disk I/O is needed.
        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.txt": "a-contents",
                "b.txt": "b-contents",
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Spawn multiple tasks to open paths, repeating some paths.
        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
            (
                worktree.open_buffer("a.txt", cx),
                worktree.open_buffer("b.txt", cx),
                worktree.open_buffer("a.txt", cx),
            )
        });

        // Deliberately awaited in a different order than the opens were
        // issued; deduplication must tolerate this.
        let buffer_a_1 = buffer_a_1.await.unwrap();
        let buffer_a_2 = buffer_a_2.await.unwrap();
        let buffer_b = buffer_b.await.unwrap();
        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");

        // There is only one buffer per path.
        let buffer_a_id = buffer_a_1.id();
        assert_eq!(buffer_a_2.id(), buffer_a_id);

        // Open the same path again while it is still open (one handle
        // dropped, one still alive).
        drop(buffer_a_1);
        let buffer_a_3 = worktree
            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
            .await
            .unwrap();

        // There's still only one buffer per path.
        assert_eq!(buffer_a_3.id(), buffer_a_id);
    }
3657
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        use std::fs;

        // Exercises the buffer dirty-state lifecycle: edits dirty a buffer,
        // saving cleans it, and deleting the backing file dirties it again —
        // each transition emitting the expected sequence of events.
        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let buffer1 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Accumulates every event the buffer emits, so each phase below can
        // assert exactly which events occurred, then clear the log.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        tree.flush_fs_events(&cx).await;
        // Dirty the buffer first, then delete the file; only a
        // FileHandleChanged event should follow.
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3797
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        // An unmodified buffer is reloaded in place when its file changes on
        // disk; a buffer with unsaved edits keeps them and is marked as
        // having a conflict instead.
        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3900
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        // Wires a worktree to a fake language server and verifies that
        // disk-based-diagnostics progress notifications and published
        // diagnostics are surfaced as worktree events and buffer entries.
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Arc::new(languages),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
            .await
            .unwrap();

        let mut events = subscribe(&tree, &mut cx);

        fake_server.start_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdating
        );

        // Start/end the progress token in a nested fashion; per the
        // assertions below, only a single Updating/Updated event pair is
        // expected for the whole sequence.
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        // Publish one error diagnostic for a.rs while progress is still
        // under way.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiagnosticsUpdated(Arc::from(Path::new("a.rs")))
        );

        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdated
        );

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // The published diagnostic shows up in the buffer, translated from
        // LSP positions into buffer points.
        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
4009
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        // Diagnostics linked via related_information are grouped: each group
        // gets one primary entry plus supplemental (hint) entries sharing a
        // group_id, and a whole group can be retrieved via diagnostic_group.
        let fs = Arc::new(FakeFs::new());
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Two logical errors: "error 1" with one hint, and "error 2" with
        // two hints. Primaries and hints reference each other through
        // related_information.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| {
                tree.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All entries come back ordered by buffer position, with group_id 0
        // for "error 1" and group_id 1 for "error 2"; only the original
        // diagnostics are marked is_primary.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be fetched on its own by group id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
4274
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Randomized scanner test: interleave filesystem mutations with
        // delivery of batched fs events, then check that the incrementally
        // maintained snapshot stays internally consistent, matches a
        // from-scratch scan, and that older snapshots can be caught up via
        // build_update/apply_update.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed a real temporary directory with random contents
        // (insertion_probability = 1.0 forces creation).
        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        let mut initial_snapshot = Snapshot {
            id: WorktreeId::from_usize(0),
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Randomly alternate between mutating the tree and delivering a
        // random-sized prefix of the accumulated events, occasionally
        // capturing intermediate snapshots for the update test below.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Deliver any remaining events so the scanner reaches a quiescent
        // state before the final comparisons.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // The incrementally-maintained snapshot must match a brand-new scan
        // of the same directory.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Every captured intermediate snapshot can be brought up to the
        // final state by applying a single built update.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Simulate a snapshot that never tracked ignored entries by
                // stripping them out before applying the update.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4391
    /// Applies one random mutation beneath `root_path` — creating a file or
    /// directory, writing a `.gitignore`, renaming, or deleting an existing
    /// entry — and returns synthetic fs events for every touched path.
    ///
    /// `insertion_probability` biases the choice toward creation; creation
    /// is forced when the tree contains nothing but the root directory.
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                // Event ids only need to be roughly monotonic here; seconds
                // since the epoch is sufficient for the test scanner.
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        // Create a new file or directory inside a random existing directory.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                std::fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                std::fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Occasionally write a `.gitignore` covering a random subset of
            // a random directory's files and subdirectories.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Ignore entries are written relative to the ignore file's
            // directory, one per line.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                write!(
                    ignore_contents,
                    "{}\n",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            std::fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Pick an existing file, or a non-root directory, to rename or
            // delete.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Move to a directory that is not inside the moved entry,
                // sometimes replacing an existing directory outright.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    std::fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                std::fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                let (dirs, files) = read_dir_recursive(old_path.clone());

                // Deleting a directory records an event for everything that
                // lived inside it, files first, then directories.
                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_dir_all(&old_path).unwrap();
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
4514
4515 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4516 let child_entries = std::fs::read_dir(&path).unwrap();
4517 let mut dirs = vec![path];
4518 let mut files = Vec::new();
4519 for child_entry in child_entries {
4520 let child_path = child_entry.unwrap().path();
4521 if child_path.is_dir() {
4522 let (child_dirs, child_files) = read_dir_recursive(child_path);
4523 dirs.extend(child_dirs);
4524 files.extend(child_files);
4525 } else {
4526 files.push(child_path);
4527 }
4528 }
4529 (dirs, files)
4530 }
4531
4532 fn gen_name(rng: &mut impl Rng) -> String {
4533 (0..6)
4534 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4535 .map(char::from)
4536 .collect()
4537 }
4538
    impl Snapshot {
        /// Test-only validation of a snapshot's internal consistency:
        /// the `files` iterators must agree with `entries_by_path`, a
        /// re-traversal via `child_entries` must visit the same entries in
        /// the same order, and every tracked ignore must correspond to an
        /// existing directory entry and `.gitignore` file entry.
        fn check_invariants(&self) {
            // The files iterator (and its visible-only variant) must yield
            // exactly the file entries stored in entries_by_path, in order.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            // Both iterators must be exhausted — no extra files.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Re-traverse the tree through child_entries and check that it
            // produces the same path sequence as entries_by_path.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every registered ignore must have both its parent directory
            // and its .gitignore file present as entries.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples
        /// sorted by path, optionally filtering out ignored entries.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4590}