1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary, ProjectEntry,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
9use clock::ReplicaId;
10use collections::{hash_map, HashMap, HashSet};
11use futures::{Stream, StreamExt};
12use fuzzy::CharBag;
13use gpui::{
14 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
15 Task, UpgradeModelHandle, WeakModelHandle,
16};
17use language::{
18 range_from_lsp, Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language,
19 LanguageRegistry, Operation, PointUtf16, Rope,
20};
21use lazy_static::lazy_static;
22use lsp::LanguageServer;
23use parking_lot::Mutex;
24use postage::{
25 prelude::{Sink as _, Stream as _},
26 watch,
27};
28use serde::Deserialize;
29use smol::channel::{self, Sender};
30use std::{
31 any::Any,
32 cmp::{self, Ordering},
33 convert::{TryFrom, TryInto},
34 ffi::{OsStr, OsString},
35 fmt,
36 future::Future,
37 ops::Deref,
38 path::{Path, PathBuf},
39 sync::{
40 atomic::{AtomicUsize, Ordering::SeqCst},
41 Arc,
42 },
43 time::{Duration, SystemTime},
44};
45use sum_tree::{Bias, TreeMap};
46use sum_tree::{Edit, SeekTarget, SumTree};
47use util::{post_inc, ResultExt, TryFutureExt};
48
lazy_static! {
    // The `.gitignore` file name, interned once as an `OsStr` so directory
    // scans can compare file names without allocating.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
52
/// Progress of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan is currently running.
    Idle,
    /// A scan is in progress; the snapshot may be incomplete.
    Scanning,
    /// The last scan attempt failed with the given error.
    Err(Arc<anyhow::Error>),
}
59
/// Identifier for a worktree: the model handle id for local worktrees
/// (`from_usize`), or the remote id narrowed to `usize` (`from_proto`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A tree of files, either backed by the local filesystem (`Local`) or
/// mirrored from a collaborator over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// Events emitted by a `Worktree` model.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Event {
    /// Disk-based diagnostic sources started producing updates.
    DiskBasedDiagnosticsUpdating,
    /// Disk-based diagnostic sources finished updating.
    DiskBasedDiagnosticsUpdated,
    /// Diagnostics changed for the given worktree-relative path.
    DiagnosticsUpdated(Arc<Path>),
}
74
75impl Entity for Worktree {
76 type Event = Event;
77
78 fn app_will_quit(
79 &mut self,
80 _: &mut MutableAppContext,
81 ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
82 use futures::FutureExt;
83
84 if let Self::Local(worktree) = self {
85 let shutdown_futures = worktree
86 .language_servers
87 .drain()
88 .filter_map(|(_, server)| server.shutdown())
89 .collect::<Vec<_>>();
90 Some(
91 async move {
92 futures::future::join_all(shutdown_futures).await;
93 }
94 .boxed(),
95 )
96 } else {
97 None
98 }
99 }
100}
101
102impl Worktree {
    /// Creates a worktree backed by the local filesystem at `path`, then
    /// spawns the background scanner task that watches the root for changes
    /// and keeps the background snapshot up to date.
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // Watch the root with a 100ms debounce and drive the scanner on
            // the background executor; scan-state changes flow back through
            // `scan_states_tx`.
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
127
    /// Creates a worktree mirroring a remote collaborator's worktree: builds
    /// the initial snapshot from the protobuf payload, then spawns tasks that
    /// apply incoming updates and re-poll the model when the snapshot changes.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        // Deserializing the entry list can be large, so build the entry trees
        // on the background executor.
        let (entries_by_path, entries_by_id, diagnostic_summaries) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // Skip malformed entries instead of failing the whole
                        // worktree.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());

                let diagnostic_summaries = TreeMap::from_ordered_entries(
                    worktree.diagnostic_summaries.into_iter().map(|summary| {
                        (
                            PathKey(PathBuf::from(summary.path).into()),
                            DiagnosticSummary {
                                error_count: summary.error_count as usize,
                                warning_count: summary.warning_count as usize,
                                info_count: summary.info_count as usize,
                                hint_count: summary.hint_count as usize,
                            },
                        )
                    }),
                );

                (entries_by_path, entries_by_id, diagnostic_summaries)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    // A remote worktree has no local absolute path.
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Apply incoming protocol updates to the shared snapshot on
                // the background executor; failures are logged, not fatal.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    // Whenever a new snapshot is published, poll it on the
                    // foreground so the model and open buffers stay in sync.
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                // Worktree model was dropped; stop polling.
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                    diagnostic_summaries,
                })
            })
        });

        Ok(worktree)
    }
250
251 pub fn as_local(&self) -> Option<&LocalWorktree> {
252 if let Worktree::Local(worktree) = self {
253 Some(worktree)
254 } else {
255 None
256 }
257 }
258
259 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
260 if let Worktree::Remote(worktree) = self {
261 Some(worktree)
262 } else {
263 None
264 }
265 }
266
267 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
268 if let Worktree::Local(worktree) = self {
269 Some(worktree)
270 } else {
271 None
272 }
273 }
274
275 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
276 if let Worktree::Remote(worktree) = self {
277 Some(worktree)
278 } else {
279 None
280 }
281 }
282
    /// Returns a clone of this worktree's current entry snapshot.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot(),
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
289
290 pub fn replica_id(&self) -> ReplicaId {
291 match self {
292 Worktree::Local(_) => 0,
293 Worktree::Remote(worktree) => worktree.replica_id,
294 }
295 }
296
    /// Delegates collaborator removal to the underlying worktree. The local
    /// side also needs the peer id; the remote side only needs the replica id.
    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Self>,
    ) {
        match self {
            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
        }
    }
308
    /// Returns the language registry backing this worktree.
    pub fn languages(&self) -> &Arc<LanguageRegistry> {
        match self {
            Worktree::Local(worktree) => &worktree.language_registry,
            Worktree::Remote(worktree) => &worktree.languages,
        }
    }
315
    /// Returns the user store shared by both worktree variants.
    pub fn user_store(&self) -> &ModelHandle<UserStore> {
        match self {
            Worktree::Local(worktree) => &worktree.user_store,
            Worktree::Remote(worktree) => &worktree.user_store,
        }
    }
322
    /// Handles a collaborator's request to open a buffer in this worktree,
    /// responding over RPC once the buffer state is available.
    ///
    /// Panics if called on a remote worktree: only the host receives this
    /// message.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        // Await the buffer and respond in the background; failures are logged
        // rather than propagated to the caller.
        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
348
349 pub fn handle_close_buffer(
350 &mut self,
351 envelope: TypedEnvelope<proto::CloseBuffer>,
352 _: Arc<Client>,
353 cx: &mut ModelContext<Self>,
354 ) -> anyhow::Result<()> {
355 self.as_local_mut()
356 .unwrap()
357 .close_remote_buffer(envelope, cx)
358 }
359
360 pub fn diagnostic_summaries<'a>(
361 &'a self,
362 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
363 match self {
364 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
365 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
366 }
367 .iter()
368 .map(|(path, summary)| (path.0.clone(), summary.clone()))
369 }
370
371 pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
372 match self {
373 Worktree::Local(worktree) => &mut worktree.loading_buffers,
374 Worktree::Remote(worktree) => &mut worktree.loading_buffers,
375 }
376 }
377
    /// Opens the buffer at `path`, returning the already-open buffer if one
    /// exists and deduplicating concurrent loads of the same path through a
    /// per-path watch channel.
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
        }

        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    // Publish the result to everyone waiting on the watch;
                    // errors are wrapped in `Arc` so they can be cloned.
                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
                })
                .detach();
                rx
            }
        };

        // Wait until the watch carries a result, then propagate it. Errors
        // are re-stringified because `Arc<anyhow::Error>` can't be returned
        // as an `anyhow::Error` directly.
        cx.spawn(|_, _| async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return result.clone().map_err(|e| anyhow!("{}", e));
                }
                loading_watch.recv().await;
            }
        })
    }
431
432 #[cfg(feature = "test-support")]
433 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
434 let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
435 Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
436 Worktree::Remote(worktree) => {
437 Box::new(worktree.open_buffers.values().filter_map(|buf| {
438 if let RemoteBuffer::Loaded(buf) = buf {
439 Some(buf)
440 } else {
441 None
442 }
443 }))
444 }
445 };
446
447 let path = path.as_ref();
448 open_buffers
449 .find(|buffer| {
450 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
451 file.path().as_ref() == path
452 } else {
453 false
454 }
455 })
456 .is_some()
457 }
458
459 pub fn handle_update_buffer(
460 &mut self,
461 envelope: TypedEnvelope<proto::UpdateBuffer>,
462 cx: &mut ModelContext<Self>,
463 ) -> Result<()> {
464 let payload = envelope.payload.clone();
465 let buffer_id = payload.buffer_id as usize;
466 let ops = payload
467 .operations
468 .into_iter()
469 .map(|op| language::proto::deserialize_operation(op))
470 .collect::<Result<Vec<_>, _>>()?;
471
472 match self {
473 Worktree::Local(worktree) => {
474 let buffer = worktree
475 .open_buffers
476 .get(&buffer_id)
477 .and_then(|buf| buf.upgrade(cx))
478 .ok_or_else(|| {
479 anyhow!("invalid buffer {} in update buffer message", buffer_id)
480 })?;
481 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
482 }
483 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
484 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
485 Some(RemoteBuffer::Loaded(buffer)) => {
486 if let Some(buffer) = buffer.upgrade(cx) {
487 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
488 } else {
489 worktree
490 .open_buffers
491 .insert(buffer_id, RemoteBuffer::Operations(ops));
492 }
493 }
494 None => {
495 worktree
496 .open_buffers
497 .insert(buffer_id, RemoteBuffer::Operations(ops));
498 }
499 },
500 }
501
502 Ok(())
503 }
504
    /// Handles a collaborator's request to save one of this worktree's shared
    /// buffers, responding with the saved version and mtime.
    ///
    /// Errors if the worktree isn't currently shared or the buffer id is
    /// unknown for the requesting peer. Panics on a remote worktree.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        // Saving on behalf of a guest only makes sense while sharing.
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        // The save itself must run with access to the model context...
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        // ...while awaiting completion and responding happens in the
        // background; failures are logged rather than propagated.
        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
557
558 pub fn handle_buffer_saved(
559 &mut self,
560 envelope: TypedEnvelope<proto::BufferSaved>,
561 cx: &mut ModelContext<Self>,
562 ) -> Result<()> {
563 let payload = envelope.payload.clone();
564 let worktree = self.as_remote_mut().unwrap();
565 if let Some(buffer) = worktree
566 .open_buffers
567 .get(&(payload.buffer_id as usize))
568 .and_then(|buf| buf.upgrade(cx))
569 {
570 buffer.update(cx, |buffer, cx| {
571 let version = payload.version.try_into()?;
572 let mtime = payload
573 .mtime
574 .ok_or_else(|| anyhow!("missing mtime"))?
575 .into();
576 buffer.did_save(version, mtime, None, cx);
577 Result::<_, anyhow::Error>::Ok(())
578 })?;
579 }
580 Ok(())
581 }
582
    /// Handles a collaborator's request to format a shared buffer, replying
    /// with `Ack` on success or an error message on failure. Panics on a
    /// remote worktree.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();

        Ok(())
    }
624
    /// Refreshes the model's snapshot from the background scanner (local) or
    /// the update stream (remote), reconciles open buffers once scanning has
    /// settled, and notifies observers.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    // While a scan is running, schedule a single delayed
                    // re-poll instead of copying the snapshot on every event.
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                // The fake fs (tests) settles immediately, so
                                // just yield instead of sleeping.
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
657
    /// Reconciles every open buffer's `File` with the current snapshot:
    /// re-resolves each buffer's entry by id first, then by path, and removes
    /// buffers whose models have been dropped.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                // Only fully-loaded remote buffers have a model to update.
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        // Prefer resolving by entry id (stable across path
                        // changes), fall back to the old path; otherwise the
                        // entry is gone and the file keeps its old path and
                        // mtime with no entry id.
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                // The buffer model was dropped; collect it for removal after
                // iteration (can't mutate the map while borrowing it).
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
733
    /// Converts an LSP `publishDiagnostics` notification into this worktree's
    /// diagnostic entries, grouping each primary diagnostic with its related
    /// ("supporting") diagnostics, then stores them for the affected path.
    ///
    /// Errors if this worktree isn't local or the URI doesn't resolve to a
    /// path inside the worktree.
    pub fn update_diagnostics(
        &mut self,
        params: lsp::PublishDiagnosticsParams,
        disk_based_sources: &HashSet<String>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
        let abs_path = params
            .uri
            .to_file_path()
            .map_err(|_| anyhow!("URI is not a file"))?;
        let worktree_path = Arc::from(
            abs_path
                .strip_prefix(&this.abs_path)
                .context("path is not within worktree")?,
        );

        let mut next_group_id = 0;
        let mut diagnostics = Vec::default();
        // Maps (source, code, range) of each primary diagnostic to its group.
        let mut primary_diagnostic_group_ids = HashMap::default();
        let mut sources_by_group_id = HashMap::default();
        // Severities reported by supporting diagnostics, applied to the
        // matching related entries in the second pass below.
        let mut supporting_diagnostic_severities = HashMap::default();
        for diagnostic in &params.diagnostics {
            let source = diagnostic.source.as_ref();
            let code = diagnostic.code.as_ref().map(|code| match code {
                lsp::NumberOrString::Number(code) => code.to_string(),
                lsp::NumberOrString::String(code) => code.clone(),
            });
            let range = range_from_lsp(diagnostic.range);
            // A diagnostic is "supporting" when one of its related locations
            // matches a primary diagnostic already seen. NOTE(review): this
            // relies on primaries preceding their supporting diagnostics in
            // `params.diagnostics` — confirm against the servers in use.
            let is_supporting = diagnostic
                .related_information
                .as_ref()
                .map_or(false, |infos| {
                    infos.iter().any(|info| {
                        primary_diagnostic_group_ids.contains_key(&(
                            source,
                            code.clone(),
                            range_from_lsp(info.location.range),
                        ))
                    })
                });

            if is_supporting {
                if let Some(severity) = diagnostic.severity {
                    supporting_diagnostic_severities
                        .insert((source, code.clone(), range), severity);
                }
            } else {
                let group_id = post_inc(&mut next_group_id);
                let is_disk_based =
                    source.map_or(false, |source| disk_based_sources.contains(source));

                sources_by_group_id.insert(group_id, source);
                primary_diagnostic_group_ids
                    .insert((source, code.clone(), range.clone()), group_id);

                diagnostics.push(DiagnosticEntry {
                    range,
                    diagnostic: Diagnostic {
                        code: code.clone(),
                        severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
                        message: diagnostic.message.clone(),
                        group_id,
                        is_primary: true,
                        is_valid: true,
                        is_disk_based,
                    },
                });
                // Mirror related locations in the same file as non-primary
                // members of this group, defaulting to INFORMATION severity.
                if let Some(infos) = &diagnostic.related_information {
                    for info in infos {
                        if info.location.uri == params.uri {
                            let range = range_from_lsp(info.location.range);
                            diagnostics.push(DiagnosticEntry {
                                range,
                                diagnostic: Diagnostic {
                                    code: code.clone(),
                                    severity: DiagnosticSeverity::INFORMATION,
                                    message: info.message.clone(),
                                    group_id,
                                    is_primary: false,
                                    is_valid: true,
                                    is_disk_based,
                                },
                            });
                        }
                    }
                }
            }
        }

        // Second pass: upgrade related entries with the severities their
        // supporting diagnostics reported.
        for entry in &mut diagnostics {
            let diagnostic = &mut entry.diagnostic;
            if !diagnostic.is_primary {
                let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
                if let Some(&severity) = supporting_diagnostic_severities.get(&(
                    source,
                    diagnostic.code.clone(),
                    entry.range.clone(),
                )) {
                    diagnostic.severity = severity;
                }
            }
        }

        self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
        Ok(())
    }
841
    /// Stores new diagnostics for `worktree_path`: pushes them into the
    /// matching open buffer (if any), records the per-path summary, emits
    /// `DiagnosticsUpdated`, and forwards the summary to collaborators when
    /// shared. Panics on a remote worktree.
    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let this = self.as_local_mut().unwrap();
        // Apply the diagnostics to the open buffer for this path, if any, and
        // replicate the resulting operation.
        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    // Only one open buffer matches a given path.
                    break;
                }
            }
        }

        // Re-borrow: `send_buffer_update` above needed `&mut self`.
        let this = self.as_local_mut().unwrap();
        let summary = DiagnosticSummary::new(&diagnostics);
        this.diagnostic_summaries
            .insert(PathKey(worktree_path.clone()), summary.clone());
        this.diagnostics.insert(worktree_path.clone(), diagnostics);

        cx.emit(Event::DiagnosticsUpdated(worktree_path.clone()));

        // When sharing, send the updated summary upstream; failures are
        // logged rather than propagated.
        if let Some(share) = this.share.as_ref() {
            cx.foreground()
                .spawn({
                    let client = this.client.clone();
                    let project_id = share.project_id;
                    let worktree_id = this.id().to_proto();
                    let path = worktree_path.to_string_lossy().to_string();
                    async move {
                        client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path,
                                    error_count: summary.error_count as u32,
                                    warning_count: summary.warning_count as u32,
                                    info_count: summary.info_count as u32,
                                    hint_count: summary.hint_count as u32,
                                }),
                            })
                            .await
                            .log_err()
                    }
                })
                .detach();
        }

        Ok(())
    }
906
    /// Replicates a buffer operation to the peer over RPC, queueing the
    /// operation for later retransmission if the request fails.
    ///
    /// Local worktrees only replicate while shared; remote worktrees always
    /// send to their host project.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.snapshot.id(),
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree_id.0 as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    // Keep the failed operation so it can be re-sent later.
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
947}
948
949impl WorktreeId {
950 pub fn from_usize(handle_id: usize) -> Self {
951 Self(handle_id)
952 }
953
954 pub(crate) fn from_proto(id: u64) -> Self {
955 Self(id as usize)
956 }
957
958 pub fn to_proto(&self) -> u64 {
959 self.0 as u64
960 }
961
962 pub fn to_usize(&self) -> usize {
963 self.0
964 }
965}
966
967impl fmt::Display for WorktreeId {
968 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
969 self.0.fmt(f)
970 }
971}
972
/// A point-in-time view of a worktree's file entries.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Incremented per scan; also recorded on ignores and path entries.
    scan_id: usize,
    // Absolute path of the worktree root on disk (empty for remote worktrees).
    abs_path: Arc<Path>,
    // File name of the root, used for fuzzy matching.
    root_name: String,
    // Lowercased characters of `root_name`, precomputed for fuzzy matching.
    root_char_bag: CharBag,
    // Compiled gitignore per directory, paired with the scan id it was seen in.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Entries ordered by path, plus an id-keyed index into them.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    // Source of fresh entry ids, shared with the background scanner.
    next_entry_id: Arc<AtomicUsize>,
}
986
/// A worktree backed by the local filesystem, scanned in the background and
/// optionally shared with remote collaborators.
pub struct LocalWorktree {
    // Foreground copy of the scanner's snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Parsed `.zed.toml` settings for this worktree.
    config: WorktreeConfig,
    // Snapshot mutated by the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    _background_scanner_task: Option<Task<()>>,
    // Pending task that re-polls the snapshot while a scan is in progress.
    poll_task: Option<Task<()>>,
    // Present only while this worktree is shared with collaborators.
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    // Locally open buffers keyed by buffer id; weak so closing drops them.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles to buffers opened on behalf of each remote peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Latest diagnostics and summaries, keyed by worktree-relative path.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    // Operations that failed to send and await retransmission.
    queued_operations: Vec<(u64, Operation)>,
    language_registry: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Running language servers, keyed by language name.
    language_servers: HashMap<String, Arc<LanguageServer>>,
}
1007
/// State that exists only while a local worktree is being shared.
struct ShareState {
    project_id: u64,
    // Channel on which completed snapshots are queued for collaborators.
    snapshots_tx: Sender<Snapshot>,
    _maintain_remote_snapshot: Option<Task<()>>,
}
1013
/// A worktree mirrored from a remote host over RPC.
pub struct RemoteWorktree {
    project_id: u64,
    // Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Receives snapshots produced by applying incoming updates.
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Incoming worktree updates queue here and are applied in the background.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    // Open buffers, or operations queued for buffers still loading.
    open_buffers: HashMap<usize, RemoteBuffer>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    // Operations that failed to send and await retransmission.
    queued_operations: Vec<(u64, Operation)>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
}
1028
/// Watch receivers for buffers currently being loaded, keyed by path. The
/// value stays `None` until the load completes; errors are `Arc`-wrapped so
/// every waiter can clone the result.
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;
1033
/// Contents of the optional `.zed.toml` file at the worktree root.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Collaborator logins; exposed via `LocalWorktree::authorized_logins`.
    collaborators: Vec<String>,
}
1038
1039impl LocalWorktree {
    /// Builds a `LocalWorktree` model rooted at `path`: reads the root's
    /// metadata and optional `.zed.toml`, seeds the snapshot with the root
    /// entry, and wires up the scan-state listener. Returns the model and the
    /// sender the background scanner uses to report scan-state changes.
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // The root entry's path is the empty relative path.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // `.zed.toml` is optional; read/parse errors fall back to defaults.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            // Seed the snapshot with the root entry when the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                language_registry: languages,
                client,
                user_store,
                fs,
                language_servers: Default::default(),
            };

            // Forward scan-state changes into `last_scan_state_rx`, re-poll
            // the snapshot, and — once scanning settles while sharing — queue
            // the latest snapshot for collaborators.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // The worktree model was dropped; stop listening.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
1147
1148 pub fn authorized_logins(&self) -> Vec<String> {
1149 self.config.collaborators.clone()
1150 }
1151
    /// Returns the registry used to look up languages for files in this worktree.
    pub fn language_registry(&self) -> &LanguageRegistry {
        &self.language_registry
    }
1155
    /// Returns a language server for `language`, starting one for this
    /// worktree if none is running yet, and wiring up its diagnostics.
    ///
    /// The newly started server gets three listeners attached:
    /// 1. `textDocument/publishDiagnostics` notifications are forwarded into
    ///    a channel and applied via `update_diagnostics`.
    /// 2. `$/progress` notifications matching the language's disk-based
    ///    diagnostics token are translated into begin/end progress events.
    /// 3. Those progress events are re-emitted as worktree `Event`s and, when
    ///    the worktree is shared, broadcast to collaborators over RPC.
    ///
    /// Returns `None` if the server failed to start.
    pub fn register_language(
        &mut self,
        language: &Arc<Language>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        // Reuse an already-running server for this language, if any.
        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            // Internal marker for the start/end of a disk-based diagnostics pass.
            enum DiagnosticProgress {
                Updating,
                Updated,
            }

            let disk_based_sources = language
                .disk_based_diagnostic_sources()
                .cloned()
                .unwrap_or_default();
            let disk_based_diagnostics_progress_token =
                language.disk_based_diagnostics_progress_token().cloned();
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            let (disk_based_diagnostics_done_tx, disk_based_diagnostics_done_rx) =
                smol::channel::unbounded();
            // Forward published diagnostics into a channel drained by the
            // foreground task below.
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            cx.spawn_weak(|this, mut cx| {
                let has_disk_based_diagnostic_progress_token =
                    disk_based_diagnostics_progress_token.is_some();
                let disk_based_diagnostics_done_tx = disk_based_diagnostics_done_tx.clone();
                async move {
                    while let Ok(diagnostics) = diagnostics_rx.recv().await {
                        if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                            handle.update(&mut cx, |this, cx| {
                                // Servers without a progress token can't signal
                                // pass boundaries themselves, so treat each
                                // publish as a complete updating/updated pass.
                                if !has_disk_based_diagnostic_progress_token {
                                    smol::block_on(
                                        disk_based_diagnostics_done_tx
                                            .send(DiagnosticProgress::Updating),
                                    )
                                    .ok();
                                }
                                this.update_diagnostics(diagnostics, &disk_based_sources, cx)
                                    .log_err();
                                if !has_disk_based_diagnostic_progress_token {
                                    smol::block_on(
                                        disk_based_diagnostics_done_tx
                                            .send(DiagnosticProgress::Updated),
                                    )
                                    .ok();
                                }
                            })
                        } else {
                            // Worktree was dropped; stop listening.
                            break;
                        }
                    }
                }
            })
            .detach();

            // Track nesting of work-done progress reports so only the
            // outermost begin/end of a diagnostics pass is surfaced.
            let mut pending_disk_based_diagnostics: i32 = 0;
            language_server
                .on_notification::<lsp::notification::Progress, _>(move |params| {
                    let token = match params.token {
                        lsp::NumberOrString::Number(_) => None,
                        lsp::NumberOrString::String(token) => Some(token),
                    };

                    if token == disk_based_diagnostics_progress_token {
                        match params.value {
                            lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                                lsp::WorkDoneProgress::Begin(_) => {
                                    if pending_disk_based_diagnostics == 0 {
                                        smol::block_on(
                                            disk_based_diagnostics_done_tx
                                                .send(DiagnosticProgress::Updating),
                                        )
                                        .ok();
                                    }
                                    pending_disk_based_diagnostics += 1;
                                }
                                lsp::WorkDoneProgress::End(_) => {
                                    pending_disk_based_diagnostics -= 1;
                                    if pending_disk_based_diagnostics == 0 {
                                        smol::block_on(
                                            disk_based_diagnostics_done_tx
                                                .send(DiagnosticProgress::Updated),
                                        )
                                        .ok();
                                    }
                                }
                                _ => {}
                            },
                        }
                    }
                })
                .detach();
            // Relay diagnostic progress to observers and, when shared, to
            // collaborators over RPC.
            let rpc = self.client.clone();
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(progress) = disk_based_diagnostics_done_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        match progress {
                            DiagnosticProgress::Updating => {
                                let message = handle.update(&mut cx, |this, cx| {
                                    cx.emit(Event::DiskBasedDiagnosticsUpdating);
                                    let this = this.as_local().unwrap();
                                    this.share.as_ref().map(|share| {
                                        proto::DiskBasedDiagnosticsUpdating {
                                            project_id: share.project_id,
                                            worktree_id: this.id().to_proto(),
                                        }
                                    })
                                });

                                if let Some(message) = message {
                                    rpc.send(message).await.log_err();
                                }
                            }
                            DiagnosticProgress::Updated => {
                                let message = handle.update(&mut cx, |this, cx| {
                                    cx.emit(Event::DiskBasedDiagnosticsUpdated);
                                    let this = this.as_local().unwrap();
                                    this.share.as_ref().map(|share| {
                                        proto::DiskBasedDiagnosticsUpdated {
                                            project_id: share.project_id,
                                            worktree_id: this.id().to_proto(),
                                        }
                                    })
                                });

                                if let Some(message) = message {
                                    rpc.send(message).await.log_err();
                                }
                            }
                        }
                    } else {
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }
1311
1312 fn get_open_buffer(
1313 &mut self,
1314 path: &Path,
1315 cx: &mut ModelContext<Worktree>,
1316 ) -> Option<ModelHandle<Buffer>> {
1317 let handle = cx.handle();
1318 let mut result = None;
1319 self.open_buffers.retain(|_buffer_id, buffer| {
1320 if let Some(buffer) = buffer.upgrade(cx) {
1321 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1322 if file.worktree == handle && file.path().as_ref() == path {
1323 result = Some(buffer);
1324 }
1325 }
1326 true
1327 } else {
1328 false
1329 }
1330 });
1331 result
1332 }
1333
    /// Opens the file at `path`, loading its contents from disk and creating
    /// a new buffer model for it. Callers are expected to consult
    /// `get_open_buffer` first to avoid duplicate buffers.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            // Load the file's text and refresh its snapshot entry.
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            // Pick up any diagnostics already reported for this path, select
            // the file's language, and start its language server if needed.
            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let diagnostics = this.diagnostics.get(&path).cloned();
                let language = this
                    .language_registry
                    .select_language(file.full_path())
                    .cloned();
                let server = language
                    .as_ref()
                    .and_then(|language| this.register_language(language, cx));
                (diagnostics, language, server)
            });

            let mut buffer_operations = Vec::new();
            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                if let Some(diagnostics) = diagnostics {
                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    buffer_operations.push(op);
                }
                buffer
            });

            this.update(&mut cx, |this, cx| {
                // Broadcast the diagnostic operations to collaborators, then
                // start tracking the buffer via a weak handle.
                for op in buffer_operations {
                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
                }
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
1380
    /// Handles an `OpenBuffer` RPC request from a collaborator: opens the
    /// requested path locally, records the buffer as shared with the sender,
    /// and replies with the buffer's serialized state.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                // Track the buffer as shared with this peer so that later
                // operations can be routed to them.
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }
1406
1407 pub fn close_remote_buffer(
1408 &mut self,
1409 envelope: TypedEnvelope<proto::CloseBuffer>,
1410 cx: &mut ModelContext<Worktree>,
1411 ) -> Result<()> {
1412 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1413 shared_buffers.remove(&envelope.payload.buffer_id);
1414 cx.notify();
1415 }
1416
1417 Ok(())
1418 }
1419
1420 pub fn remove_collaborator(
1421 &mut self,
1422 peer_id: PeerId,
1423 replica_id: ReplicaId,
1424 cx: &mut ModelContext<Worktree>,
1425 ) {
1426 self.shared_buffers.remove(&peer_id);
1427 for (_, buffer) in &self.open_buffers {
1428 if let Some(buffer) = buffer.upgrade(cx) {
1429 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1430 }
1431 }
1432 cx.notify();
1433 }
1434
1435 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1436 let mut scan_state_rx = self.last_scan_state_rx.clone();
1437 async move {
1438 let mut scan_state = Some(scan_state_rx.borrow().clone());
1439 while let Some(ScanState::Scanning) = scan_state {
1440 scan_state = scan_state_rx.recv().await;
1441 }
1442 }
1443 }
1444
1445 fn is_scanning(&self) -> bool {
1446 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1447 true
1448 } else {
1449 false
1450 }
1451 }
1452
    /// Returns a clone of the worktree's current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1456
    /// Returns the absolute filesystem path of the worktree's root.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }
1460
    /// Returns true if the given absolute path lies within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1464
1465 fn absolutize(&self, path: &Path) -> PathBuf {
1466 if path.file_name().is_some() {
1467 self.snapshot.abs_path.join(path)
1468 } else {
1469 self.snapshot.abs_path.to_path_buf()
1470 }
1471 }
1472
    /// Reads the file at `path` (relative to the worktree root) from disk,
    /// returning a `File` handle together with the file's text.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            // Surface the refreshed entry in the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1498
1499 pub fn save_buffer_as(
1500 &self,
1501 buffer_handle: ModelHandle<Buffer>,
1502 path: impl Into<Arc<Path>>,
1503 cx: &mut ModelContext<Worktree>,
1504 ) -> Task<Result<()>> {
1505 let buffer = buffer_handle.read(cx);
1506 let text = buffer.as_rope().clone();
1507 let version = buffer.version();
1508 let save = self.save(path, text, cx);
1509 cx.spawn(|this, mut cx| async move {
1510 let entry = save.await?;
1511 let file = this.update(&mut cx, |this, cx| {
1512 let this = this.as_local_mut().unwrap();
1513 this.open_buffers
1514 .insert(buffer_handle.id(), buffer_handle.downgrade());
1515 File {
1516 entry_id: Some(entry.id),
1517 worktree: cx.handle(),
1518 worktree_path: this.abs_path.clone(),
1519 path: entry.path,
1520 mtime: entry.mtime,
1521 is_local: true,
1522 }
1523 });
1524
1525 let (language, language_server) = this.update(&mut cx, |worktree, cx| {
1526 let worktree = worktree.as_local_mut().unwrap();
1527 let language = worktree
1528 .language_registry()
1529 .select_language(file.full_path())
1530 .cloned();
1531 let language_server = language
1532 .as_ref()
1533 .and_then(|language| worktree.register_language(language, cx));
1534 (language, language_server.clone())
1535 });
1536
1537 buffer_handle.update(&mut cx, |buffer, cx| {
1538 buffer.did_save(version, file.mtime, Some(Box::new(file)), cx);
1539 buffer.set_language(language, language_server, cx);
1540 });
1541
1542 Ok(())
1543 })
1544 }
1545
    /// Writes `text` to `path` on disk in the background, then refreshes the
    /// file's snapshot entry and returns it.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        // Perform the write and entry refresh off the main thread.
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Surface the refreshed entry in the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1567
    /// Begins sharing this worktree with collaborators under `project_id`.
    ///
    /// Spawns a background task that streams snapshot diffs to the server as
    /// the tree changes, then sends the initial `ShareWorktree` request.
    /// Idempotent: returns immediately if the worktree is already shared.
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        let maintain_remote_snapshot = cx.background().spawn({
            let rpc = rpc.clone();
            let snapshot = snapshot.clone();
            async move {
                // Send each new snapshot as a diff against the last snapshot
                // that was successfully delivered.
                let mut prev_snapshot = snapshot;
                while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                    let message =
                        snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                    match rpc.send(message).await {
                        Ok(()) => prev_snapshot = snapshot,
                        Err(err) => log::error!("error sending snapshot diff {}", err),
                    }
                }
            }
        });
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
            _maintain_remote_snapshot: Some(maintain_remote_snapshot),
        });

        // Serialize the initial snapshot off the main thread; it can be large.
        let diagnostic_summaries = self.diagnostic_summaries.clone();
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto(&diagnostic_summaries)),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1615
1616 pub fn unshare(&mut self) {
1617 self.share.take();
1618 }
1619
    /// Returns whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1623}
1624
1625fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1626 let contents = smol::block_on(fs.load(&abs_path))?;
1627 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1628 let mut builder = GitignoreBuilder::new(parent);
1629 for line in contents.lines() {
1630 builder.add_line(Some(abs_path.into()), line)?;
1631 }
1632 Ok(builder.build()?)
1633}
1634
// Both worktree variants expose their `Snapshot` directly via deref.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
1645
// A local worktree derefs to its foreground snapshot.
impl Deref for LocalWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1653
// A remote worktree derefs to its replicated snapshot.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1661
// Debug output delegates to the snapshot's entry-tree rendering.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1667
impl RemoteWorktree {
    /// Returns the already-open buffer for `path`, if any, pruning dropped
    /// buffer handles from `open_buffers` along the way.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == handle && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                // The buffer was dropped; forget its entry.
                false
            }
        });
        existing_buffer
    }

    /// Opens `path` by requesting its contents from the host over RPC and
    /// constructing a local replica of the buffer.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.id();
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        cx.spawn_weak(move |this, mut cx| async move {
            // Verify the path exists in our replicated snapshot before asking
            // the host to open it.
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id.to_proto(),
                    path: path_string,
                })
                .await?;

            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let language = this.read_with(&cx, |this, _| {
                use language::File;
                this.languages().select_language(file.full_path()).cloned()
            });
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
                    .unwrap()
                    .with_language(language, None, cx)
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Apply any operations that arrived while the open request
                // was still in flight.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }

    /// Closes every buffer that is still open in this worktree, e.g. when
    /// the connection to the host goes away.
    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
        for (_, buffer) in self.open_buffers.drain() {
            if let RemoteBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| buffer.close(cx))
                }
            }
        }
    }

    /// Returns a clone of the current replicated snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Queues a snapshot delta received from the host; deltas are applied in
    /// order by the worktree's update-processing task.
    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }

    /// Records a per-path diagnostic summary received from the host and
    /// notifies observers of the change.
    pub fn update_diagnostic_summary(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
        cx: &mut ModelContext<Worktree>,
    ) {
        if let Some(summary) = envelope.payload.summary {
            let path: Arc<Path> = Path::new(&summary.path).into();
            self.diagnostic_summaries.insert(
                PathKey(path.clone()),
                DiagnosticSummary {
                    error_count: summary.error_count as usize,
                    warning_count: summary.warning_count as usize,
                    info_count: summary.info_count as usize,
                    hint_count: summary.hint_count as usize,
                },
            );
            cx.emit(Event::DiagnosticsUpdated(path));
        }
    }

    /// Re-emits the host's "disk-based diagnostics updating" event locally.
    pub fn disk_based_diagnostics_updating(&self, cx: &mut ModelContext<Worktree>) {
        cx.emit(Event::DiskBasedDiagnosticsUpdating);
    }

    /// Re-emits the host's "disk-based diagnostics updated" event locally.
    pub fn disk_based_diagnostics_updated(&self, cx: &mut ModelContext<Worktree>) {
        cx.emit(Event::DiskBasedDiagnosticsUpdated);
    }

    /// Clears a departed collaborator's replica from all open buffers.
    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }
}
1819
/// The state of a buffer within a remote worktree.
enum RemoteBuffer {
    // Operations that arrived before the buffer finished opening; they are
    // applied once the buffer loads (see `RemoteWorktree::open_buffer`).
    Operations(Vec<Operation>),
    // The opened buffer, held weakly so closing it elsewhere releases it.
    Loaded(WeakModelHandle<Buffer>),
}
1824
1825impl RemoteBuffer {
1826 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1827 match self {
1828 Self::Operations(_) => None,
1829 Self::Loaded(buffer) => buffer.upgrade(cx),
1830 }
1831 }
1832}
1833
impl Snapshot {
    /// Returns this worktree's stable identifier.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Serializes the snapshot (excluding ignored entries) together with the
    /// given diagnostic summaries into the wire format.
    pub fn to_proto(
        &self,
        diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
    ) -> proto::Worktree {
        let root_name = self.root_name.clone();
        proto::Worktree {
            id: self.id.0 as u64,
            root_name,
            entries: self
                .entries_by_path
                .iter()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
            diagnostic_summaries: diagnostic_summaries
                .iter()
                .map(|(path, summary)| summary.to_proto(path.0.clone()))
                .collect(),
        }
    }

    /// Computes the delta between this snapshot and an older snapshot
    /// `other`, producing an `UpdateWorktree` message with the entries that
    /// were added/changed and the ids of entries that were removed.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        // Walk both id-ordered entry lists in lockstep, diffing by entry id.
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present here but not in `other`: newly added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: send only if rescanned since.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in `other`: removed.
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }

    /// Applies a remote `UpdateWorktree` delta to this snapshot, keeping the
    /// path-ordered and id-ordered indices in sync. Fails if a removed entry
    /// id is unknown.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If an entry with this id already exists at a different path,
            // remove the stale path record before inserting the new one.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal at index `start_offset` among the entries selected
    /// by the `include_*` flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal at the entry whose path is `path` (or the next one
    /// in traversal order).
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at index `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, files and directories alike.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over all entry paths, skipping the root (empty path) entry.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the immediate children of the entry at `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// Returns the worktree's root entry, if the scan has produced one yet.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// Returns the name of the worktree's root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up the entry at exactly `path`, if one exists.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal may land on a later entry; require an exact match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id via the id-to-path index.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// Returns the inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    /// Inserts (or replaces) a single entry in both indices, reusing a prior
    /// entry id where possible. Inserting a `.gitignore` file also refreshes
    /// the ignore rules for its directory.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    /// Marks a pending directory as fully scanned and inserts its children
    /// (and optionally its `.gitignore` rules) into the snapshot.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        // Only directories that are still awaiting a scan may be populated.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: prefer the id of a previously
    /// removed entry with the same inode, otherwise the id of the existing
    /// entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` and every entry beneath it.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into before / removed-subtree /
            // after, then stitch before and after back together.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the removed id keyed by inode so a re-created file can
            // reuse it (see `reuse_entry_id`).
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the directory's ignore rules as touched in this scan.
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of `.gitignore` rules that applies to `path` by
    /// walking its ancestors from the root downward.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                // An ignored ancestor makes everything beneath it ignored.
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
2201
// Renders the snapshot as an indented tree of paths with inodes, for debugging.
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for entry in self.entries_by_path.cursor::<()>() {
            // Indent each entry one space per ancestor directory.
            for _ in entry.path.ancestors().skip(1) {
                write!(f, " ")?;
            }
            writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
        }
        Ok(())
    }
}
2213
/// A handle to a file belonging to a local or remote worktree.
#[derive(Clone, PartialEq)]
pub struct File {
    // `None` once the underlying entry has been deleted from the worktree.
    entry_id: Option<usize>,
    worktree: ModelHandle<Worktree>,
    // Absolute path of the worktree root containing this file.
    worktree_path: Arc<Path>,
    // Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    // False when the file lives in a collaborator's remote worktree.
    is_local: bool,
}
2223
impl language::File for File {
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// Path of this file, relative to the worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Absolute path on the local filesystem, or `None` for remote worktrees.
    fn abs_path(&self) -> Option<PathBuf> {
        if self.is_local {
            Some(self.worktree_path.join(&self.path))
        } else {
            None
        }
    }

    /// Path to display to the user: the worktree root's name followed by this
    /// file's worktree-relative path.
    fn full_path(&self) -> PathBuf {
        let mut full_path = PathBuf::new();
        if let Some(worktree_name) = self.worktree_path.file_name() {
            full_path.push(worktree_name);
        }
        full_path.push(&self.path);
        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }

    /// A file is considered deleted once its worktree entry has been removed.
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Saves `text` as this file's new contents.
    ///
    /// For a local worktree, writes to disk on the background executor and,
    /// if the worktree is shared, notifies collaborators via `BufferSaved`.
    /// For a remote worktree, asks the host to perform the save via RPC.
    /// Resolves to the saved version vector and the resulting file mtime.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.read(cx).id().to_proto();
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                // Only shared worktrees have a project id to notify.
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }

    /// Reads this file's current contents from the local filesystem.
    /// Returns `None` if the worktree is remote.
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }

    /// Asks the host of a remote worktree to format the given buffer.
    /// Returns `None` if this worktree is local.
    fn format_remote(
        &self,
        buffer_id: u64,
        cx: &mut MutableAppContext,
    ) -> Option<Task<Result<()>>> {
        let worktree = self.worktree.read(cx);
        let worktree_id = worktree.id().to_proto();
        let worktree = worktree.as_remote()?;
        let rpc = worktree.client.clone();
        let project_id = worktree.project_id;
        Some(cx.foreground().spawn(async move {
            rpc.request(proto::FormatBuffer {
                project_id,
                worktree_id,
                buffer_id,
            })
            .await?;
            Ok(())
        }))
    }

    /// Forwards a buffer operation to the worktree so it can be relayed to
    /// collaborators.
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }

    /// For a remote worktree, notifies the host that this buffer was closed,
    /// logging (not propagating) any send error. No-op for local worktrees.
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let project_id = worktree.project_id;
                let worktree_id = worktree.id().to_proto();
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                project_id,
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    // Enables downcasting back to the concrete `File` type (see `File::from_dyn`).
    fn as_any(&self) -> &dyn Any {
        self
    }
}
2378
2379impl File {
2380 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2381 file.and_then(|f| f.as_any().downcast_ref())
2382 }
2383
2384 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2385 self.worktree.read(cx).id()
2386 }
2387
2388 pub fn project_entry(&self, cx: &AppContext) -> Option<ProjectEntry> {
2389 self.entry_id.map(|entry_id| ProjectEntry {
2390 worktree_id: self.worktree_id(cx),
2391 entry_id,
2392 })
2393 }
2394}
2395
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    // Unique id, allocated from the worktree's `next_entry_id` counter.
    pub id: usize,
    // Whether this is a file or a (possibly not-yet-scanned) directory.
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // True if this entry matches a `.gitignore` rule in scope.
    pub is_ignored: bool,
}
2406
/// Distinguishes files from directories in a snapshot.
#[derive(Clone, Debug)]
pub enum EntryKind {
    // A directory that has been discovered but whose children have not been
    // scanned yet.
    PendingDir,
    // A fully-scanned directory.
    Dir,
    // A file, with the character bag used for fuzzy-matching its path.
    File(CharBag),
}
2413
2414impl Entry {
2415 fn new(
2416 path: Arc<Path>,
2417 metadata: &fs::Metadata,
2418 next_entry_id: &AtomicUsize,
2419 root_char_bag: CharBag,
2420 ) -> Self {
2421 Self {
2422 id: next_entry_id.fetch_add(1, SeqCst),
2423 kind: if metadata.is_dir {
2424 EntryKind::PendingDir
2425 } else {
2426 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2427 },
2428 path,
2429 inode: metadata.inode,
2430 mtime: metadata.mtime,
2431 is_symlink: metadata.is_symlink,
2432 is_ignored: false,
2433 }
2434 }
2435
2436 pub fn is_dir(&self) -> bool {
2437 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2438 }
2439
2440 pub fn is_file(&self) -> bool {
2441 matches!(self.kind, EntryKind::File(_))
2442 }
2443}
2444
2445impl sum_tree::Item for Entry {
2446 type Summary = EntrySummary;
2447
2448 fn summary(&self) -> Self::Summary {
2449 let visible_count = if self.is_ignored { 0 } else { 1 };
2450 let file_count;
2451 let visible_file_count;
2452 if self.is_file() {
2453 file_count = 1;
2454 visible_file_count = visible_count;
2455 } else {
2456 file_count = 0;
2457 visible_file_count = 0;
2458 }
2459
2460 EntrySummary {
2461 max_path: self.path.clone(),
2462 count: 1,
2463 visible_count,
2464 file_count,
2465 visible_file_count,
2466 }
2467 }
2468}
2469
2470impl sum_tree::KeyedItem for Entry {
2471 type Key = PathKey;
2472
2473 fn key(&self) -> Self::Key {
2474 PathKey(self.path.clone())
2475 }
2476}
2477
/// Aggregate data maintained by the sum tree over ranges of entries, enabling
/// seeks by path or by (filtered) entry count.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest (rightmost) path in the summarized range.
    max_path: Arc<Path>,
    // Total number of entries in the range.
    count: usize,
    // Number of non-ignored entries.
    visible_count: usize,
    // Number of file entries.
    file_count: usize,
    // Number of non-ignored file entries.
    visible_file_count: usize,
}
2486
2487impl Default for EntrySummary {
2488 fn default() -> Self {
2489 Self {
2490 max_path: Arc::from(Path::new("")),
2491 count: 0,
2492 visible_count: 0,
2493 file_count: 0,
2494 visible_file_count: 0,
2495 }
2496 }
2497}
2498
2499impl sum_tree::Summary for EntrySummary {
2500 type Context = ();
2501
2502 fn add_summary(&mut self, rhs: &Self, _: &()) {
2503 self.max_path = rhs.max_path.clone();
2504 self.visible_count += rhs.visible_count;
2505 self.file_count += rhs.file_count;
2506 self.visible_file_count += rhs.visible_file_count;
2507 }
2508}
2509
/// An entry in the id-keyed tree (`entries_by_id`), used to look up an
/// entry's path and ignore status by its id.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this entry was last updated.
    scan_id: usize,
}
2517
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // Summaries track only the maximum id, enabling seeks by id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2525
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    // Path entries are keyed (and ordered) by their id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2533
/// Summary for the id-keyed entry tree: just the largest id in the range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2538
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    // `summary` covers items to the right, so its `max_id` wins outright.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2546
// Allows seeking through the id-keyed tree using a bare id as the target.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2552
/// Ordering key for entries in the path-keyed tree: the entry's
/// worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2555
2556impl Default for PathKey {
2557 fn default() -> Self {
2558 Self(Path::new("").into())
2559 }
2560}
2561
// Allows seeking through the path-keyed tree using a `PathKey` as the target.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2567
/// Scans a worktree's directory tree on background threads, keeping the
/// shared snapshot up to date and reporting progress over `notify`.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Snapshot shared with the foreground worktree; mutated as scanning progresses.
    snapshot: Arc<Mutex<Snapshot>>,
    // Channel over which scan-state transitions (Scanning/Idle/Err) are reported.
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2574
impl BackgroundScanner {
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }

    /// The absolute path of the worktree root being scanned.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// Clones the current shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }

    /// Main loop of the scanner: performs the initial scan, then processes
    /// batches of filesystem events from `events_rx` until the receiver of
    /// `notify` is dropped. Each phase is bracketed by `Scanning`/`Idle`
    /// notifications; a failed initial scan is reported as `ScanState::Err`.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }

    /// Performs the initial recursive scan of the worktree root, fanning the
    /// work out over one task per CPU. If the root is a single file, there is
    /// nothing to recurse into and this is a no-op.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            // Seed the queue with the root; each scanned directory enqueues
            // jobs for its subdirectories via the same sender.
            let (tx, rx) = channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Drop our sender so the channel closes once all in-flight jobs
            // (each holding a sender clone) have completed.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }

    /// Scans a single directory: reads its children, builds their entries,
    /// applies gitignore rules, publishes the results into the snapshot, and
    /// enqueues jobs for any subdirectories.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, it sorts near the beginning of the directory listing,
                // so there should rarely be many entries to fix up. Update the ignore stack
                // associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        // Jobs were pushed in the same order as directory
                        // entries, so this iterator stays in lockstep.
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }

    /// Applies a batch of filesystem events to the snapshot: removes the
    /// affected paths, re-inserts those that still exist, rescans any
    /// directories among them, and refreshes ignore statuses. Returns false
    /// if the worktree root can no longer be canonicalized.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort by path so that an event for a directory subsumes (dedups)
        // events for paths inside it.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First pass: remove all affected paths from the snapshot. Paths that
        // still exist are re-inserted in the second pass below.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    // `ignore_stack_for_path` already returns the "all" stack
                    // when the path itself is ignored, so `is_all` suffices.
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // The path no longer exists; it was already removed above.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }

    /// Recomputes ignore statuses after an event batch: drops ignores whose
    /// `.gitignore` file disappeared, then re-applies every ignore that was
    /// touched in this scan to the subtree beneath it, in parallel.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip any paths nested beneath `parent_path` — updating the
            // ancestor will recursively visit them anyway.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }

    /// Re-evaluates the ignore status of the direct children of `job.path`,
    /// enqueuing a job for each child directory and batching edits for any
    /// entries whose status changed into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
}
2978
2979async fn refresh_entry(
2980 fs: &dyn Fs,
2981 snapshot: &Mutex<Snapshot>,
2982 path: Arc<Path>,
2983 abs_path: &Path,
2984) -> Result<Entry> {
2985 let root_char_bag;
2986 let next_entry_id;
2987 {
2988 let snapshot = snapshot.lock();
2989 root_char_bag = snapshot.root_char_bag;
2990 next_entry_id = snapshot.next_entry_id.clone();
2991 }
2992 let entry = Entry::new(
2993 path,
2994 &fs.metadata(abs_path)
2995 .await?
2996 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2997 &next_entry_id,
2998 root_char_bag,
2999 );
3000 Ok(snapshot.lock().insert_entry(entry, fs))
3001}
3002
3003fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3004 let mut result = root_char_bag;
3005 result.extend(
3006 path.to_string_lossy()
3007 .chars()
3008 .map(|c| c.to_ascii_lowercase()),
3009 );
3010 result
3011}
3012
/// A unit of work for the recursive scan: one directory to read.
struct ScanJob {
    abs_path: PathBuf,
    // Path of the directory relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules in effect for this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
}
3019
/// A unit of work for refreshing ignore statuses: one directory whose
/// children should be re-evaluated against the given ignore stack.
struct UpdateIgnoreStatusJob {
    path: Arc<Path>,
    // Gitignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for child directories are enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3025
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3033
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create and then delete a sentinel file, waiting for each change
            // to appear in the worktree, which proves the event stream has
            // caught up to the present.
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3066
/// Seek dimension tracking how many entries (total and filtered) a traversal
/// cursor has passed, along with the greatest path seen so far.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
3075
3076impl<'a> TraversalProgress<'a> {
3077 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3078 match (include_ignored, include_dirs) {
3079 (true, true) => self.count,
3080 (true, false) => self.file_count,
3081 (false, true) => self.visible_count,
3082 (false, false) => self.visible_file_count,
3083 }
3084 }
3085}
3086
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Accumulates all counts and tracks the rightmost path as the cursor
    // passes over summarized ranges.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3096
impl<'a> Default for TraversalProgress<'a> {
    // Initial progress: no entries passed, minimal path.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
3108
/// A cursor-based iterator over a snapshot's entries, optionally skipping
/// directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3114
impl<'a> Traversal<'a> {
    /// Advances past the current entry to the next one matching the
    /// traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` entries (under the current filters) have
    /// been passed. Returns whether the cursor moved onto an entry.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Seeks past the current entry's entire subtree to its next sibling
    /// (the first following entry that is not a descendant), repeating until
    /// an entry matching the filters is found or the tree is exhausted.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of filtered entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3160
3161impl<'a> Iterator for Traversal<'a> {
3162 type Item = &'a Entry;
3163
3164 fn next(&mut self) -> Option<Self::Item> {
3165 if let Some(item) = self.entry() {
3166 self.advance();
3167 Some(item)
3168 } else {
3169 None
3170 }
3171 }
3172}
3173
/// Targets a `Traversal` cursor can seek to.
#[derive(Debug)]
enum TraversalTarget {
    // Seek to an exact path.
    Path(&'a Path),
    // Seek past a path's entire subtree to its next sibling.
    PathSuccessor(&'a Path),
    // Seek to the nth entry, counted under the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3184
3185impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3186 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3187 match self {
3188 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3189 TraversalTarget::PathSuccessor(path) => {
3190 if !cursor_location.max_path.starts_with(path) {
3191 Ordering::Equal
3192 } else {
3193 Ordering::Greater
3194 }
3195 }
3196 TraversalTarget::Count {
3197 count,
3198 include_dirs,
3199 include_ignored,
3200 } => Ord::cmp(
3201 count,
3202 &cursor_location.count(*include_dirs, *include_ignored),
3203 ),
3204 }
3205 }
3206}
3207
/// Iterates over the direct children of a directory, skipping each child's
/// own subtree via `advance_to_sibling`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3212
3213impl<'a> Iterator for ChildEntriesIter<'a> {
3214 type Item = &'a Entry;
3215
3216 fn next(&mut self) -> Option<Self::Item> {
3217 if let Some(item) = self.traversal.entry() {
3218 if item.path.starts_with(&self.parent_path) {
3219 self.traversal.advance_to_sibling();
3220 return Some(item);
3221 }
3222 }
3223 None
3224 }
3225}
3226
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a local entry into its protobuf representation for sending to
    /// remote collaborators.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id as u64,
            is_dir: entry.is_dir(),
            // Paths are transmitted as (lossily-converted) strings.
            path: entry.path.to_string_lossy().to_string(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3240
3241impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3242 type Error = anyhow::Error;
3243
3244 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3245 if let Some(mtime) = entry.mtime {
3246 let kind = if entry.is_dir {
3247 EntryKind::Dir
3248 } else {
3249 let mut char_bag = root_char_bag.clone();
3250 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3251 EntryKind::File(char_bag)
3252 };
3253 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3254 Ok(Entry {
3255 id: entry.id as usize,
3256 kind,
3257 path: path.clone(),
3258 inode: entry.inode,
3259 mtime: mtime.into(),
3260 is_symlink: entry.is_symlink,
3261 is_ignored: entry.is_ignored,
3262 })
3263 } else {
3264 Err(anyhow!(
3265 "missing mtime in remote worktree entry {:?}",
3266 entry.path
3267 ))
3268 }
3269 }
3270}
3271
3272#[cfg(test)]
3273mod tests {
3274 use super::*;
3275 use crate::fs::FakeFs;
3276 use anyhow::Result;
3277 use client::test::{FakeHttpClient, FakeServer};
3278 use fs::RealFs;
3279 use gpui::test::subscribe;
3280 use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3281 use language::{Diagnostic, LanguageConfig};
3282 use lsp::Url;
3283 use rand::prelude::*;
3284 use serde_json::json;
3285 use std::{cell::RefCell, rc::Rc};
3286 use std::{
3287 env,
3288 fmt::Write,
3289 time::{SystemTime, UNIX_EPOCH},
3290 };
3291 use text::Point;
3292 use unindent::Unindent as _;
3293 use util::test::temp_tree;
3294
3295 #[gpui::test]
3296 async fn test_traversal(mut cx: gpui::TestAppContext) {
3297 let fs = FakeFs::new();
3298 fs.insert_tree(
3299 "/root",
3300 json!({
3301 ".gitignore": "a/b\n",
3302 "a": {
3303 "b": "",
3304 "c": "",
3305 }
3306 }),
3307 )
3308 .await;
3309
3310 let http_client = FakeHttpClient::with_404_response();
3311 let client = Client::new(http_client.clone());
3312 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3313
3314 let tree = Worktree::open_local(
3315 client,
3316 user_store,
3317 Arc::from(Path::new("/root")),
3318 Arc::new(fs),
3319 Default::default(),
3320 &mut cx.to_async(),
3321 )
3322 .await
3323 .unwrap();
3324 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3325 .await;
3326
3327 tree.read_with(&cx, |tree, _| {
3328 assert_eq!(
3329 tree.entries(false)
3330 .map(|entry| entry.path.as_ref())
3331 .collect::<Vec<_>>(),
3332 vec![
3333 Path::new(""),
3334 Path::new(".gitignore"),
3335 Path::new("a"),
3336 Path::new("a/c"),
3337 ]
3338 );
3339 })
3340 }
3341
3342 #[gpui::test]
3343 async fn test_save_file(mut cx: gpui::TestAppContext) {
3344 let dir = temp_tree(json!({
3345 "file1": "the old contents",
3346 }));
3347
3348 let http_client = FakeHttpClient::with_404_response();
3349 let client = Client::new(http_client.clone());
3350 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3351
3352 let tree = Worktree::open_local(
3353 client,
3354 user_store,
3355 dir.path(),
3356 Arc::new(RealFs),
3357 Default::default(),
3358 &mut cx.to_async(),
3359 )
3360 .await
3361 .unwrap();
3362 let buffer = tree
3363 .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3364 .await
3365 .unwrap();
3366 let save = buffer.update(&mut cx, |buffer, cx| {
3367 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3368 buffer.save(cx).unwrap()
3369 });
3370 save.await.unwrap();
3371
3372 let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
3373 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3374 }
3375
3376 #[gpui::test]
3377 async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
3378 let dir = temp_tree(json!({
3379 "file1": "the old contents",
3380 }));
3381 let file_path = dir.path().join("file1");
3382
3383 let http_client = FakeHttpClient::with_404_response();
3384 let client = Client::new(http_client.clone());
3385 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3386
3387 let tree = Worktree::open_local(
3388 client,
3389 user_store,
3390 file_path.clone(),
3391 Arc::new(RealFs),
3392 Default::default(),
3393 &mut cx.to_async(),
3394 )
3395 .await
3396 .unwrap();
3397 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3398 .await;
3399 cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));
3400
3401 let buffer = tree
3402 .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
3403 .await
3404 .unwrap();
3405 let save = buffer.update(&mut cx, |buffer, cx| {
3406 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3407 buffer.save(cx).unwrap()
3408 });
3409 save.await.unwrap();
3410
3411 let new_text = std::fs::read_to_string(file_path).unwrap();
3412 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3413 }
3414
    #[gpui::test]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        // Verifies that entry ids and open buffers track files across
        // renames/deletes on disk, and that a remote worktree can be brought
        // up to date from a snapshot diff (`build_update` / `apply_update`).
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let user_id = 5;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client.clone());
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;
        let tree = Worktree::open_local(
            client,
            user_store.clone(),
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Helper: open a buffer for a path and unwrap the result.
        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap() }
        };
        // Helper: look up the worktree entry id for a path, panicking with a
        // descriptive message if the entry doesn't exist.
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        // Capture entry ids before mutating the filesystem, so we can assert
        // they are preserved across renames below.
        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let remote = Worktree::remote(
            1,
            1,
            initial_snapshot.to_proto(&Default::default()),
            Client::new(http_client.clone()),
            user_store,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        // Expected tree contents after the renames/deletions above.
        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Entry ids are stable across renames.
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            // Open buffers' file paths follow the renames...
            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            // ...while a buffer whose file was deleted keeps its old path.
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message =
                tree.read(cx)
                    .snapshot()
                    .build_update(&initial_snapshot, 1, 1, true);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }
3570
3571 #[gpui::test]
3572 async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
3573 let dir = temp_tree(json!({
3574 ".git": {},
3575 ".gitignore": "ignored-dir\n",
3576 "tracked-dir": {
3577 "tracked-file1": "tracked contents",
3578 },
3579 "ignored-dir": {
3580 "ignored-file1": "ignored contents",
3581 }
3582 }));
3583
3584 let http_client = FakeHttpClient::with_404_response();
3585 let client = Client::new(http_client.clone());
3586 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3587
3588 let tree = Worktree::open_local(
3589 client,
3590 user_store,
3591 dir.path(),
3592 Arc::new(RealFs),
3593 Default::default(),
3594 &mut cx.to_async(),
3595 )
3596 .await
3597 .unwrap();
3598 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3599 .await;
3600 tree.flush_fs_events(&cx).await;
3601 cx.read(|cx| {
3602 let tree = tree.read(cx);
3603 let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
3604 let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
3605 assert_eq!(tracked.is_ignored, false);
3606 assert_eq!(ignored.is_ignored, true);
3607 });
3608
3609 std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
3610 std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
3611 tree.flush_fs_events(&cx).await;
3612 cx.read(|cx| {
3613 let tree = tree.read(cx);
3614 let dot_git = tree.entry_for_path(".git").unwrap();
3615 let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
3616 let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
3617 assert_eq!(tracked.is_ignored, false);
3618 assert_eq!(ignored.is_ignored, true);
3619 assert_eq!(dot_git.is_ignored, true);
3620 });
3621 }
3622
3623 #[gpui::test]
3624 async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
3625 let user_id = 100;
3626 let http_client = FakeHttpClient::with_404_response();
3627 let mut client = Client::new(http_client);
3628 let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3629 let user_store = server.build_user_store(client.clone(), &mut cx).await;
3630
3631 let fs = Arc::new(FakeFs::new());
3632 fs.insert_tree(
3633 "/the-dir",
3634 json!({
3635 "a.txt": "a-contents",
3636 "b.txt": "b-contents",
3637 }),
3638 )
3639 .await;
3640
3641 let worktree = Worktree::open_local(
3642 client.clone(),
3643 user_store,
3644 "/the-dir".as_ref(),
3645 fs,
3646 Default::default(),
3647 &mut cx.to_async(),
3648 )
3649 .await
3650 .unwrap();
3651
3652 // Spawn multiple tasks to open paths, repeating some paths.
3653 let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
3654 (
3655 worktree.open_buffer("a.txt", cx),
3656 worktree.open_buffer("b.txt", cx),
3657 worktree.open_buffer("a.txt", cx),
3658 )
3659 });
3660
3661 let buffer_a_1 = buffer_a_1.await.unwrap();
3662 let buffer_a_2 = buffer_a_2.await.unwrap();
3663 let buffer_b = buffer_b.await.unwrap();
3664 assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
3665 assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
3666
3667 // There is only one buffer per path.
3668 let buffer_a_id = buffer_a_1.id();
3669 assert_eq!(buffer_a_2.id(), buffer_a_id);
3670
3671 // Open the same path again while it is still open.
3672 drop(buffer_a_1);
3673 let buffer_a_3 = worktree
3674 .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
3675 .await
3676 .unwrap();
3677
3678 // There's still only one buffer per path.
3679 assert_eq!(buffer_a_3.id(), buffer_a_id);
3680 }
3681
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        // Verifies the buffer's dirty state and the Dirtied/Saved/Edited
        // event sequence across edits, saves, and on-disk deletions.
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let buffer1 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Collects every event the buffer emits, for assertion below.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            // Simulate a save completing, using the current version and mtime.
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            // Only the first post-save edit emits Dirtied; subsequent edits
            // while already dirty emit Edited alone.
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        tree.flush_fs_events(&cx).await;
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        // Discard the events from the edit above; only the deletion's
        // events are asserted on.
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3821
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        // Verifies how a buffer reacts to its file changing on disk: a clean
        // buffer reloads in place, while a dirty buffer keeps its contents
        // and is flagged as conflicted.
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            // The reload leaves the buffer clean and conflict-free.
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3924
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        // Verifies that LSP progress notifications and published diagnostics
        // are translated into worktree events, and that diagnostics are
        // applied to buffers opened after the diagnostics arrived.
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Arc::new(languages),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
            .await
            .unwrap();

        let mut events = subscribe(&tree, &mut cx);

        // The first progress start emits an "updating" event.
        fake_server.start_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdating
        );

        // Nested/overlapping progress with the same token emits no
        // additional events until the outermost progress ends (see below).
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        // Publish a diagnostic for a file that has no open buffer yet.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiagnosticsUpdated(Arc::from(Path::new("a.rs")))
        );

        // Only after every outstanding progress with the token has ended is
        // the "updated" event emitted.
        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdated
        );

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // The diagnostic published earlier shows up in the newly-opened buffer.
        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
4033
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        // Verifies that LSP diagnostics linked via `related_information` are
        // grouped: each primary diagnostic and its hints share a `group_id`,
        // with `is_primary` distinguishing the primary entry.
        let fs = Arc::new(FakeFs::new());
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Two diagnostic groups, in the style rust-analyzer emits them:
        // "error 1" (with one hint) and "error 2" (with two hints). Each hint
        // also appears as a standalone HINT diagnostic pointing back at its
        // primary via related_information.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| {
                tree.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All entries, ordered by position: group 0 is "error 1" + its hint,
        // group 1 is "error 2" + its two hints.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be queried individually by its id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
4298
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Fuzz test: randomly mutate a real directory tree while feeding the
        // resulting fs events to a BackgroundScanner in random batches, then
        // check scanner invariants, compare against a fresh scan, and verify
        // that snapshot diffs (`build_update`/`apply_update`) reconverge.
        //
        // OPERATIONS / INITIAL_ENTRIES env vars tune the test size.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        // insertion_probability of 1.0 => only create files/dirs here.
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        // An empty snapshot rooted at the temp dir; kept around (cloned) so a
        // second scanner can start from the same initial state below.
        let mut initial_snapshot = Snapshot {
            id: WorktreeId::from_usize(0),
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave mutations with event delivery in random order and batch
        // sizes, occasionally saving snapshots for the diff test below.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Deliver any remaining events so the scanner is fully caught up.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A fresh scan of the final tree must agree with the incrementally
        // maintained snapshot.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Every saved intermediate snapshot must reconverge to the final
        // state when patched with a diff built against it.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Updates that exclude ignored entries are built against a
                // snapshot with the ignored entries stripped out.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4415
4416 fn randomly_mutate_tree(
4417 root_path: &Path,
4418 insertion_probability: f64,
4419 rng: &mut impl Rng,
4420 ) -> Result<Vec<fsevent::Event>> {
4421 let root_path = root_path.canonicalize().unwrap();
4422 let (dirs, files) = read_dir_recursive(root_path.clone());
4423
4424 let mut events = Vec::new();
4425 let mut record_event = |path: PathBuf| {
4426 events.push(fsevent::Event {
4427 event_id: SystemTime::now()
4428 .duration_since(UNIX_EPOCH)
4429 .unwrap()
4430 .as_secs(),
4431 flags: fsevent::StreamFlags::empty(),
4432 path,
4433 });
4434 };
4435
4436 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4437 let path = dirs.choose(rng).unwrap();
4438 let new_path = path.join(gen_name(rng));
4439
4440 if rng.gen() {
4441 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4442 std::fs::create_dir(&new_path)?;
4443 } else {
4444 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4445 std::fs::write(&new_path, "")?;
4446 }
4447 record_event(new_path);
4448 } else if rng.gen_bool(0.05) {
4449 let ignore_dir_path = dirs.choose(rng).unwrap();
4450 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4451
4452 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4453 let files_to_ignore = {
4454 let len = rng.gen_range(0..=subfiles.len());
4455 subfiles.choose_multiple(rng, len)
4456 };
4457 let dirs_to_ignore = {
4458 let len = rng.gen_range(0..subdirs.len());
4459 subdirs.choose_multiple(rng, len)
4460 };
4461
4462 let mut ignore_contents = String::new();
4463 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4464 write!(
4465 ignore_contents,
4466 "{}\n",
4467 path_to_ignore
4468 .strip_prefix(&ignore_dir_path)?
4469 .to_str()
4470 .unwrap()
4471 )
4472 .unwrap();
4473 }
4474 log::info!(
4475 "Creating {:?} with contents:\n{}",
4476 ignore_path.strip_prefix(&root_path)?,
4477 ignore_contents
4478 );
4479 std::fs::write(&ignore_path, ignore_contents).unwrap();
4480 record_event(ignore_path);
4481 } else {
4482 let old_path = {
4483 let file_path = files.choose(rng);
4484 let dir_path = dirs[1..].choose(rng);
4485 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4486 };
4487
4488 let is_rename = rng.gen();
4489 if is_rename {
4490 let new_path_parent = dirs
4491 .iter()
4492 .filter(|d| !d.starts_with(old_path))
4493 .choose(rng)
4494 .unwrap();
4495
4496 let overwrite_existing_dir =
4497 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4498 let new_path = if overwrite_existing_dir {
4499 std::fs::remove_dir_all(&new_path_parent).ok();
4500 new_path_parent.to_path_buf()
4501 } else {
4502 new_path_parent.join(gen_name(rng))
4503 };
4504
4505 log::info!(
4506 "Renaming {:?} to {}{:?}",
4507 old_path.strip_prefix(&root_path)?,
4508 if overwrite_existing_dir {
4509 "overwrite "
4510 } else {
4511 ""
4512 },
4513 new_path.strip_prefix(&root_path)?
4514 );
4515 std::fs::rename(&old_path, &new_path)?;
4516 record_event(old_path.clone());
4517 record_event(new_path);
4518 } else if old_path.is_dir() {
4519 let (dirs, files) = read_dir_recursive(old_path.clone());
4520
4521 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4522 std::fs::remove_dir_all(&old_path).unwrap();
4523 for file in files {
4524 record_event(file);
4525 }
4526 for dir in dirs {
4527 record_event(dir);
4528 }
4529 } else {
4530 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4531 std::fs::remove_file(old_path).unwrap();
4532 record_event(old_path.clone());
4533 }
4534 }
4535
4536 Ok(events)
4537 }
4538
/// Walks `path` depth-first, returning every directory (including `path`
/// itself) and every file found beneath it.
///
/// Test helper: I/O failures panic via `unwrap`, which is acceptable here
/// because a broken fixture tree is a test bug, not a recoverable error.
fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
    // Read the directory before `path` is moved into the result vector.
    let entries = std::fs::read_dir(&path).unwrap();
    let (mut dirs, mut files) = (vec![path], Vec::new());
    for entry in entries {
        let child = entry.unwrap().path();
        if !child.is_dir() {
            files.push(child);
        } else {
            let (sub_dirs, sub_files) = read_dir_recursive(child);
            dirs.extend(sub_dirs);
            files.extend(sub_files);
        }
    }
    (dirs, files)
}
4555
4556 fn gen_name(rng: &mut impl Rng) -> String {
4557 (0..6)
4558 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4559 .map(char::from)
4560 .collect()
4561 }
4562
impl Snapshot {
    /// Test-only sanity check that the snapshot's internal indexes agree
    /// with each other. Panics (via `assert!`/`assert_eq!`) on the first
    /// inconsistency found.
    fn check_invariants(&self) {
        // The `files` iterators must yield exactly the file entries stored
        // in `entries_by_path`, in the same order. The boolean argument
        // appears to mean "include ignored files": the iterator built with
        // `false` is only matched against entries whose `is_ignored` flag is
        // unset — TODO(review): confirm the parameter's meaning at its
        // definition site.
        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>() {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if !entry.is_ignored {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }
        // Both iterators must be exhausted: no file may exist in the
        // iterators that has no matching entry.
        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // Walk the tree from the root via `child_entries` and compare the
        // visit order against a straight scan of `entries_by_path`.
        // NOTE(review): despite the `bfs_paths` name, inserting children at
        // the pre-loop stack length and popping from the end visits nodes in
        // depth-first preorder — which is what makes the result comparable
        // to the path-ordered cursor scan below.
        let mut bfs_paths = Vec::new();
        let mut stack = vec![Path::new("")];
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths = self
            .entries_by_path
            .cursor::<()>()
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths);

        // Every tracked ignore must correspond to a directory entry that
        // still exists and still contains a `.gitignore` file.
        for (ignore_parent_path, _) in &self.ignores {
            assert!(self.entry_for_path(ignore_parent_path).is_some());
            assert!(self
                .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                .is_some());
        }
    }

    /// Flattens the snapshot into `(path, inode, is_ignored)` triples,
    /// sorted by path, for equality comparisons in tests. When
    /// `include_ignored` is false, ignored entries are filtered out.
    fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
        let mut paths = Vec::new();
        for entry in self.entries_by_path.cursor::<()>() {
            if include_ignored || !entry.is_ignored {
                paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
            }
        }
        paths.sort_by(|a, b| a.0.cmp(&b.0));
        paths
    }
}
4614}