1pub mod fs;
2mod ignore;
3pub mod worktree;
4
5use anyhow::{anyhow, Result};
6use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore};
7use clock::ReplicaId;
8use collections::{hash_map, HashMap, HashSet};
9use futures::Future;
10use fuzzy::{PathMatch, PathMatchCandidate, PathMatchCandidateSet};
11use gpui::{
12 AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task,
13 WeakModelHandle,
14};
15use language::{
16 range_from_lsp, Bias, Buffer, Diagnostic, DiagnosticEntry, File as _, Language,
17 LanguageRegistry, Operation, PointUtf16, ToOffset, ToPointUtf16,
18};
19use lsp::{DiagnosticSeverity, LanguageServer};
20use postage::{prelude::Stream, watch};
21use smol::block_on;
22use std::{
23 convert::TryInto,
24 ops::Range,
25 path::{Path, PathBuf},
26 sync::{atomic::AtomicBool, Arc},
27};
28use util::{post_inc, ResultExt, TryFutureExt as _};
29
30pub use fs::*;
31pub use worktree::*;
32
/// Top-level model for a collection of worktrees, either hosted locally or
/// joined from a remote collaborator over RPC.
pub struct Project {
    // Worktrees may be held strongly or weakly; dead weak handles are pruned
    // lazily wherever the list is iterated.
    worktrees: Vec<WorktreeHandle>,
    active_entry: Option<ProjectEntry>,
    languages: Arc<LanguageRegistry>,
    // One language server per (worktree, language name) pair.
    language_servers: HashMap<(WorktreeId, String), Arc<LanguageServer>>,
    client: Arc<client::Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Whether this project is local or remote, plus the per-mode state.
    client_state: ProjectClientState,
    collaborators: HashMap<PeerId, Collaborator>,
    // RPC message subscriptions; dropping these unregisters the handlers.
    subscriptions: Vec<client::Subscription>,
    // Number of language servers currently producing disk-based diagnostics.
    language_servers_with_diagnostics_running: isize,
    // Buffers tracked by the project, keyed by the buffer's remote id.
    open_buffers: HashMap<usize, OpenBuffer>,
    // In-flight buffer loads, so concurrent `open_buffer` calls for the same
    // path share a single load task.
    loading_buffers: HashMap<
        ProjectPath,
        postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
    >,
    // Buffers that have been shared with each remote peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
}
52
/// State of a buffer tracked by the project: either operations received
/// before the buffer finished loading locally, or a weak handle to the
/// loaded buffer.
enum OpenBuffer {
    Operations(Vec<Operation>),
    Loaded(WeakModelHandle<Buffer>),
}
57
/// A handle to a worktree that either keeps the worktree alive (strong) or
/// allows it to be dropped independently (weak).
enum WorktreeHandle {
    Strong(ModelHandle<Worktree>),
    Weak(WeakModelHandle<Worktree>),
}
62
/// Per-mode client state: `Local` for projects hosted on this machine,
/// `Remote` for projects joined from another peer.
enum ProjectClientState {
    Local {
        // Whether this project is currently shared with collaborators.
        is_shared: bool,
        // Server-assigned project id; `None` until registration succeeds.
        remote_id_tx: watch::Sender<Option<u64>>,
        remote_id_rx: watch::Receiver<Option<u64>>,
        // Background task that (re)registers the project on connection
        // changes; kept alive by this field.
        _maintain_remote_id_task: Task<Option<()>>,
    },
    Remote {
        // Set when the host stops sharing; the project becomes read-only.
        sharing_has_stopped: bool,
        remote_id: u64,
        replica_id: ReplicaId,
    },
}
76
/// A peer participating in a shared project, with the replica id used to
/// attribute their edits.
#[derive(Clone, Debug)]
pub struct Collaborator {
    pub user: Arc<User>,
    pub peer_id: PeerId,
    pub replica_id: ReplicaId,
}
83
/// Events emitted by the project for observers (e.g. UI panels).
#[derive(Clone, Debug, PartialEq)]
pub enum Event {
    ActiveEntryChanged(Option<ProjectEntry>),
    WorktreeRemoved(WorktreeId),
    // Disk-based diagnostics lifecycle, in start/update/finish order.
    DiskBasedDiagnosticsStarted,
    DiskBasedDiagnosticsUpdated,
    DiskBasedDiagnosticsFinished,
    DiagnosticsUpdated(ProjectPath),
}
93
/// A path to a file, identified by worktree plus worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct ProjectPath {
    pub worktree_id: WorktreeId,
    pub path: Arc<Path>,
}
99
/// Counts of primary diagnostics for a file, broken down by severity.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct DiagnosticSummary {
    pub error_count: usize,
    pub warning_count: usize,
    pub info_count: usize,
    pub hint_count: usize,
}
107
/// Result of a go-to-definition request: the buffer containing the target
/// and the anchored range of the definition within it.
#[derive(Debug)]
pub struct Definition {
    pub target_buffer: ModelHandle<Buffer>,
    pub target_range: Range<language::Anchor>,
}
113
114impl DiagnosticSummary {
115 fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
116 let mut this = Self {
117 error_count: 0,
118 warning_count: 0,
119 info_count: 0,
120 hint_count: 0,
121 };
122
123 for entry in diagnostics {
124 if entry.diagnostic.is_primary {
125 match entry.diagnostic.severity {
126 DiagnosticSeverity::ERROR => this.error_count += 1,
127 DiagnosticSeverity::WARNING => this.warning_count += 1,
128 DiagnosticSeverity::INFORMATION => this.info_count += 1,
129 DiagnosticSeverity::HINT => this.hint_count += 1,
130 _ => {}
131 }
132 }
133 }
134
135 this
136 }
137
138 pub fn to_proto(&self, path: Arc<Path>) -> proto::DiagnosticSummary {
139 proto::DiagnosticSummary {
140 path: path.to_string_lossy().to_string(),
141 error_count: self.error_count as u32,
142 warning_count: self.warning_count as u32,
143 info_count: self.info_count as u32,
144 hint_count: self.hint_count as u32,
145 }
146 }
147}
148
/// Identifies a single filesystem entry within a worktree.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ProjectEntry {
    pub worktree_id: WorktreeId,
    pub entry_id: usize,
}
154
155impl Project {
    /// Creates a local project. Spawns a background task that registers the
    /// project with the server whenever the client connects, re-registers
    /// each worktree, and clears the remote id on disconnect.
    pub fn local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut MutableAppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let (remote_id_tx, remote_id_rx) = watch::channel();
            let _maintain_remote_id_task = cx.spawn_weak({
                let rpc = client.clone();
                move |this, mut cx| {
                    async move {
                        // React to every connection status change.
                        let mut status = rpc.status();
                        while let Some(status) = status.recv().await {
                            if let Some(this) = this.upgrade(&cx) {
                                // Register the project while connected; treat
                                // any other status as "no remote id".
                                let remote_id = if let client::Status::Connected { .. } = status {
                                    let response = rpc.request(proto::RegisterProject {}).await?;
                                    Some(response.project_id)
                                } else {
                                    None
                                };

                                if let Some(project_id) = remote_id {
                                    // Collect the registration tasks first so the
                                    // model isn't borrowed across the awaits.
                                    let mut registrations = Vec::new();
                                    this.update(&mut cx, |this, cx| {
                                        for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                                            registrations.push(worktree.update(
                                                cx,
                                                |worktree, cx| {
                                                    let worktree = worktree.as_local_mut().unwrap();
                                                    worktree.register(project_id, cx)
                                                },
                                            ));
                                        }
                                    });
                                    for registration in registrations {
                                        registration.await?;
                                    }
                                }
                                this.update(&mut cx, |this, cx| this.set_remote_id(remote_id, cx));
                            }
                        }
                        Ok(())
                    }
                    .log_err()
                }
            });

            Self {
                worktrees: Default::default(),
                collaborators: Default::default(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                client_state: ProjectClientState::Local {
                    is_shared: false,
                    remote_id_tx,
                    remote_id_rx,
                    _maintain_remote_id_task,
                },
                subscriptions: Vec::new(),
                active_entry: None,
                languages,
                client,
                user_store,
                fs,
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            }
        })
    }
228
    /// Joins the remote project with the given id: connects, fetches the
    /// project's worktrees and collaborators, and subscribes to the RPC
    /// messages a guest needs to stay in sync.
    pub async fn remote(
        remote_id: u64,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        client.authenticate_and_connect(&cx).await?;

        let response = client
            .request(proto::JoinProject {
                project_id: remote_id,
            })
            .await?;

        // The server assigns this client's replica id for CRDT operations.
        let replica_id = response.replica_id as ReplicaId;

        let mut worktrees = Vec::new();
        for worktree in response.worktrees {
            worktrees
                .push(Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx).await?);
        }

        // Resolve collaborator user metadata before constructing the model.
        let user_ids = response
            .collaborators
            .iter()
            .map(|peer| peer.user_id)
            .collect();
        user_store
            .update(cx, |user_store, cx| user_store.load_users(user_ids, cx))
            .await?;
        let mut collaborators = HashMap::default();
        for message in response.collaborators {
            let collaborator = Collaborator::from_proto(message, &user_store, cx).await?;
            collaborators.insert(collaborator.peer_id, collaborator);
        }

        Ok(cx.add_model(|cx| {
            let mut this = Self {
                worktrees: Vec::new(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                active_entry: None,
                collaborators,
                languages,
                user_store,
                fs,
                // Guest-side message handlers; dropping these subscriptions
                // unregisters them.
                subscriptions: vec![
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unshare_project),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_share_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unregister_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_update_diagnostic_summary,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updating,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updated,
                    ),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer_file),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_reloaded),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
                ],
                client,
                client_state: ProjectClientState::Remote {
                    sharing_has_stopped: false,
                    remote_id,
                    replica_id,
                },
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            };
            for worktree in worktrees {
                this.add_worktree(&worktree, cx);
            }
            this
        }))
    }
320
    /// Records the server-assigned project id (if any) and rebuilds the
    /// host-side RPC subscriptions that are only relevant while registered.
    fn set_remote_id(&mut self, remote_id: Option<u64>, cx: &mut ModelContext<Self>) {
        if let ProjectClientState::Local { remote_id_tx, .. } = &mut self.client_state {
            *remote_id_tx.borrow_mut() = remote_id;
        }

        // Dropping the previous subscriptions unregisters their handlers.
        self.subscriptions.clear();
        if let Some(remote_id) = remote_id {
            let client = &self.client;
            self.subscriptions.extend([
                client.subscribe_to_entity(remote_id, cx, Self::handle_open_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_close_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
                client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
                client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
                client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_save_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
                client.subscribe_to_entity(remote_id, cx, Self::handle_format_buffer),
            ]);
        }
    }
342
343 pub fn remote_id(&self) -> Option<u64> {
344 match &self.client_state {
345 ProjectClientState::Local { remote_id_rx, .. } => *remote_id_rx.borrow(),
346 ProjectClientState::Remote { remote_id, .. } => Some(*remote_id),
347 }
348 }
349
350 pub fn next_remote_id(&self) -> impl Future<Output = u64> {
351 let mut id = None;
352 let mut watch = None;
353 match &self.client_state {
354 ProjectClientState::Local { remote_id_rx, .. } => watch = Some(remote_id_rx.clone()),
355 ProjectClientState::Remote { remote_id, .. } => id = Some(*remote_id),
356 }
357
358 async move {
359 if let Some(id) = id {
360 return id;
361 }
362 let mut watch = watch.unwrap();
363 loop {
364 let id = *watch.borrow();
365 if let Some(id) = id {
366 return id;
367 }
368 watch.recv().await;
369 }
370 }
371 }
372
373 pub fn replica_id(&self) -> ReplicaId {
374 match &self.client_state {
375 ProjectClientState::Local { .. } => 0,
376 ProjectClientState::Remote { replica_id, .. } => *replica_id,
377 }
378 }
379
    /// The current collaborators in this project, keyed by peer id.
    pub fn collaborators(&self) -> &HashMap<PeerId, Collaborator> {
        &self.collaborators
    }
383
384 pub fn worktrees<'a>(
385 &'a self,
386 cx: &'a AppContext,
387 ) -> impl 'a + Iterator<Item = ModelHandle<Worktree>> {
388 self.worktrees
389 .iter()
390 .filter_map(move |worktree| worktree.upgrade(cx))
391 }
392
393 pub fn worktree_for_id(
394 &self,
395 id: WorktreeId,
396 cx: &AppContext,
397 ) -> Option<ModelHandle<Worktree>> {
398 self.worktrees(cx)
399 .find(|worktree| worktree.read(cx).id() == id)
400 }
401
    /// Shares this local project with collaborators: marks it shared, informs
    /// the server, then shares each worktree. Fails for remote projects or
    /// before the project has been assigned a remote id.
    pub fn share(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
        let rpc = self.client.clone();
        cx.spawn(|this, mut cx| async move {
            // Flip the shared flag and read the project id up front.
            let project_id = this.update(&mut cx, |this, _| {
                if let ProjectClientState::Local {
                    is_shared,
                    remote_id_rx,
                    ..
                } = &mut this.client_state
                {
                    *is_shared = true;
                    remote_id_rx
                        .borrow()
                        .ok_or_else(|| anyhow!("no project id"))
                } else {
                    Err(anyhow!("can't share a remote project"))
                }
            })?;

            rpc.request(proto::ShareProject { project_id }).await?;
            // Collect the share tasks first so the model isn't borrowed
            // across the awaits below.
            let mut tasks = Vec::new();
            this.update(&mut cx, |this, cx| {
                for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                    worktree.update(cx, |worktree, cx| {
                        let worktree = worktree.as_local_mut().unwrap();
                        tasks.push(worktree.share(project_id, cx));
                    });
                }
            });
            for task in tasks {
                task.await?;
            }
            this.update(&mut cx, |_, cx| cx.notify());
            Ok(())
        })
    }
438
439 pub fn unshare(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
440 let rpc = self.client.clone();
441 cx.spawn(|this, mut cx| async move {
442 let project_id = this.update(&mut cx, |this, _| {
443 if let ProjectClientState::Local {
444 is_shared,
445 remote_id_rx,
446 ..
447 } = &mut this.client_state
448 {
449 *is_shared = false;
450 remote_id_rx
451 .borrow()
452 .ok_or_else(|| anyhow!("no project id"))
453 } else {
454 Err(anyhow!("can't share a remote project"))
455 }
456 })?;
457
458 rpc.send(proto::UnshareProject { project_id }).await?;
459 this.update(&mut cx, |this, cx| {
460 this.collaborators.clear();
461 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
462 worktree.update(cx, |worktree, _| {
463 worktree.as_local_mut().unwrap().unshare();
464 });
465 }
466 cx.notify()
467 });
468 Ok(())
469 })
470 }
471
472 pub fn is_read_only(&self) -> bool {
473 match &self.client_state {
474 ProjectClientState::Local { .. } => false,
475 ProjectClientState::Remote {
476 sharing_has_stopped,
477 ..
478 } => *sharing_has_stopped,
479 }
480 }
481
482 pub fn is_local(&self) -> bool {
483 match &self.client_state {
484 ProjectClientState::Local { .. } => true,
485 ProjectClientState::Remote { .. } => false,
486 }
487 }
488
    /// Opens the buffer at `path`, reusing an already-open buffer or joining
    /// an in-flight load for the same path so concurrent callers share one
    /// buffer instance.
    pub fn open_buffer(
        &mut self,
        path: impl Into<ProjectPath>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let project_path = path.into();
        let worktree = if let Some(worktree) = self.worktree_for_id(project_path.worktree_id, cx) {
            worktree
        } else {
            return Task::ready(Err(anyhow!("no such worktree")));
        };

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = self.get_open_buffer(&project_path, cx);
        if let Some(existing_buffer) = existing_buffer {
            return Task::ready(Ok(existing_buffer));
        }

        let mut loading_watch = match self.loading_buffers.entry(project_path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = worktree.update(cx, |worktree, cx| {
                    worktree.load_buffer(&project_path.path, cx)
                });

                // Publish the result (success or error) through the watch so
                // every waiter sees it.
                cx.spawn(move |this, mut cx| async move {
                    let load_result = load_buffer.await;
                    *tx.borrow_mut() = Some(this.update(&mut cx, |this, cx| {
                        // Record the fact that the buffer is no longer loading.
                        this.loading_buffers.remove(&project_path);
                        let buffer = load_result.map_err(Arc::new)?;
                        this.register_buffer(&buffer, &worktree, cx)?;
                        Ok(buffer)
                    }));
                })
                .detach();
                rx
            }
        };

        // Wait until the load (ours or a pre-existing one) publishes a result.
        cx.foreground().spawn(async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    match result {
                        Ok(buffer) => return Ok(buffer.clone()),
                        Err(error) => return Err(anyhow!("{}", error)),
                    }
                }
                loading_watch.recv().await;
            }
        })
    }
548
    /// Saves `buffer` to `abs_path`, creating a weak worktree for that path
    /// if no existing worktree contains it, then re-derives the buffer's
    /// language from its new file name.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        abs_path: PathBuf,
        cx: &mut ModelContext<Project>,
    ) -> Task<Result<()>> {
        let worktree_task = self.find_or_create_local_worktree(&abs_path, false, cx);
        cx.spawn(|this, mut cx| async move {
            let (worktree, path) = worktree_task.await?;
            worktree
                .update(&mut cx, |worktree, cx| {
                    worktree
                        .as_local_mut()
                        .unwrap()
                        .save_buffer_as(buffer.clone(), path, cx)
                })
                .await?;
            // The file name may have changed, so the language may too.
            this.update(&mut cx, |this, cx| {
                this.assign_language_to_buffer(&buffer, &worktree, cx);
            });
            Ok(())
        })
    }
572
573 #[cfg(any(test, feature = "test-support"))]
574 pub fn has_open_buffer(&self, path: impl Into<ProjectPath>, cx: &AppContext) -> bool {
575 let path = path.into();
576 if let Some(worktree) = self.worktree_for_id(path.worktree_id, cx) {
577 self.open_buffers.iter().any(|(_, buffer)| {
578 if let Some(buffer) = buffer.upgrade(cx) {
579 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
580 if file.worktree == worktree && file.path() == &path.path {
581 return true;
582 }
583 }
584 }
585 false
586 })
587 } else {
588 false
589 }
590 }
591
    /// Returns the open buffer for `path`, if any. As a side effect, prunes
    /// `open_buffers` entries whose weak handles have been dropped.
    /// NOTE(review): the `retain` also discards `OpenBuffer::Operations`
    /// entries (operations queued for not-yet-loaded buffers) — confirm
    /// that is intended on this code path.
    fn get_open_buffer(
        &mut self,
        path: &ProjectPath,
        cx: &mut ModelContext<Self>,
    ) -> Option<ModelHandle<Buffer>> {
        let mut result = None;
        let worktree = self.worktree_for_id(path.worktree_id, cx)?;
        self.open_buffers.retain(|_, buffer| {
            if let OpenBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    // Match on both worktree and relative path.
                    if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                        if file.worktree == worktree && file.path() == &path.path {
                            result = Some(buffer);
                        }
                    }
                    return true;
                }
            }
            false
        });
        result
    }
614
615 fn register_buffer(
616 &mut self,
617 buffer: &ModelHandle<Buffer>,
618 worktree: &ModelHandle<Worktree>,
619 cx: &mut ModelContext<Self>,
620 ) -> Result<()> {
621 match self.open_buffers.insert(
622 buffer.read(cx).remote_id() as usize,
623 OpenBuffer::Loaded(buffer.downgrade()),
624 ) {
625 Some(OpenBuffer::Operations(pending_ops)) => {
626 buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
627 }
628 Some(OpenBuffer::Loaded(_)) => {
629 return Err(anyhow!("registered the same buffer twice"));
630 }
631 None => {}
632 }
633 self.assign_language_to_buffer(&buffer, &worktree, cx);
634 Ok(())
635 }
636
    /// Picks a language for `buffer` based on its file name, starts (or
    /// reuses) the corresponding language server for local worktrees, and
    /// seeds the buffer with any diagnostics already stored for its path.
    /// Returns `None` early if the buffer has no file.
    fn assign_language_to_buffer(
        &mut self,
        buffer: &ModelHandle<Buffer>,
        worktree: &ModelHandle<Worktree>,
        cx: &mut ModelContext<Self>,
    ) -> Option<()> {
        let (path, full_path) = {
            let file = buffer.read(cx).file()?;
            (file.path().clone(), file.full_path(cx))
        };

        // If the buffer has a language, set it and start/assign the language server
        if let Some(language) = self.languages.select_language(&full_path) {
            buffer.update(cx, |buffer, cx| {
                buffer.set_language(Some(language.clone()), cx);
            });

            // For local worktrees, start a language server if needed.
            // Also assign the language server and any previously stored diagnostics to the buffer.
            if let Some(local_worktree) = worktree.read(cx).as_local() {
                let worktree_id = local_worktree.id();
                let worktree_abs_path = local_worktree.abs_path().clone();

                // One server per (worktree, language) pair; start it lazily on
                // the first buffer of that language.
                let language_server = match self
                    .language_servers
                    .entry((worktree_id, language.name().to_string()))
                {
                    hash_map::Entry::Occupied(e) => Some(e.get().clone()),
                    hash_map::Entry::Vacant(e) => Self::start_language_server(
                        self.client.clone(),
                        language.clone(),
                        &worktree_abs_path,
                        cx,
                    )
                    .map(|server| e.insert(server).clone()),
                };

                buffer.update(cx, |buffer, cx| {
                    buffer.set_language_server(language_server, cx);
                });
            }
        }

        // Apply diagnostics that arrived before the buffer was opened.
        if let Some(local_worktree) = worktree.read(cx).as_local() {
            if let Some(diagnostics) = local_worktree.diagnostics_for_path(&path) {
                buffer.update(cx, |buffer, cx| {
                    buffer.update_diagnostics(None, diagnostics, cx).log_err();
                });
            }
        }

        None
    }
690
    /// Starts a language server for `language` rooted at `worktree_path` and
    /// wires up its notifications: `PublishDiagnostics` and (when the
    /// language defines a progress token) disk-based-diagnostics progress
    /// are funneled through a channel and processed on a spawned task.
    /// Returns `None` if the server fails to start.
    fn start_language_server(
        rpc: Arc<Client>,
        language: Arc<Language>,
        worktree_path: &Path,
        cx: &mut ModelContext<Self>,
    ) -> Option<Arc<LanguageServer>> {
        // Internal event type bridging LSP callbacks to the async task below.
        enum LspEvent {
            DiagnosticsStart,
            DiagnosticsUpdate(lsp::PublishDiagnosticsParams),
            DiagnosticsFinish,
        }

        let language_server = language
            .start_server(worktree_path, cx)
            .log_err()
            .flatten()?;
        let disk_based_sources = language
            .disk_based_diagnostic_sources()
            .cloned()
            .unwrap_or_default();
        let disk_based_diagnostics_progress_token =
            language.disk_based_diagnostics_progress_token().cloned();
        let has_disk_based_diagnostic_progress_token =
            disk_based_diagnostics_progress_token.is_some();
        let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();

        // Listen for `PublishDiagnostics` notifications.
        language_server
            .on_notification::<lsp::notification::PublishDiagnostics, _>({
                let diagnostics_tx = diagnostics_tx.clone();
                move |params| {
                    // Without a progress token there is no other signal for
                    // start/finish, so synthesize them around each update.
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                    }
                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsUpdate(params))).ok();
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                    }
                }
            })
            .detach();

        // Listen for `Progress` notifications. Send an event when the language server
        // transitions between running jobs and not running any jobs.
        let mut running_jobs_for_this_server: i32 = 0;
        language_server
            .on_notification::<lsp::notification::Progress, _>(move |params| {
                let token = match params.token {
                    lsp::NumberOrString::Number(_) => None,
                    lsp::NumberOrString::String(token) => Some(token),
                };

                if token == disk_based_diagnostics_progress_token {
                    match params.value {
                        lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                            lsp::WorkDoneProgress::Begin(_) => {
                                // Only signal on the 0 -> 1 transition.
                                running_jobs_for_this_server += 1;
                                if running_jobs_for_this_server == 1 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                                }
                            }
                            lsp::WorkDoneProgress::End(_) => {
                                // Only signal on the 1 -> 0 transition.
                                running_jobs_for_this_server -= 1;
                                if running_jobs_for_this_server == 0 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                                }
                            }
                            _ => {}
                        },
                    }
                }
            })
            .detach();

        // Process all the LSP events.
        cx.spawn_weak(|this, mut cx| async move {
            while let Ok(message) = diagnostics_rx.recv().await {
                // Stop processing once the project has been dropped.
                let this = cx.read(|cx| this.upgrade(cx))?;
                match message {
                    LspEvent::DiagnosticsStart => {
                        let send = this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_started(cx);
                            // Forward the transition to collaborators if shared.
                            this.remote_id().map(|project_id| {
                                rpc.send(proto::DiskBasedDiagnosticsUpdating { project_id })
                            })
                        });
                        if let Some(send) = send {
                            send.await.log_err();
                        }
                    }
                    LspEvent::DiagnosticsUpdate(params) => {
                        this.update(&mut cx, |this, cx| {
                            this.update_diagnostics(params, &disk_based_sources, cx)
                                .log_err();
                        });
                    }
                    LspEvent::DiagnosticsFinish => {
                        let send = this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_finished(cx);
                            this.remote_id().map(|project_id| {
                                rpc.send(proto::DiskBasedDiagnosticsUpdated { project_id })
                            })
                        });
                        if let Some(send) = send {
                            send.await.log_err();
                        }
                    }
                }
            }
            Some(())
        })
        .detach();

        Some(language_server)
    }
806
807 pub fn update_diagnostics(
808 &mut self,
809 params: lsp::PublishDiagnosticsParams,
810 disk_based_sources: &HashSet<String>,
811 cx: &mut ModelContext<Self>,
812 ) -> Result<()> {
813 let abs_path = params
814 .uri
815 .to_file_path()
816 .map_err(|_| anyhow!("URI is not a file"))?;
817 let mut next_group_id = 0;
818 let mut diagnostics = Vec::default();
819 let mut primary_diagnostic_group_ids = HashMap::default();
820 let mut sources_by_group_id = HashMap::default();
821 let mut supporting_diagnostic_severities = HashMap::default();
822 for diagnostic in ¶ms.diagnostics {
823 let source = diagnostic.source.as_ref();
824 let code = diagnostic.code.as_ref().map(|code| match code {
825 lsp::NumberOrString::Number(code) => code.to_string(),
826 lsp::NumberOrString::String(code) => code.clone(),
827 });
828 let range = range_from_lsp(diagnostic.range);
829 let is_supporting = diagnostic
830 .related_information
831 .as_ref()
832 .map_or(false, |infos| {
833 infos.iter().any(|info| {
834 primary_diagnostic_group_ids.contains_key(&(
835 source,
836 code.clone(),
837 range_from_lsp(info.location.range),
838 ))
839 })
840 });
841
842 if is_supporting {
843 if let Some(severity) = diagnostic.severity {
844 supporting_diagnostic_severities
845 .insert((source, code.clone(), range), severity);
846 }
847 } else {
848 let group_id = post_inc(&mut next_group_id);
849 let is_disk_based =
850 source.map_or(false, |source| disk_based_sources.contains(source));
851
852 sources_by_group_id.insert(group_id, source);
853 primary_diagnostic_group_ids
854 .insert((source, code.clone(), range.clone()), group_id);
855
856 diagnostics.push(DiagnosticEntry {
857 range,
858 diagnostic: Diagnostic {
859 code: code.clone(),
860 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
861 message: diagnostic.message.clone(),
862 group_id,
863 is_primary: true,
864 is_valid: true,
865 is_disk_based,
866 },
867 });
868 if let Some(infos) = &diagnostic.related_information {
869 for info in infos {
870 if info.location.uri == params.uri {
871 let range = range_from_lsp(info.location.range);
872 diagnostics.push(DiagnosticEntry {
873 range,
874 diagnostic: Diagnostic {
875 code: code.clone(),
876 severity: DiagnosticSeverity::INFORMATION,
877 message: info.message.clone(),
878 group_id,
879 is_primary: false,
880 is_valid: true,
881 is_disk_based,
882 },
883 });
884 }
885 }
886 }
887 }
888 }
889
890 for entry in &mut diagnostics {
891 let diagnostic = &mut entry.diagnostic;
892 if !diagnostic.is_primary {
893 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
894 if let Some(&severity) = supporting_diagnostic_severities.get(&(
895 source,
896 diagnostic.code.clone(),
897 entry.range.clone(),
898 )) {
899 diagnostic.severity = severity;
900 }
901 }
902 }
903
904 self.update_diagnostic_entries(abs_path, params.version, diagnostics, cx)?;
905 Ok(())
906 }
907
    /// Applies `diagnostics` to the open buffer (if any) and to the worktree
    /// state for the file at `abs_path`, then emits `DiagnosticsUpdated`.
    /// Errors if no local worktree contains `abs_path`.
    pub fn update_diagnostic_entries(
        &mut self,
        abs_path: PathBuf,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Project>,
    ) -> Result<(), anyhow::Error> {
        let (worktree, relative_path) = self
            .find_local_worktree(&abs_path, cx)
            .ok_or_else(|| anyhow!("no worktree found for diagnostics"))?;
        let project_path = ProjectPath {
            worktree_id: worktree.read(cx).id(),
            path: relative_path.into(),
        };

        // Update the first open buffer whose path matches.
        // NOTE(review): the match is on relative path only, not worktree —
        // two worktrees containing the same relative path could collide;
        // confirm this is acceptable.
        for buffer in self.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == project_path.path)
                {
                    buffer.update(cx, |buffer, cx| {
                        buffer.update_diagnostics(version, diagnostics.clone(), cx)
                    })?;
                    break;
                }
            }
        }
        // Persist the diagnostics on the worktree so buffers opened later
        // can be seeded with them.
        worktree.update(cx, |worktree, cx| {
            worktree
                .as_local_mut()
                .ok_or_else(|| anyhow!("not a local worktree"))?
                .update_diagnostics(project_path.path.clone(), diagnostics, cx)
        })?;
        cx.emit(Event::DiagnosticsUpdated(project_path));
        Ok(())
    }
946
947 pub fn definition<T: ToOffset>(
948 &self,
949 source_buffer_handle: &ModelHandle<Buffer>,
950 position: T,
951 cx: &mut ModelContext<Self>,
952 ) -> Task<Result<Vec<Definition>>> {
953 let source_buffer_handle = source_buffer_handle.clone();
954 let buffer = source_buffer_handle.read(cx);
955 let worktree;
956 let buffer_abs_path;
957 if let Some(file) = File::from_dyn(buffer.file()) {
958 worktree = file.worktree.clone();
959 buffer_abs_path = file.as_local().map(|f| f.abs_path(cx));
960 } else {
961 return Task::ready(Err(anyhow!("buffer does not belong to any worktree")));
962 };
963
964 if worktree.read(cx).as_local().is_some() {
965 let point = buffer.offset_to_point_utf16(position.to_offset(buffer));
966 let buffer_abs_path = buffer_abs_path.unwrap();
967 let lang_name;
968 let lang_server;
969 if let Some(lang) = buffer.language() {
970 lang_name = lang.name().to_string();
971 if let Some(server) = self
972 .language_servers
973 .get(&(worktree.read(cx).id(), lang_name.clone()))
974 {
975 lang_server = server.clone();
976 } else {
977 return Task::ready(Err(anyhow!("buffer does not have a language server")));
978 };
979 } else {
980 return Task::ready(Err(anyhow!("buffer does not have a language")));
981 }
982
983 cx.spawn(|this, mut cx| async move {
984 let response = lang_server
985 .request::<lsp::request::GotoDefinition>(lsp::GotoDefinitionParams {
986 text_document_position_params: lsp::TextDocumentPositionParams {
987 text_document: lsp::TextDocumentIdentifier::new(
988 lsp::Url::from_file_path(&buffer_abs_path).unwrap(),
989 ),
990 position: lsp::Position::new(point.row, point.column),
991 },
992 work_done_progress_params: Default::default(),
993 partial_result_params: Default::default(),
994 })
995 .await?;
996
997 let mut definitions = Vec::new();
998 if let Some(response) = response {
999 let mut unresolved_locations = Vec::new();
1000 match response {
1001 lsp::GotoDefinitionResponse::Scalar(loc) => {
1002 unresolved_locations.push((loc.uri, loc.range));
1003 }
1004 lsp::GotoDefinitionResponse::Array(locs) => {
1005 unresolved_locations.extend(locs.into_iter().map(|l| (l.uri, l.range)));
1006 }
1007 lsp::GotoDefinitionResponse::Link(links) => {
1008 unresolved_locations.extend(
1009 links
1010 .into_iter()
1011 .map(|l| (l.target_uri, l.target_selection_range)),
1012 );
1013 }
1014 }
1015
1016 for (target_uri, target_range) in unresolved_locations {
1017 let abs_path = target_uri
1018 .to_file_path()
1019 .map_err(|_| anyhow!("invalid target path"))?;
1020
1021 let (worktree, relative_path) = if let Some(result) =
1022 this.read_with(&cx, |this, cx| this.find_local_worktree(&abs_path, cx))
1023 {
1024 result
1025 } else {
1026 let worktree = this
1027 .update(&mut cx, |this, cx| {
1028 this.create_local_worktree(&abs_path, true, cx)
1029 })
1030 .await?;
1031 this.update(&mut cx, |this, cx| {
1032 this.language_servers.insert(
1033 (worktree.read(cx).id(), lang_name.clone()),
1034 lang_server.clone(),
1035 );
1036 });
1037 (worktree, PathBuf::new())
1038 };
1039
1040 let project_path = ProjectPath {
1041 worktree_id: worktree.read_with(&cx, |worktree, _| worktree.id()),
1042 path: relative_path.into(),
1043 };
1044 let target_buffer_handle = this
1045 .update(&mut cx, |this, cx| this.open_buffer(project_path, cx))
1046 .await?;
1047 cx.read(|cx| {
1048 let target_buffer = target_buffer_handle.read(cx);
1049 let target_start = target_buffer
1050 .clip_point_utf16(target_range.start.to_point_utf16(), Bias::Left);
1051 let target_end = target_buffer
1052 .clip_point_utf16(target_range.end.to_point_utf16(), Bias::Left);
1053 definitions.push(Definition {
1054 target_buffer: target_buffer_handle,
1055 target_range: target_buffer.anchor_after(target_start)
1056 ..target_buffer.anchor_before(target_end),
1057 });
1058 });
1059 }
1060 }
1061
1062 Ok(definitions)
1063 })
1064 } else {
1065 log::info!("go to definition is not yet implemented for guests");
1066 Task::ready(Ok(Default::default()))
1067 }
1068 }
1069
1070 pub fn find_or_create_local_worktree(
1071 &self,
1072 abs_path: impl AsRef<Path>,
1073 weak: bool,
1074 cx: &mut ModelContext<Self>,
1075 ) -> Task<Result<(ModelHandle<Worktree>, PathBuf)>> {
1076 let abs_path = abs_path.as_ref();
1077 if let Some((tree, relative_path)) = self.find_local_worktree(abs_path, cx) {
1078 Task::ready(Ok((tree.clone(), relative_path.into())))
1079 } else {
1080 let worktree = self.create_local_worktree(abs_path, weak, cx);
1081 cx.foreground()
1082 .spawn(async move { Ok((worktree.await?, PathBuf::new())) })
1083 }
1084 }
1085
1086 fn find_local_worktree(
1087 &self,
1088 abs_path: &Path,
1089 cx: &AppContext,
1090 ) -> Option<(ModelHandle<Worktree>, PathBuf)> {
1091 for tree in self.worktrees(cx) {
1092 if let Some(relative_path) = tree
1093 .read(cx)
1094 .as_local()
1095 .and_then(|t| abs_path.strip_prefix(t.abs_path()).ok())
1096 {
1097 return Some((tree.clone(), relative_path.into()));
1098 }
1099 }
1100 None
1101 }
1102
1103 pub fn is_shared(&self) -> bool {
1104 match &self.client_state {
1105 ProjectClientState::Local { is_shared, .. } => *is_shared,
1106 ProjectClientState::Remote { .. } => false,
1107 }
1108 }
1109
    /// Creates a new local worktree rooted at `abs_path` and adds it to this
    /// project. If the project has a remote id, the worktree is also
    /// registered with the server, and additionally shared when the project
    /// is currently shared.
    fn create_local_worktree(
        &self,
        abs_path: impl AsRef<Path>,
        weak: bool,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Worktree>>> {
        let fs = self.fs.clone();
        let client = self.client.clone();
        let path = Arc::from(abs_path.as_ref());
        cx.spawn(|project, mut cx| async move {
            let worktree = Worktree::local(client.clone(), path, weak, fs, &mut cx).await?;

            // Add the worktree and snapshot the project's remote state in a
            // single update, so the registration below acts on a consistent
            // view of `remote_id`/`is_shared`.
            let (remote_project_id, is_shared) = project.update(&mut cx, |project, cx| {
                project.add_worktree(&worktree, cx);
                (project.remote_id(), project.is_shared())
            });

            if let Some(project_id) = remote_project_id {
                // Register first; sharing only happens after the server knows
                // about the worktree.
                worktree
                    .update(&mut cx, |worktree, cx| {
                        worktree.as_local_mut().unwrap().register(project_id, cx)
                    })
                    .await?;
                if is_shared {
                    worktree
                        .update(&mut cx, |worktree, cx| {
                            worktree.as_local_mut().unwrap().share(project_id, cx)
                        })
                        .await?;
                }
            }

            Ok(worktree)
        })
    }
1145
1146 pub fn remove_worktree(&mut self, id: WorktreeId, cx: &mut ModelContext<Self>) {
1147 self.worktrees.retain(|worktree| {
1148 worktree
1149 .upgrade(cx)
1150 .map_or(false, |w| w.read(cx).id() != id)
1151 });
1152 cx.notify();
1153 }
1154
    /// Wires a worktree into the project: observes it so project observers
    /// are notified of changes, forwards local worktree events into open
    /// buffers, and stores a strong or weak handle depending on whether the
    /// worktree is a weak, local one.
    fn add_worktree(&mut self, worktree: &ModelHandle<Worktree>, cx: &mut ModelContext<Self>) {
        cx.observe(&worktree, |_, _, cx| cx.notify()).detach();
        if worktree.read(cx).is_local() {
            cx.subscribe(&worktree, |this, worktree, _, cx| {
                this.update_local_worktree_buffers(worktree, cx);
            })
            .detach();
        }

        let push_weak_handle = {
            let worktree = worktree.read(cx);
            worktree.is_local() && worktree.is_weak()
        };
        if push_weak_handle {
            // A weak worktree shouldn't be kept alive by the project; prune
            // its entry once the underlying model is released.
            cx.observe_release(&worktree, |this, cx| {
                this.worktrees
                    .retain(|worktree| worktree.upgrade(cx).is_some());
                cx.notify();
            })
            .detach();
            self.worktrees
                .push(WorktreeHandle::Weak(worktree.downgrade()));
        } else {
            self.worktrees
                .push(WorktreeHandle::Strong(worktree.clone()));
        }
        cx.notify();
    }
1183
    /// After a local worktree's snapshot changes, re-resolves the `File` of
    /// every open buffer belonging to that worktree, since its entry may have
    /// been renamed, recreated, or deleted on disk. Also drops bookkeeping
    /// for buffers whose models have been released.
    fn update_local_worktree_buffers(
        &mut self,
        worktree_handle: ModelHandle<Worktree>,
        cx: &mut ModelContext<Self>,
    ) {
        let snapshot = worktree_handle.read(cx).snapshot();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in &self.open_buffers {
            if let OpenBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| {
                        if let Some(old_file) = File::from_dyn(buffer.file()) {
                            if old_file.worktree != worktree_handle {
                                return;
                            }

                            // Prefer resolving the entry by id (stable across
                            // renames); fall back to the old path; otherwise
                            // the entry is gone from the snapshot and the new
                            // file keeps the old path with no entry id.
                            let new_file = if let Some(entry) = old_file
                                .entry_id
                                .and_then(|entry_id| snapshot.entry_for_id(entry_id))
                            {
                                File {
                                    is_local: true,
                                    entry_id: Some(entry.id),
                                    mtime: entry.mtime,
                                    path: entry.path.clone(),
                                    worktree: worktree_handle.clone(),
                                }
                            } else if let Some(entry) =
                                snapshot.entry_for_path(old_file.path().as_ref())
                            {
                                File {
                                    is_local: true,
                                    entry_id: Some(entry.id),
                                    mtime: entry.mtime,
                                    path: entry.path.clone(),
                                    worktree: worktree_handle.clone(),
                                }
                            } else {
                                File {
                                    is_local: true,
                                    entry_id: None,
                                    path: old_file.path().clone(),
                                    mtime: old_file.mtime(),
                                    worktree: worktree_handle.clone(),
                                }
                            };

                            // When this project has a remote id, broadcast the
                            // updated file metadata to the server.
                            if let Some(project_id) = self.remote_id() {
                                let client = self.client.clone();
                                let message = proto::UpdateBufferFile {
                                    project_id,
                                    buffer_id: *buffer_id as u64,
                                    file: Some(new_file.to_proto()),
                                };
                                cx.foreground()
                                    .spawn(async move { client.send(message).await })
                                    .detach_and_log_err(cx);
                            }
                            buffer.file_updated(Box::new(new_file), cx).detach();
                        }
                    });
                } else {
                    // Buffer model was dropped; remember its id and remove it
                    // after the loop (can't mutate the map while iterating).
                    buffers_to_delete.push(*buffer_id);
                }
            }
        }

        for buffer_id in buffers_to_delete {
            self.open_buffers.remove(&buffer_id);
        }
    }
1255
1256 pub fn set_active_path(&mut self, entry: Option<ProjectPath>, cx: &mut ModelContext<Self>) {
1257 let new_active_entry = entry.and_then(|project_path| {
1258 let worktree = self.worktree_for_id(project_path.worktree_id, cx)?;
1259 let entry = worktree.read(cx).entry_for_path(project_path.path)?;
1260 Some(ProjectEntry {
1261 worktree_id: project_path.worktree_id,
1262 entry_id: entry.id,
1263 })
1264 });
1265 if new_active_entry != self.active_entry {
1266 self.active_entry = new_active_entry;
1267 cx.emit(Event::ActiveEntryChanged(new_active_entry));
1268 }
1269 }
1270
    /// True while at least one language server has reported starting a round
    /// of disk-based diagnostics that it hasn't yet finished.
    pub fn is_running_disk_based_diagnostics(&self) -> bool {
        self.language_servers_with_diagnostics_running > 0
    }
1274
1275 pub fn diagnostic_summary(&self, cx: &AppContext) -> DiagnosticSummary {
1276 let mut summary = DiagnosticSummary::default();
1277 for (_, path_summary) in self.diagnostic_summaries(cx) {
1278 summary.error_count += path_summary.error_count;
1279 summary.warning_count += path_summary.warning_count;
1280 summary.info_count += path_summary.info_count;
1281 summary.hint_count += path_summary.hint_count;
1282 }
1283 summary
1284 }
1285
1286 pub fn diagnostic_summaries<'a>(
1287 &'a self,
1288 cx: &'a AppContext,
1289 ) -> impl Iterator<Item = (ProjectPath, DiagnosticSummary)> + 'a {
1290 self.worktrees(cx).flat_map(move |worktree| {
1291 let worktree = worktree.read(cx);
1292 let worktree_id = worktree.id();
1293 worktree
1294 .diagnostic_summaries()
1295 .map(move |(path, summary)| (ProjectPath { worktree_id, path }, summary))
1296 })
1297 }
1298
1299 pub fn disk_based_diagnostics_started(&mut self, cx: &mut ModelContext<Self>) {
1300 self.language_servers_with_diagnostics_running += 1;
1301 if self.language_servers_with_diagnostics_running == 1 {
1302 cx.emit(Event::DiskBasedDiagnosticsStarted);
1303 }
1304 }
1305
    /// Records that a language server finished a round of disk-based
    /// diagnostics. `DiskBasedDiagnosticsUpdated` is emitted on every call
    /// (before the counter is decremented); `DiskBasedDiagnosticsFinished` is
    /// emitted only once the last in-flight server completes.
    pub fn disk_based_diagnostics_finished(&mut self, cx: &mut ModelContext<Self>) {
        cx.emit(Event::DiskBasedDiagnosticsUpdated);
        self.language_servers_with_diagnostics_running -= 1;
        if self.language_servers_with_diagnostics_running == 0 {
            cx.emit(Event::DiskBasedDiagnosticsFinished);
        }
    }
1313
    /// The entry most recently recorded via `set_active_path`, if any.
    pub fn active_entry(&self) -> Option<ProjectEntry> {
        self.active_entry
    }
1317
1318 // RPC message handlers
1319
    /// The host stopped sharing the project: mark this remote project as no
    /// longer shared and drop all collaborators.
    fn handle_unshare_project(
        &mut self,
        _: TypedEnvelope<proto::UnshareProject>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        if let ProjectClientState::Remote {
            sharing_has_stopped,
            ..
        } = &mut self.client_state
        {
            *sharing_has_stopped = true;
            self.collaborators.clear();
            cx.notify();
            Ok(())
        } else {
            // A local project should never receive this message; receiving it
            // indicates a protocol violation.
            unreachable!()
        }
    }
1339
    /// A collaborator joined the project. Resolving the collaborator's user
    /// record is asynchronous, so the insertion happens in a spawned task and
    /// failures there are only logged.
    fn handle_add_collaborator(
        &mut self,
        mut envelope: TypedEnvelope<proto::AddProjectCollaborator>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let user_store = self.user_store.clone();
        let collaborator = envelope
            .payload
            .collaborator
            .take()
            .ok_or_else(|| anyhow!("empty collaborator"))?;

        cx.spawn(|this, mut cx| {
            async move {
                let collaborator =
                    Collaborator::from_proto(collaborator, &user_store, &mut cx).await?;
                this.update(&mut cx, |this, cx| {
                    this.collaborators
                        .insert(collaborator.peer_id, collaborator);
                    cx.notify();
                });
                Ok(())
            }
            .log_err()
        })
        .detach();

        Ok(())
    }
1370
1371 fn handle_remove_collaborator(
1372 &mut self,
1373 envelope: TypedEnvelope<proto::RemoveProjectCollaborator>,
1374 _: Arc<Client>,
1375 cx: &mut ModelContext<Self>,
1376 ) -> Result<()> {
1377 let peer_id = PeerId(envelope.payload.peer_id);
1378 let replica_id = self
1379 .collaborators
1380 .remove(&peer_id)
1381 .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?
1382 .replica_id;
1383 self.shared_buffers.remove(&peer_id);
1384 for (_, buffer) in &self.open_buffers {
1385 if let OpenBuffer::Loaded(buffer) = buffer {
1386 if let Some(buffer) = buffer.upgrade(cx) {
1387 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1388 }
1389 }
1390 }
1391 cx.notify();
1392 Ok(())
1393 }
1394
    /// The host shared a worktree with us: build a remote worktree from the
    /// message and add it to this project. Construction is asynchronous, so
    /// it runs in a spawned task and failures there are only logged.
    fn handle_share_worktree(
        &mut self,
        envelope: TypedEnvelope<proto::ShareWorktree>,
        client: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let remote_id = self.remote_id().ok_or_else(|| anyhow!("invalid project"))?;
        let replica_id = self.replica_id();
        let worktree = envelope
            .payload
            .worktree
            .ok_or_else(|| anyhow!("invalid worktree"))?;
        cx.spawn(|this, mut cx| {
            async move {
                let worktree =
                    Worktree::remote(remote_id, replica_id, worktree, client, &mut cx).await?;
                this.update(&mut cx, |this, cx| this.add_worktree(&worktree, cx));
                Ok(())
            }
            .log_err()
        })
        .detach();
        Ok(())
    }
1419
1420 fn handle_unregister_worktree(
1421 &mut self,
1422 envelope: TypedEnvelope<proto::UnregisterWorktree>,
1423 _: Arc<Client>,
1424 cx: &mut ModelContext<Self>,
1425 ) -> Result<()> {
1426 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1427 self.remove_worktree(worktree_id, cx);
1428 Ok(())
1429 }
1430
    /// Applies an incremental update from the host to the matching remote
    /// worktree. Updates for worktrees we no longer have are silently
    /// ignored.
    fn handle_update_worktree(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
            worktree.update(cx, |worktree, cx| {
                let worktree = worktree.as_remote_mut().unwrap();
                worktree.update_from_remote(envelope, cx)
            })?;
        }
        Ok(())
    }
1446
    /// Stores an updated per-path diagnostic summary on the matching remote
    /// worktree and notifies listeners that diagnostics changed for that
    /// path. Messages for unknown worktrees or without a summary are ignored.
    fn handle_update_diagnostic_summary(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
            if let Some(summary) = envelope.payload.summary {
                let project_path = ProjectPath {
                    worktree_id,
                    path: Path::new(&summary.path).into(),
                };
                worktree.update(cx, |worktree, _| {
                    worktree
                        .as_remote_mut()
                        .unwrap()
                        .update_diagnostic_summary(project_path.path.clone(), &summary);
                });
                cx.emit(Event::DiagnosticsUpdated(project_path));
            }
        }
        Ok(())
    }
1471
    /// Guest-side handler: the host reported that a language server began a
    /// round of disk-based diagnostics.
    fn handle_disk_based_diagnostics_updating(
        &mut self,
        _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdating>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        self.disk_based_diagnostics_started(cx);
        Ok(())
    }
1481
    /// Guest-side handler: the host reported that a language server finished
    /// a round of disk-based diagnostics.
    fn handle_disk_based_diagnostics_updated(
        &mut self,
        _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdated>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        self.disk_based_diagnostics_finished(cx);
        Ok(())
    }
1491
1492 pub fn handle_update_buffer(
1493 &mut self,
1494 envelope: TypedEnvelope<proto::UpdateBuffer>,
1495 _: Arc<Client>,
1496 cx: &mut ModelContext<Self>,
1497 ) -> Result<()> {
1498 let payload = envelope.payload.clone();
1499 let buffer_id = payload.buffer_id as usize;
1500 let ops = payload
1501 .operations
1502 .into_iter()
1503 .map(|op| language::proto::deserialize_operation(op))
1504 .collect::<Result<Vec<_>, _>>()?;
1505 match self.open_buffers.get_mut(&buffer_id) {
1506 Some(OpenBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
1507 Some(OpenBuffer::Loaded(buffer)) => {
1508 if let Some(buffer) = buffer.upgrade(cx) {
1509 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
1510 } else {
1511 self.open_buffers
1512 .insert(buffer_id, OpenBuffer::Operations(ops));
1513 }
1514 }
1515 None => {
1516 self.open_buffers
1517 .insert(buffer_id, OpenBuffer::Operations(ops));
1518 }
1519 }
1520 Ok(())
1521 }
1522
1523 pub fn handle_update_buffer_file(
1524 &mut self,
1525 envelope: TypedEnvelope<proto::UpdateBufferFile>,
1526 _: Arc<Client>,
1527 cx: &mut ModelContext<Self>,
1528 ) -> Result<()> {
1529 let payload = envelope.payload.clone();
1530 let buffer_id = payload.buffer_id as usize;
1531 let file = payload.file.ok_or_else(|| anyhow!("invalid file"))?;
1532 let worktree = self
1533 .worktree_for_id(WorktreeId::from_proto(file.worktree_id), cx)
1534 .ok_or_else(|| anyhow!("no such worktree"))?;
1535 let file = File::from_proto(file, worktree.clone(), cx)?;
1536 let buffer = self
1537 .open_buffers
1538 .get_mut(&buffer_id)
1539 .and_then(|b| b.upgrade(cx))
1540 .ok_or_else(|| anyhow!("no such buffer"))?;
1541 buffer.update(cx, |buffer, cx| {
1542 buffer.file_updated(Box::new(file), cx).detach();
1543 });
1544
1545 Ok(())
1546 }
1547
    /// A guest asked us (the host) to save one of the buffers we shared with
    /// them. Performs the save asynchronously and responds with the saved
    /// version and mtime; failures are only logged.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let project_id = self.remote_id().ok_or_else(|| anyhow!("not connected"))?;
        // Only buffers we have explicitly shared with this peer may be saved by it.
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let receipt = envelope.receipt();
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();
        Ok(())
    }
1590
    /// A guest asked us (the host) to format one of the buffers we shared
    /// with them. Responds with `Ack` on success or an error message on
    /// failure; failures to respond at all are only logged.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();
        Ok(())
    }
1629
    /// A guest asked us (the host) to open a buffer on their behalf. The
    /// buffer is recorded in `shared_buffers` — keeping a strong handle alive
    /// for that peer — and its full state is sent back in the response.
    /// Failures in the async portion are only logged.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();
        let peer_id = envelope.original_sender_id()?;
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        let open_buffer = self.open_buffer(
            ProjectPath {
                worktree_id,
                path: PathBuf::from(envelope.payload.path).into(),
            },
            cx,
        );
        cx.spawn(|this, mut cx| {
            async move {
                let buffer = open_buffer.await?;
                this.update(&mut cx, |this, _| {
                    this.shared_buffers
                        .entry(peer_id)
                        .or_default()
                        .insert(buffer.id() as u64, buffer.clone());
                });
                let message = buffer.read_with(&cx, |buffer, _| buffer.to_proto());
                rpc.respond(
                    receipt,
                    proto::OpenBufferResponse {
                        buffer: Some(message),
                    },
                )
                .await
            }
            .log_err()
        })
        .detach();
        Ok(())
    }
1669
1670 pub fn handle_close_buffer(
1671 &mut self,
1672 envelope: TypedEnvelope<proto::CloseBuffer>,
1673 _: Arc<Client>,
1674 cx: &mut ModelContext<Self>,
1675 ) -> anyhow::Result<()> {
1676 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1677 shared_buffers.remove(&envelope.payload.buffer_id);
1678 cx.notify();
1679 }
1680 Ok(())
1681 }
1682
1683 pub fn handle_buffer_saved(
1684 &mut self,
1685 envelope: TypedEnvelope<proto::BufferSaved>,
1686 _: Arc<Client>,
1687 cx: &mut ModelContext<Self>,
1688 ) -> Result<()> {
1689 let payload = envelope.payload.clone();
1690 let buffer = self
1691 .open_buffers
1692 .get(&(payload.buffer_id as usize))
1693 .and_then(|buf| {
1694 if let OpenBuffer::Loaded(buffer) = buf {
1695 buffer.upgrade(cx)
1696 } else {
1697 None
1698 }
1699 });
1700 if let Some(buffer) = buffer {
1701 buffer.update(cx, |buffer, cx| {
1702 let version = payload.version.try_into()?;
1703 let mtime = payload
1704 .mtime
1705 .ok_or_else(|| anyhow!("missing mtime"))?
1706 .into();
1707 buffer.did_save(version, mtime, None, cx);
1708 Result::<_, anyhow::Error>::Ok(())
1709 })?;
1710 }
1711 Ok(())
1712 }
1713
1714 pub fn handle_buffer_reloaded(
1715 &mut self,
1716 envelope: TypedEnvelope<proto::BufferReloaded>,
1717 _: Arc<Client>,
1718 cx: &mut ModelContext<Self>,
1719 ) -> Result<()> {
1720 let payload = envelope.payload.clone();
1721 let buffer = self
1722 .open_buffers
1723 .get(&(payload.buffer_id as usize))
1724 .and_then(|buf| {
1725 if let OpenBuffer::Loaded(buffer) = buf {
1726 buffer.upgrade(cx)
1727 } else {
1728 None
1729 }
1730 });
1731 if let Some(buffer) = buffer {
1732 buffer.update(cx, |buffer, cx| {
1733 let version = payload.version.try_into()?;
1734 let mtime = payload
1735 .mtime
1736 .ok_or_else(|| anyhow!("missing mtime"))?
1737 .into();
1738 buffer.did_reload(version, mtime, cx);
1739 Result::<_, anyhow::Error>::Ok(())
1740 })?;
1741 }
1742 Ok(())
1743 }
1744
    /// Fuzzy-matches `query` against the file paths of every non-weak
    /// worktree, returning at most `max_results` matches. Snapshots are taken
    /// up front so the matching itself runs without touching app state;
    /// setting `cancel_flag` aborts an in-flight search.
    pub fn match_paths<'a>(
        &self,
        query: &'a str,
        include_ignored: bool,
        smart_case: bool,
        max_results: usize,
        cancel_flag: &'a AtomicBool,
        cx: &AppContext,
    ) -> impl 'a + Future<Output = Vec<PathMatch>> {
        let worktrees = self
            .worktrees(cx)
            .filter(|worktree| !worktree.read(cx).is_weak())
            .collect::<Vec<_>>();
        // Prefix matches with the worktree root name only when there are
        // multiple worktrees to disambiguate between.
        let include_root_name = worktrees.len() > 1;
        let candidate_sets = worktrees
            .into_iter()
            .map(|worktree| CandidateSet {
                snapshot: worktree.read(cx).snapshot(),
                include_ignored,
                include_root_name,
            })
            .collect::<Vec<_>>();

        let background = cx.background().clone();
        async move {
            fuzzy::match_paths(
                candidate_sets.as_slice(),
                query,
                smart_case,
                max_results,
                cancel_flag,
                background,
            )
            .await
        }
    }
1781}
1782
1783impl WorktreeHandle {
1784 pub fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Worktree>> {
1785 match self {
1786 WorktreeHandle::Strong(handle) => Some(handle.clone()),
1787 WorktreeHandle::Weak(handle) => handle.upgrade(cx),
1788 }
1789 }
1790}
1791
/// A single worktree snapshot adapted to the fuzzy path-matching interface.
struct CandidateSet {
    snapshot: Snapshot,
    // Whether ignored files participate in matching.
    include_ignored: bool,
    // Whether candidate paths are prefixed with the worktree's root name
    // (used to disambiguate between multiple worktrees).
    include_root_name: bool,
}
1797
1798impl<'a> PathMatchCandidateSet<'a> for CandidateSet {
1799 type Candidates = CandidateSetIter<'a>;
1800
1801 fn id(&self) -> usize {
1802 self.snapshot.id().to_usize()
1803 }
1804
1805 fn len(&self) -> usize {
1806 if self.include_ignored {
1807 self.snapshot.file_count()
1808 } else {
1809 self.snapshot.visible_file_count()
1810 }
1811 }
1812
1813 fn prefix(&self) -> Arc<str> {
1814 if self.snapshot.root_entry().map_or(false, |e| e.is_file()) {
1815 self.snapshot.root_name().into()
1816 } else if self.include_root_name {
1817 format!("{}/", self.snapshot.root_name()).into()
1818 } else {
1819 "".into()
1820 }
1821 }
1822
1823 fn candidates(&'a self, start: usize) -> Self::Candidates {
1824 CandidateSetIter {
1825 traversal: self.snapshot.files(self.include_ignored, start),
1826 }
1827 }
1828}
1829
/// Iterator over a `CandidateSet`'s file entries, yielding one fuzzy-match
/// candidate per file.
struct CandidateSetIter<'a> {
    traversal: Traversal<'a>,
}
1833
1834impl<'a> Iterator for CandidateSetIter<'a> {
1835 type Item = PathMatchCandidate<'a>;
1836
1837 fn next(&mut self) -> Option<Self::Item> {
1838 self.traversal.next().map(|entry| {
1839 if let EntryKind::File(char_bag) = entry.kind {
1840 PathMatchCandidate {
1841 path: &entry.path,
1842 char_bag,
1843 }
1844 } else {
1845 unreachable!()
1846 }
1847 })
1848 }
1849}
1850
impl Entity for Project {
    type Event = Event;

    /// On release, notify the server that we're going away: a local project
    /// unregisters itself (if it was ever assigned a remote id), a remote
    /// project leaves. Send failures are only logged, since the model is
    /// already being dropped.
    fn release(&mut self, cx: &mut gpui::MutableAppContext) {
        match &self.client_state {
            ProjectClientState::Local { remote_id_rx, .. } => {
                // Only unregister when a remote id was actually assigned.
                if let Some(project_id) = *remote_id_rx.borrow() {
                    let rpc = self.client.clone();
                    cx.spawn(|_| async move {
                        if let Err(err) = rpc.send(proto::UnregisterProject { project_id }).await {
                            log::error!("error unregistering project: {}", err);
                        }
                    })
                    .detach();
                }
            }
            ProjectClientState::Remote { remote_id, .. } => {
                let rpc = self.client.clone();
                let project_id = *remote_id;
                cx.spawn(|_| async move {
                    if let Err(err) = rpc.send(proto::LeaveProject { project_id }).await {
                        log::error!("error leaving project: {}", err);
                    }
                })
                .detach();
            }
        }
    }

    /// Delays app shutdown until every language server has been asked to
    /// shut down.
    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        let shutdown_futures = self
            .language_servers
            .drain()
            .filter_map(|(_, server)| server.shutdown())
            .collect::<Vec<_>>();
        Some(
            async move {
                futures::future::join_all(shutdown_futures).await;
            }
            .boxed(),
        )
    }
}
1899
1900impl Collaborator {
1901 fn from_proto(
1902 message: proto::Collaborator,
1903 user_store: &ModelHandle<UserStore>,
1904 cx: &mut AsyncAppContext,
1905 ) -> impl Future<Output = Result<Self>> {
1906 let user = user_store.update(cx, |user_store, cx| {
1907 user_store.fetch_user(message.user_id, cx)
1908 });
1909
1910 async move {
1911 Ok(Self {
1912 peer_id: PeerId(message.peer_id),
1913 user: user.await?,
1914 replica_id: message.replica_id as ReplicaId,
1915 })
1916 }
1917 }
1918}
1919
1920impl<P: AsRef<Path>> From<(WorktreeId, P)> for ProjectPath {
1921 fn from((worktree_id, path): (WorktreeId, P)) -> Self {
1922 Self {
1923 worktree_id,
1924 path: path.as_ref().into(),
1925 }
1926 }
1927}
1928
1929impl OpenBuffer {
1930 fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Buffer>> {
1931 match self {
1932 OpenBuffer::Loaded(buffer) => buffer.upgrade(cx),
1933 OpenBuffer::Operations(_) => None,
1934 }
1935 }
1936}
1937
1938#[cfg(test)]
1939mod tests {
1940 use super::{Event, *};
1941 use client::test::FakeHttpClient;
1942 use fs::RealFs;
1943 use futures::StreamExt;
1944 use gpui::{test::subscribe, TestAppContext};
1945 use language::{
1946 tree_sitter_rust, AnchorRangeExt, Diagnostic, LanguageConfig, LanguageRegistry,
1947 LanguageServerConfig, Point,
1948 };
1949 use lsp::Url;
1950 use serde_json::json;
1951 use std::{cell::RefCell, os::unix, path::PathBuf, rc::Rc};
1952 use unindent::Unindent as _;
1953 use util::test::temp_tree;
1954 use worktree::WorktreeHandle as _;
1955
    // Builds a real directory tree (including symlinks), opens it as a
    // worktree through a symlinked root, and verifies both the scan results
    // and fuzzy path matching against the scanned paths.
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        // Open the tree through a symlink, and add a symlinked directory
        // inside it.
        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let project = build_project(Arc::new(RealFs), &mut cx);

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(&root_link_path, false, cx)
            })
            .await
            .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert_eq!(tree.file_count(), 5);
            // Both paths resolve through the symlink to the same inode.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );
        });

        let cancel_flag = Default::default();
        let results = project
            .read_with(&cx, |project, cx| {
                project.match_paths("bna", false, false, 10, &cancel_flag, cx)
            })
            .await;
        assert_eq!(
            results
                .into_iter()
                .map(|result| result.path)
                .collect::<Vec<Arc<Path>>>(),
            vec![
                PathBuf::from("banana/carrot/date").into(),
                PathBuf::from("banana/carrot/endive").into(),
            ]
        );
    }
2018
    // Drives a fake language server through overlapping disk-based
    // diagnostic runs and verifies the project's event sequence
    // (Started / DiagnosticsUpdated / Updated / Finished) as well as the
    // diagnostics stored on the affected buffer.
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();

        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        path: Path::new("b.rs").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();

        let mut events = subscribe(&project, &mut cx);

        // First progress start: transition into "diagnostics running".
        fake_server.start_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsStarted
        );

        // Overlapping start/end cycles; the project stays in the running
        // state until the final end below.
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiagnosticsUpdated(ProjectPath {
                worktree_id,
                path: Arc::from(Path::new("a.rs"))
            })
        );

        // Ending the remaining progress runs finishes the diagnostics pass.
        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdated
        );
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsFinished
        );

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
2151
    // A worktree that contains only directories should produce no fuzzy
    // path-match results, since matching operates on file entries.
    #[gpui::test]
    async fn test_search_worktree_without_files(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "dir1": {},
                "dir2": {
                    "dir3": {}
                }
            }
        }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(&dir.path(), false, cx)
            })
            .await
            .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let cancel_flag = Default::default();
        let results = project
            .read_with(&cx, |project, cx| {
                project.match_paths("dir", false, false, 10, &cancel_flag, cx)
            })
            .await;

        assert!(results.is_empty());
    }
2183
    // Requests go-to-definition through a fake language server and verifies
    // that the response resolves to a buffer in a newly-created weak worktree
    // for the target file, and that this weak worktree is dropped once the
    // definition (and its buffer) is released.
    #[gpui::test]
    async fn test_definition(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;

        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "const fn a() { A }",
            "b.rs": "const y: i32 = crate::a()",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        // The worktree is rooted at the single file b.rs, so a.rs lies
        // outside of it.
        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path().join("b.rs"), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        path: Path::new("").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();
        let definitions =
            project.update(&mut cx, |project, cx| project.definition(&buffer, 22, cx));
        let (request_id, request) = fake_server
            .receive_request::<lsp::request::GotoDefinition>()
            .await;
        let request_params = request.text_document_position_params;
        assert_eq!(
            request_params.text_document.uri.to_file_path().unwrap(),
            dir.path().join("b.rs")
        );
        assert_eq!(request_params.position, lsp::Position::new(0, 22));

        // Respond with a location in a.rs, which is outside the worktree.
        fake_server
            .respond(
                request_id,
                Some(lsp::GotoDefinitionResponse::Scalar(lsp::Location::new(
                    lsp::Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                    lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                ))),
            )
            .await;
        let mut definitions = definitions.await.unwrap();
        assert_eq!(definitions.len(), 1);
        let definition = definitions.pop().unwrap();
        cx.update(|cx| {
            let target_buffer = definition.target_buffer.read(cx);
            assert_eq!(
                target_buffer
                    .file()
                    .unwrap()
                    .as_local()
                    .unwrap()
                    .abs_path(cx),
                dir.path().join("a.rs")
            );
            assert_eq!(definition.target_range.to_offset(target_buffer), 9..10);
            // a.rs was opened via a weak worktree created just for the
            // definition target.
            assert_eq!(
                list_worktrees(&project, cx),
                [
                    (dir.path().join("b.rs"), false),
                    (dir.path().join("a.rs"), true)
                ]
            );

            drop(definition);
        });
        // Dropping the definition releases the target buffer, and with it the
        // weak worktree.
        cx.read(|cx| {
            assert_eq!(
                list_worktrees(&project, cx),
                [(dir.path().join("b.rs"), false)]
            );
        });

        // Helper: each worktree's absolute path paired with its weakness.
        fn list_worktrees(project: &ModelHandle<Project>, cx: &AppContext) -> Vec<(PathBuf, bool)> {
            project
                .read(cx)
                .worktrees(cx)
                .map(|worktree| {
                    let worktree = worktree.read(cx);
                    (
                        worktree.as_local().unwrap().abs_path().to_path_buf(),
                        worktree.is_weak(),
                    )
                })
                .collect::<Vec<_>>()
        }
    }
2308
2309 #[gpui::test]
2310 async fn test_save_file(mut cx: gpui::TestAppContext) {
2311 let fs = Arc::new(FakeFs::new());
2312 fs.insert_tree(
2313 "/dir",
2314 json!({
2315 "file1": "the old contents",
2316 }),
2317 )
2318 .await;
2319
2320 let project = build_project(fs.clone(), &mut cx);
2321 let worktree_id = project
2322 .update(&mut cx, |p, cx| {
2323 p.find_or_create_local_worktree("/dir", false, cx)
2324 })
2325 .await
2326 .unwrap()
2327 .0
2328 .read_with(&cx, |tree, _| tree.id());
2329
2330 let buffer = project
2331 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
2332 .await
2333 .unwrap();
2334 buffer
2335 .update(&mut cx, |buffer, cx| {
2336 assert_eq!(buffer.text(), "the old contents");
2337 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
2338 buffer.save(cx)
2339 })
2340 .await
2341 .unwrap();
2342
2343 let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
2344 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
2345 }
2346
2347 #[gpui::test]
2348 async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
2349 let fs = Arc::new(FakeFs::new());
2350 fs.insert_tree(
2351 "/dir",
2352 json!({
2353 "file1": "the old contents",
2354 }),
2355 )
2356 .await;
2357
2358 let project = build_project(fs.clone(), &mut cx);
2359 let worktree_id = project
2360 .update(&mut cx, |p, cx| {
2361 p.find_or_create_local_worktree("/dir/file1", false, cx)
2362 })
2363 .await
2364 .unwrap()
2365 .0
2366 .read_with(&cx, |tree, _| tree.id());
2367
2368 let buffer = project
2369 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, ""), cx))
2370 .await
2371 .unwrap();
2372 buffer
2373 .update(&mut cx, |buffer, cx| {
2374 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
2375 buffer.save(cx)
2376 })
2377 .await
2378 .unwrap();
2379
2380 let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
2381 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
2382 }
2383
2384 #[gpui::test]
2385 async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
2386 let dir = temp_tree(json!({
2387 "a": {
2388 "file1": "",
2389 "file2": "",
2390 "file3": "",
2391 },
2392 "b": {
2393 "c": {
2394 "file4": "",
2395 "file5": "",
2396 }
2397 }
2398 }));
2399
2400 let project = build_project(Arc::new(RealFs), &mut cx);
2401 let rpc = project.read_with(&cx, |p, _| p.client.clone());
2402
2403 let (tree, _) = project
2404 .update(&mut cx, |p, cx| {
2405 p.find_or_create_local_worktree(dir.path(), false, cx)
2406 })
2407 .await
2408 .unwrap();
2409 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
2410
2411 let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
2412 let buffer = project.update(cx, |p, cx| p.open_buffer((worktree_id, path), cx));
2413 async move { buffer.await.unwrap() }
2414 };
2415 let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
2416 tree.read_with(cx, |tree, _| {
2417 tree.entry_for_path(path)
2418 .expect(&format!("no entry for path {}", path))
2419 .id
2420 })
2421 };
2422
2423 let buffer2 = buffer_for_path("a/file2", &mut cx).await;
2424 let buffer3 = buffer_for_path("a/file3", &mut cx).await;
2425 let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
2426 let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
2427
2428 let file2_id = id_for_path("a/file2", &cx);
2429 let file3_id = id_for_path("a/file3", &cx);
2430 let file4_id = id_for_path("b/c/file4", &cx);
2431
2432 // Wait for the initial scan.
2433 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2434 .await;
2435
2436 // Create a remote copy of this worktree.
2437 let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
2438 let remote = Worktree::remote(
2439 1,
2440 1,
2441 initial_snapshot.to_proto(&Default::default(), Default::default()),
2442 rpc.clone(),
2443 &mut cx.to_async(),
2444 )
2445 .await
2446 .unwrap();
2447
2448 cx.read(|cx| {
2449 assert!(!buffer2.read(cx).is_dirty());
2450 assert!(!buffer3.read(cx).is_dirty());
2451 assert!(!buffer4.read(cx).is_dirty());
2452 assert!(!buffer5.read(cx).is_dirty());
2453 });
2454
2455 // Rename and delete files and directories.
2456 tree.flush_fs_events(&cx).await;
2457 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
2458 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
2459 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
2460 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
2461 tree.flush_fs_events(&cx).await;
2462
2463 let expected_paths = vec![
2464 "a",
2465 "a/file1",
2466 "a/file2.new",
2467 "b",
2468 "d",
2469 "d/file3",
2470 "d/file4",
2471 ];
2472
2473 cx.read(|app| {
2474 assert_eq!(
2475 tree.read(app)
2476 .paths()
2477 .map(|p| p.to_str().unwrap())
2478 .collect::<Vec<_>>(),
2479 expected_paths
2480 );
2481
2482 assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
2483 assert_eq!(id_for_path("d/file3", &cx), file3_id);
2484 assert_eq!(id_for_path("d/file4", &cx), file4_id);
2485
2486 assert_eq!(
2487 buffer2.read(app).file().unwrap().path().as_ref(),
2488 Path::new("a/file2.new")
2489 );
2490 assert_eq!(
2491 buffer3.read(app).file().unwrap().path().as_ref(),
2492 Path::new("d/file3")
2493 );
2494 assert_eq!(
2495 buffer4.read(app).file().unwrap().path().as_ref(),
2496 Path::new("d/file4")
2497 );
2498 assert_eq!(
2499 buffer5.read(app).file().unwrap().path().as_ref(),
2500 Path::new("b/c/file5")
2501 );
2502
2503 assert!(!buffer2.read(app).file().unwrap().is_deleted());
2504 assert!(!buffer3.read(app).file().unwrap().is_deleted());
2505 assert!(!buffer4.read(app).file().unwrap().is_deleted());
2506 assert!(buffer5.read(app).file().unwrap().is_deleted());
2507 });
2508
2509 // Update the remote worktree. Check that it becomes consistent with the
2510 // local worktree.
2511 remote.update(&mut cx, |remote, cx| {
2512 let update_message =
2513 tree.read(cx)
2514 .snapshot()
2515 .build_update(&initial_snapshot, 1, 1, true);
2516 remote
2517 .as_remote_mut()
2518 .unwrap()
2519 .snapshot
2520 .apply_remote_update(update_message)
2521 .unwrap();
2522
2523 assert_eq!(
2524 remote
2525 .paths()
2526 .map(|p| p.to_str().unwrap())
2527 .collect::<Vec<_>>(),
2528 expected_paths
2529 );
2530 });
2531 }
2532
2533 #[gpui::test]
2534 async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
2535 let fs = Arc::new(FakeFs::new());
2536 fs.insert_tree(
2537 "/the-dir",
2538 json!({
2539 "a.txt": "a-contents",
2540 "b.txt": "b-contents",
2541 }),
2542 )
2543 .await;
2544
2545 let project = build_project(fs.clone(), &mut cx);
2546 let worktree_id = project
2547 .update(&mut cx, |p, cx| {
2548 p.find_or_create_local_worktree("/the-dir", false, cx)
2549 })
2550 .await
2551 .unwrap()
2552 .0
2553 .read_with(&cx, |tree, _| tree.id());
2554
2555 // Spawn multiple tasks to open paths, repeating some paths.
2556 let (buffer_a_1, buffer_b, buffer_a_2) = project.update(&mut cx, |p, cx| {
2557 (
2558 p.open_buffer((worktree_id, "a.txt"), cx),
2559 p.open_buffer((worktree_id, "b.txt"), cx),
2560 p.open_buffer((worktree_id, "a.txt"), cx),
2561 )
2562 });
2563
2564 let buffer_a_1 = buffer_a_1.await.unwrap();
2565 let buffer_a_2 = buffer_a_2.await.unwrap();
2566 let buffer_b = buffer_b.await.unwrap();
2567 assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
2568 assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
2569
2570 // There is only one buffer per path.
2571 let buffer_a_id = buffer_a_1.id();
2572 assert_eq!(buffer_a_2.id(), buffer_a_id);
2573
2574 // Open the same path again while it is still open.
2575 drop(buffer_a_1);
2576 let buffer_a_3 = project
2577 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))
2578 .await
2579 .unwrap();
2580
2581 // There's still only one buffer per path.
2582 assert_eq!(buffer_a_3.id(), buffer_a_id);
2583 }
2584
2585 #[gpui::test]
2586 async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
2587 use std::fs;
2588
2589 let dir = temp_tree(json!({
2590 "file1": "abc",
2591 "file2": "def",
2592 "file3": "ghi",
2593 }));
2594
2595 let project = build_project(Arc::new(RealFs), &mut cx);
2596 let (worktree, _) = project
2597 .update(&mut cx, |p, cx| {
2598 p.find_or_create_local_worktree(dir.path(), false, cx)
2599 })
2600 .await
2601 .unwrap();
2602 let worktree_id = worktree.read_with(&cx, |worktree, _| worktree.id());
2603
2604 worktree.flush_fs_events(&cx).await;
2605 worktree
2606 .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
2607 .await;
2608
2609 let buffer1 = project
2610 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
2611 .await
2612 .unwrap();
2613 let events = Rc::new(RefCell::new(Vec::new()));
2614
2615 // initially, the buffer isn't dirty.
2616 buffer1.update(&mut cx, |buffer, cx| {
2617 cx.subscribe(&buffer1, {
2618 let events = events.clone();
2619 move |_, _, event, _| events.borrow_mut().push(event.clone())
2620 })
2621 .detach();
2622
2623 assert!(!buffer.is_dirty());
2624 assert!(events.borrow().is_empty());
2625
2626 buffer.edit(vec![1..2], "", cx);
2627 });
2628
2629 // after the first edit, the buffer is dirty, and emits a dirtied event.
2630 buffer1.update(&mut cx, |buffer, cx| {
2631 assert!(buffer.text() == "ac");
2632 assert!(buffer.is_dirty());
2633 assert_eq!(
2634 *events.borrow(),
2635 &[language::Event::Edited, language::Event::Dirtied]
2636 );
2637 events.borrow_mut().clear();
2638 buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
2639 });
2640
2641 // after saving, the buffer is not dirty, and emits a saved event.
2642 buffer1.update(&mut cx, |buffer, cx| {
2643 assert!(!buffer.is_dirty());
2644 assert_eq!(*events.borrow(), &[language::Event::Saved]);
2645 events.borrow_mut().clear();
2646
2647 buffer.edit(vec![1..1], "B", cx);
2648 buffer.edit(vec![2..2], "D", cx);
2649 });
2650
2651 // after editing again, the buffer is dirty, and emits another dirty event.
2652 buffer1.update(&mut cx, |buffer, cx| {
2653 assert!(buffer.text() == "aBDc");
2654 assert!(buffer.is_dirty());
2655 assert_eq!(
2656 *events.borrow(),
2657 &[
2658 language::Event::Edited,
2659 language::Event::Dirtied,
2660 language::Event::Edited,
2661 ],
2662 );
2663 events.borrow_mut().clear();
2664
2665 // TODO - currently, after restoring the buffer to its
2666 // previously-saved state, the is still considered dirty.
2667 buffer.edit([1..3], "", cx);
2668 assert!(buffer.text() == "ac");
2669 assert!(buffer.is_dirty());
2670 });
2671
2672 assert_eq!(*events.borrow(), &[language::Event::Edited]);
2673
2674 // When a file is deleted, the buffer is considered dirty.
2675 let events = Rc::new(RefCell::new(Vec::new()));
2676 let buffer2 = project
2677 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file2"), cx))
2678 .await
2679 .unwrap();
2680 buffer2.update(&mut cx, |_, cx| {
2681 cx.subscribe(&buffer2, {
2682 let events = events.clone();
2683 move |_, _, event, _| events.borrow_mut().push(event.clone())
2684 })
2685 .detach();
2686 });
2687
2688 fs::remove_file(dir.path().join("file2")).unwrap();
2689 buffer2.condition(&cx, |b, _| b.is_dirty()).await;
2690 assert_eq!(
2691 *events.borrow(),
2692 &[language::Event::Dirtied, language::Event::FileHandleChanged]
2693 );
2694
2695 // When a file is already dirty when deleted, we don't emit a Dirtied event.
2696 let events = Rc::new(RefCell::new(Vec::new()));
2697 let buffer3 = project
2698 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file3"), cx))
2699 .await
2700 .unwrap();
2701 buffer3.update(&mut cx, |_, cx| {
2702 cx.subscribe(&buffer3, {
2703 let events = events.clone();
2704 move |_, _, event, _| events.borrow_mut().push(event.clone())
2705 })
2706 .detach();
2707 });
2708
2709 worktree.flush_fs_events(&cx).await;
2710 buffer3.update(&mut cx, |buffer, cx| {
2711 buffer.edit(Some(0..0), "x", cx);
2712 });
2713 events.borrow_mut().clear();
2714 fs::remove_file(dir.path().join("file3")).unwrap();
2715 buffer3
2716 .condition(&cx, |_, _| !events.borrow().is_empty())
2717 .await;
2718 assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
2719 cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
2720 }
2721
    // Verifies how a buffer reacts to its file changing on disk: a clean
    // buffer is reloaded in place, while a dirty buffer keeps its edits and is
    // marked as having a conflict.
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        // Wait for the initial scan before touching the file.
        worktree
            .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = project
            .update(&mut cx, |p, cx| {
                p.open_buffer((worktree_id, "the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        // After the reload, the buffer is clean and conflict-free.
        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
2821
    // Verifies that LSP diagnostics with `related_information` are merged into
    // groups: each primary diagnostic and its hint counterparts share a
    // `group_id`, with `is_primary` distinguishing the original from the
    // supplemental entries.
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/the-dir", false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        // Build a publish-diagnostics message containing two logical errors.
        // "error 1" has one hint; "error 2" has two. Each hint also appears as
        // its own diagnostic entry whose related information points back at
        // the original diagnostic, mirroring how rust-analyzer reports them.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        // Feed the message into the project and snapshot the buffer.
        project
            .update(&mut cx, |p, cx| {
                p.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All entries over the whole buffer: group 0 is "error 1" (primary)
        // plus its hint; group 1 is "error 2" (primary) plus its two hints.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be queried individually by its id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
3080
3081 fn build_project(fs: Arc<dyn Fs>, cx: &mut TestAppContext) -> ModelHandle<Project> {
3082 let languages = Arc::new(LanguageRegistry::new());
3083 let http_client = FakeHttpClient::with_404_response();
3084 let client = client::Client::new(http_client.clone());
3085 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3086 cx.update(|cx| Project::local(client, user_store, languages, fs, cx))
3087 }
3088}