1pub mod fs;
2mod ignore;
3pub mod worktree;
4
5use anyhow::{anyhow, Result};
6use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore};
7use clock::ReplicaId;
8use collections::{hash_map, HashMap, HashSet};
9use futures::Future;
10use fuzzy::{PathMatch, PathMatchCandidate, PathMatchCandidateSet};
11use gpui::{
12 AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task,
13 WeakModelHandle,
14};
15use language::{
16 range_from_lsp, Bias, Buffer, Diagnostic, DiagnosticEntry, File as _, Language,
17 LanguageRegistry, Operation, PointUtf16, ToOffset, ToPointUtf16,
18};
19use lsp::{DiagnosticSeverity, LanguageServer};
20use postage::{prelude::Stream, watch};
21use smol::block_on;
22use std::{
23 convert::TryInto,
24 ops::Range,
25 path::{Path, PathBuf},
26 sync::{
27 atomic::{AtomicBool, Ordering::SeqCst},
28 Arc,
29 },
30};
31use util::{post_inc, ResultExt, TryFutureExt as _};
32
33pub use fs::*;
34pub use worktree::*;
35
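/// A `Project` owns a set of worktrees along with the buffers, collaborators,
/// language servers, and client state needed to edit them, either as the local
/// host or as a remote guest.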
36pub struct Project {
37 worktrees: Vec<WorktreeHandle>,
38 active_entry: Option<ProjectEntry>,
39 languages: Arc<LanguageRegistry>,
40 language_servers: HashMap<(WorktreeId, String), Arc<LanguageServer>>,
41 client: Arc<client::Client>,
42 user_store: ModelHandle<UserStore>,
43 fs: Arc<dyn Fs>,
44 client_state: ProjectClientState,
45 collaborators: HashMap<PeerId, Collaborator>,
46 subscriptions: Vec<client::Subscription>,
47 language_servers_with_diagnostics_running: isize,
48 open_buffers: HashMap<usize, OpenBuffer>,
49 loading_buffers: HashMap<
50 ProjectPath,
51 postage::watch::Receiver<
52 Option<Result<(ModelHandle<Buffer>, Arc<AtomicBool>), Arc<anyhow::Error>>>,
53 >,
54 >,
55 shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
56}
57
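/// A buffer the project knows about: either a loaded buffer (held weakly so
/// that closing it releases the model) or a queue of operations received
/// before the buffer has been opened locally.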
58enum OpenBuffer {
59 Operations(Vec<Operation>),
60 Loaded(WeakModelHandle<Buffer>),
61}
62
63enum WorktreeHandle {
64 Strong(ModelHandle<Worktree>),
65 Weak(WeakModelHandle<Worktree>),
66}
67
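/// Whether this project is hosted locally or was joined over RPC. A local
/// project tracks the remote id assigned when it registers with the server; a
/// remote project records its replica id and whether the host stopped sharing.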
68enum ProjectClientState {
69 Local {
70 is_shared: bool,
71 remote_id_tx: watch::Sender<Option<u64>>,
72 remote_id_rx: watch::Receiver<Option<u64>>,
73 _maintain_remote_id_task: Task<Option<()>>,
74 },
75 Remote {
76 sharing_has_stopped: bool,
77 remote_id: u64,
78 replica_id: ReplicaId,
79 },
80}
81
82#[derive(Clone, Debug)]
83pub struct Collaborator {
84 pub user: Arc<User>,
85 pub peer_id: PeerId,
86 pub replica_id: ReplicaId,
87}
88
89#[derive(Clone, Debug, PartialEq)]
90pub enum Event {
91 ActiveEntryChanged(Option<ProjectEntry>),
92 WorktreeRemoved(WorktreeId),
93 DiskBasedDiagnosticsStarted,
94 DiskBasedDiagnosticsUpdated,
95 DiskBasedDiagnosticsFinished,
96 DiagnosticsUpdated(ProjectPath),
97}
98
99#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
100pub struct ProjectPath {
101 pub worktree_id: WorktreeId,
102 pub path: Arc<Path>,
103}
104
105#[derive(Clone, Debug, Default, PartialEq)]
106pub struct DiagnosticSummary {
107 pub error_count: usize,
108 pub warning_count: usize,
109 pub info_count: usize,
110 pub hint_count: usize,
111}
112
113#[derive(Debug)]
114pub struct Definition {
115 pub target_buffer: ModelHandle<Buffer>,
116 pub target_range: Range<language::Anchor>,
117}
118
119impl DiagnosticSummary {
120 fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
121 let mut this = Self {
122 error_count: 0,
123 warning_count: 0,
124 info_count: 0,
125 hint_count: 0,
126 };
127
128 for entry in diagnostics {
129 if entry.diagnostic.is_primary {
130 match entry.diagnostic.severity {
131 DiagnosticSeverity::ERROR => this.error_count += 1,
132 DiagnosticSeverity::WARNING => this.warning_count += 1,
133 DiagnosticSeverity::INFORMATION => this.info_count += 1,
134 DiagnosticSeverity::HINT => this.hint_count += 1,
135 _ => {}
136 }
137 }
138 }
139
140 this
141 }
142
143 pub fn to_proto(&self, path: Arc<Path>) -> proto::DiagnosticSummary {
144 proto::DiagnosticSummary {
145 path: path.to_string_lossy().to_string(),
146 error_count: self.error_count as u32,
147 warning_count: self.warning_count as u32,
148 info_count: self.info_count as u32,
149 hint_count: self.hint_count as u32,
150 }
151 }
152}
153
154#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
155pub struct ProjectEntry {
156 pub worktree_id: WorktreeId,
157 pub entry_id: usize,
158}
159
160impl Project {
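    /// Create a project hosted by this instance. A background task watches the
    /// client's connection status; while connected it registers the project
    /// (and each of its local worktrees) with the server and stores the
    /// resulting remote id.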
161 pub fn local(
162 client: Arc<Client>,
163 user_store: ModelHandle<UserStore>,
164 languages: Arc<LanguageRegistry>,
165 fs: Arc<dyn Fs>,
166 cx: &mut MutableAppContext,
167 ) -> ModelHandle<Self> {
168 cx.add_model(|cx: &mut ModelContext<Self>| {
169 let (remote_id_tx, remote_id_rx) = watch::channel();
170 let _maintain_remote_id_task = cx.spawn_weak({
171 let rpc = client.clone();
172 move |this, mut cx| {
173 async move {
174 let mut status = rpc.status();
175 while let Some(status) = status.recv().await {
176 if let Some(this) = this.upgrade(&cx) {
177 let remote_id = if let client::Status::Connected { .. } = status {
178 let response = rpc.request(proto::RegisterProject {}).await?;
179 Some(response.project_id)
180 } else {
181 None
182 };
183
184 if let Some(project_id) = remote_id {
185 let mut registrations = Vec::new();
186 this.update(&mut cx, |this, cx| {
187 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
188 registrations.push(worktree.update(
189 cx,
190 |worktree, cx| {
191 let worktree = worktree.as_local_mut().unwrap();
192 worktree.register(project_id, cx)
193 },
194 ));
195 }
196 });
197 for registration in registrations {
198 registration.await?;
199 }
200 }
201 this.update(&mut cx, |this, cx| this.set_remote_id(remote_id, cx));
202 }
203 }
204 Ok(())
205 }
206 .log_err()
207 }
208 });
209
210 Self {
211 worktrees: Default::default(),
212 collaborators: Default::default(),
213 open_buffers: Default::default(),
214 loading_buffers: Default::default(),
215 shared_buffers: Default::default(),
216 client_state: ProjectClientState::Local {
217 is_shared: false,
218 remote_id_tx,
219 remote_id_rx,
220 _maintain_remote_id_task,
221 },
222 subscriptions: Vec::new(),
223 active_entry: None,
224 languages,
225 client,
226 user_store,
227 fs,
228 language_servers_with_diagnostics_running: 0,
229 language_servers: Default::default(),
230 }
231 })
232 }
233
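    /// Join a project shared by another peer. Connects, requests the project's
    /// worktrees and collaborators from the server, and subscribes to the RPC
    /// messages a guest needs to keep its replica in sync.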
234 pub async fn remote(
235 remote_id: u64,
236 client: Arc<Client>,
237 user_store: ModelHandle<UserStore>,
238 languages: Arc<LanguageRegistry>,
239 fs: Arc<dyn Fs>,
240 cx: &mut AsyncAppContext,
241 ) -> Result<ModelHandle<Self>> {
242 client.authenticate_and_connect(&cx).await?;
243
244 let response = client
245 .request(proto::JoinProject {
246 project_id: remote_id,
247 })
248 .await?;
249
250 let replica_id = response.replica_id as ReplicaId;
251
252 let mut worktrees = Vec::new();
253 for worktree in response.worktrees {
254 worktrees
255 .push(Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx).await?);
256 }
257
258 let user_ids = response
259 .collaborators
260 .iter()
261 .map(|peer| peer.user_id)
262 .collect();
263 user_store
264 .update(cx, |user_store, cx| user_store.load_users(user_ids, cx))
265 .await?;
266 let mut collaborators = HashMap::default();
267 for message in response.collaborators {
268 let collaborator = Collaborator::from_proto(message, &user_store, cx).await?;
269 collaborators.insert(collaborator.peer_id, collaborator);
270 }
271
272 Ok(cx.add_model(|cx| {
273 let mut this = Self {
274 worktrees: Vec::new(),
275 open_buffers: Default::default(),
276 loading_buffers: Default::default(),
277 shared_buffers: Default::default(),
278 active_entry: None,
279 collaborators,
280 languages,
281 user_store,
282 fs,
283 subscriptions: vec![
284 client.subscribe_to_entity(remote_id, cx, Self::handle_unshare_project),
285 client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
286 client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
287 client.subscribe_to_entity(remote_id, cx, Self::handle_share_worktree),
288 client.subscribe_to_entity(remote_id, cx, Self::handle_unregister_worktree),
289 client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
290 client.subscribe_to_entity(
291 remote_id,
292 cx,
293 Self::handle_update_diagnostic_summary,
294 ),
295 client.subscribe_to_entity(
296 remote_id,
297 cx,
298 Self::handle_disk_based_diagnostics_updating,
299 ),
300 client.subscribe_to_entity(
301 remote_id,
302 cx,
303 Self::handle_disk_based_diagnostics_updated,
304 ),
305 client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
306 client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
307 ],
308 client,
309 client_state: ProjectClientState::Remote {
310 sharing_has_stopped: false,
311 remote_id,
312 replica_id,
313 },
314 language_servers_with_diagnostics_running: 0,
315 language_servers: Default::default(),
316 };
317 for worktree in worktrees {
318 this.add_worktree(&worktree, cx);
319 }
320 this
321 }))
322 }
323
324 fn set_remote_id(&mut self, remote_id: Option<u64>, cx: &mut ModelContext<Self>) {
325 if let ProjectClientState::Local { remote_id_tx, .. } = &mut self.client_state {
326 *remote_id_tx.borrow_mut() = remote_id;
327 }
328
329 self.subscriptions.clear();
330 if let Some(remote_id) = remote_id {
331 let client = &self.client;
332 self.subscriptions.extend([
333 client.subscribe_to_entity(remote_id, cx, Self::handle_open_buffer),
334 client.subscribe_to_entity(remote_id, cx, Self::handle_close_buffer),
335 client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
336 client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
337 client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
338 client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
339 client.subscribe_to_entity(remote_id, cx, Self::handle_save_buffer),
340 client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
341 client.subscribe_to_entity(remote_id, cx, Self::handle_format_buffer),
342 ]);
343 }
344 }
345
346 pub fn remote_id(&self) -> Option<u64> {
347 match &self.client_state {
348 ProjectClientState::Local { remote_id_rx, .. } => *remote_id_rx.borrow(),
349 ProjectClientState::Remote { remote_id, .. } => Some(*remote_id),
350 }
351 }
352
353 pub fn next_remote_id(&self) -> impl Future<Output = u64> {
354 let mut id = None;
355 let mut watch = None;
356 match &self.client_state {
357 ProjectClientState::Local { remote_id_rx, .. } => watch = Some(remote_id_rx.clone()),
358 ProjectClientState::Remote { remote_id, .. } => id = Some(*remote_id),
359 }
360
361 async move {
362 if let Some(id) = id {
363 return id;
364 }
365 let mut watch = watch.unwrap();
366 loop {
367 let id = *watch.borrow();
368 if let Some(id) = id {
369 return id;
370 }
371 watch.recv().await;
372 }
373 }
374 }
375
376 pub fn replica_id(&self) -> ReplicaId {
377 match &self.client_state {
378 ProjectClientState::Local { .. } => 0,
379 ProjectClientState::Remote { replica_id, .. } => *replica_id,
380 }
381 }
382
383 pub fn collaborators(&self) -> &HashMap<PeerId, Collaborator> {
384 &self.collaborators
385 }
386
387 pub fn worktrees<'a>(
388 &'a self,
389 cx: &'a AppContext,
390 ) -> impl 'a + Iterator<Item = ModelHandle<Worktree>> {
391 self.worktrees
392 .iter()
393 .filter_map(move |worktree| worktree.upgrade(cx))
394 }
395
396 pub fn worktree_for_id(
397 &self,
398 id: WorktreeId,
399 cx: &AppContext,
400 ) -> Option<ModelHandle<Worktree>> {
401 self.worktrees(cx)
402 .find(|worktree| worktree.read(cx).id() == id)
403 }
404
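    /// Start sharing this local project: notify the server, then share each
    /// local worktree so guests can join. Returns an error for remote projects.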
405 pub fn share(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
406 let rpc = self.client.clone();
407 cx.spawn(|this, mut cx| async move {
408 let project_id = this.update(&mut cx, |this, _| {
409 if let ProjectClientState::Local {
410 is_shared,
411 remote_id_rx,
412 ..
413 } = &mut this.client_state
414 {
415 *is_shared = true;
416 remote_id_rx
417 .borrow()
418 .ok_or_else(|| anyhow!("no project id"))
419 } else {
420 Err(anyhow!("can't share a remote project"))
421 }
422 })?;
423
424 rpc.request(proto::ShareProject { project_id }).await?;
425 let mut tasks = Vec::new();
426 this.update(&mut cx, |this, cx| {
427 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
428 worktree.update(cx, |worktree, cx| {
429 let worktree = worktree.as_local_mut().unwrap();
430 tasks.push(worktree.share(project_id, cx));
431 });
432 }
433 });
434 for task in tasks {
435 task.await?;
436 }
437 this.update(&mut cx, |_, cx| cx.notify());
438 Ok(())
439 })
440 }
441
442 pub fn unshare(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
443 let rpc = self.client.clone();
444 cx.spawn(|this, mut cx| async move {
445 let project_id = this.update(&mut cx, |this, _| {
446 if let ProjectClientState::Local {
447 is_shared,
448 remote_id_rx,
449 ..
450 } = &mut this.client_state
451 {
452 *is_shared = false;
453 remote_id_rx
454 .borrow()
455 .ok_or_else(|| anyhow!("no project id"))
456 } else {
                    Err(anyhow!("can't unshare a remote project"))
458 }
459 })?;
460
461 rpc.send(proto::UnshareProject { project_id }).await?;
462 this.update(&mut cx, |this, cx| {
463 this.collaborators.clear();
464 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
465 worktree.update(cx, |worktree, _| {
466 worktree.as_local_mut().unwrap().unshare();
467 });
468 }
469 cx.notify()
470 });
471 Ok(())
472 })
473 }
474
475 pub fn is_read_only(&self) -> bool {
476 match &self.client_state {
477 ProjectClientState::Local { .. } => false,
478 ProjectClientState::Remote {
479 sharing_has_stopped,
480 ..
481 } => *sharing_has_stopped,
482 }
483 }
484
485 pub fn is_local(&self) -> bool {
486 match &self.client_state {
487 ProjectClientState::Local { .. } => true,
488 ProjectClientState::Remote { .. } => false,
489 }
490 }
491
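    /// Open a buffer for a path in one of this project's worktrees, returning
    /// the existing buffer if the path is already open and deduplicating
    /// concurrent loads of the same path.
    ///
    /// A minimal usage sketch (mirroring the tests at the bottom of this file;
    /// `worktree_id` and `cx` are assumed to be provided by the caller):
    ///
    /// ```ignore
    /// let buffer = project
    ///     .update(&mut cx, |project, cx| {
    ///         project.open_buffer((worktree_id, "a.rs"), cx)
    ///     })
    ///     .await?;
    /// ```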
492 pub fn open_buffer(
493 &mut self,
494 path: impl Into<ProjectPath>,
495 cx: &mut ModelContext<Self>,
496 ) -> Task<Result<ModelHandle<Buffer>>> {
497 let path = path.into();
498 let worktree = if let Some(worktree) = self.worktree_for_id(path.worktree_id, cx) {
499 worktree
500 } else {
501 return cx
502 .foreground()
503 .spawn(async move { Err(anyhow!("no such worktree")) });
504 };
505
506 // If there is already a buffer for the given path, then return it.
507 let existing_buffer = self.get_open_buffer(&path, cx);
508 if let Some(existing_buffer) = existing_buffer {
509 return cx.foreground().spawn(async move { Ok(existing_buffer) });
510 }
511
512 let mut loading_watch = match self.loading_buffers.entry(path.clone()) {
513 // If the given path is already being loaded, then wait for that existing
514 // task to complete and return the same buffer.
515 hash_map::Entry::Occupied(e) => e.get().clone(),
516
517 // Otherwise, record the fact that this path is now being loaded.
518 hash_map::Entry::Vacant(entry) => {
519 let (mut tx, rx) = postage::watch::channel();
520 entry.insert(rx.clone());
521
522 let load_buffer = worktree.update(cx, |worktree, cx| match worktree {
523 Worktree::Local(worktree) => worktree.open_buffer(&path.path, cx),
524 Worktree::Remote(worktree) => worktree.open_buffer(&path.path, cx),
525 });
526
527 cx.spawn(move |this, mut cx| async move {
528 let load_result = load_buffer.await;
529 *tx.borrow_mut() = Some(this.update(&mut cx, |this, cx| {
530 // Record the fact that the buffer is no longer loading.
531 this.loading_buffers.remove(&path);
532 let buffer = load_result.map_err(Arc::new)?;
533 this.open_buffers.insert(
534 buffer.read(cx).remote_id() as usize,
535 OpenBuffer::Loaded(buffer.downgrade()),
536 );
537 Ok((buffer, Arc::new(AtomicBool::new(true))))
538 }));
539 })
540 .detach();
541 rx
542 }
543 };
544
545 cx.spawn(|this, mut cx| async move {
546 let (buffer, buffer_is_new) = loop {
547 if let Some(result) = loading_watch.borrow().as_ref() {
548 break match result {
549 Ok((buf, is_new)) => Ok((buf.clone(), is_new.fetch_and(false, SeqCst))),
550 Err(error) => Err(anyhow!("{}", error)),
551 };
552 }
553 loading_watch.recv().await;
554 }?;
555
556 if buffer_is_new {
557 this.update(&mut cx, |this, cx| {
558 this.assign_language_to_buffer(worktree, buffer.clone(), cx)
559 });
560 }
561 Ok(buffer)
562 })
563 }
564
565 pub fn save_buffer_as(
566 &self,
567 buffer: ModelHandle<Buffer>,
568 abs_path: PathBuf,
569 cx: &mut ModelContext<Project>,
570 ) -> Task<Result<()>> {
571 let worktree_task = self.find_or_create_local_worktree(&abs_path, false, cx);
572 cx.spawn(|this, mut cx| async move {
573 let (worktree, path) = worktree_task.await?;
574 worktree
575 .update(&mut cx, |worktree, cx| {
576 worktree
577 .as_local_mut()
578 .unwrap()
579 .save_buffer_as(buffer.clone(), path, cx)
580 })
581 .await?;
582 this.update(&mut cx, |this, cx| {
583 this.open_buffers
584 .insert(buffer.id(), OpenBuffer::Loaded(buffer.downgrade()));
585 this.assign_language_to_buffer(worktree, buffer, cx)
586 });
587 Ok(())
588 })
589 }
590
591 #[cfg(any(test, feature = "test-support"))]
592 pub fn has_open_buffer(&self, path: impl Into<ProjectPath>, cx: &AppContext) -> bool {
593 let path = path.into();
594 if let Some(worktree) = self.worktree_for_id(path.worktree_id, cx) {
595 self.open_buffers.iter().any(|(_, buffer)| {
596 if let Some(buffer) = buffer.upgrade(cx) {
597 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
598 if file.worktree == worktree && file.path() == &path.path {
599 return true;
600 }
601 }
602 }
603 false
604 })
605 } else {
606 false
607 }
608 }
609
610 fn get_open_buffer(
611 &mut self,
612 path: &ProjectPath,
613 cx: &mut ModelContext<Self>,
614 ) -> Option<ModelHandle<Buffer>> {
615 let mut result = None;
616 let worktree = self.worktree_for_id(path.worktree_id, cx)?;
617 self.open_buffers.retain(|_, buffer| {
618 if let OpenBuffer::Loaded(buffer) = buffer {
619 if let Some(buffer) = buffer.upgrade(cx) {
620 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
621 if file.worktree == worktree && file.path() == &path.path {
622 result = Some(buffer);
623 }
624 }
625 return true;
626 }
627 }
628 false
629 });
630 result
631 }
632
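    /// Choose a language for the buffer based on its path and, for local
    /// worktrees, start (or reuse) the matching language server and attach it
    /// to the buffer.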
633 fn assign_language_to_buffer(
634 &mut self,
635 worktree: ModelHandle<Worktree>,
636 buffer: ModelHandle<Buffer>,
637 cx: &mut ModelContext<Self>,
638 ) -> Option<()> {
639 // Set the buffer's language
640 let full_path = buffer.read(cx).file()?.full_path();
641 let language = self.languages.select_language(&full_path)?.clone();
642 buffer.update(cx, |buffer, cx| {
643 buffer.set_language(Some(language.clone()), cx);
644 });
645
646 // For local worktrees, start a language server if needed.
647 let worktree = worktree.read(cx);
648 let worktree_id = worktree.id();
649 let worktree_abs_path = worktree.as_local()?.abs_path().clone();
650 let language_server = match self
651 .language_servers
652 .entry((worktree_id, language.name().to_string()))
653 {
654 hash_map::Entry::Occupied(e) => Some(e.get().clone()),
655 hash_map::Entry::Vacant(e) => {
656 Self::start_language_server(self.client.clone(), language, &worktree_abs_path, cx)
657 .map(|server| e.insert(server).clone())
658 }
659 };
660
661 buffer.update(cx, |buffer, cx| {
662 buffer.set_language_server(language_server, cx)
663 });
664
665 None
666 }
667
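    /// Start a language server for `language` rooted at the given worktree
    /// path, forwarding its `PublishDiagnostics` and progress notifications
    /// into the project's diagnostics state.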
668 fn start_language_server(
669 rpc: Arc<Client>,
670 language: Arc<Language>,
671 worktree_path: &Path,
672 cx: &mut ModelContext<Self>,
673 ) -> Option<Arc<LanguageServer>> {
674 enum LspEvent {
675 DiagnosticsStart,
676 DiagnosticsUpdate(lsp::PublishDiagnosticsParams),
677 DiagnosticsFinish,
678 }
679
680 let language_server = language
681 .start_server(worktree_path, cx)
682 .log_err()
683 .flatten()?;
684 let disk_based_sources = language
685 .disk_based_diagnostic_sources()
686 .cloned()
687 .unwrap_or_default();
688 let disk_based_diagnostics_progress_token =
689 language.disk_based_diagnostics_progress_token().cloned();
690 let has_disk_based_diagnostic_progress_token =
691 disk_based_diagnostics_progress_token.is_some();
692 let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
693
694 // Listen for `PublishDiagnostics` notifications.
695 language_server
696 .on_notification::<lsp::notification::PublishDiagnostics, _>({
697 let diagnostics_tx = diagnostics_tx.clone();
698 move |params| {
699 if !has_disk_based_diagnostic_progress_token {
700 block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
701 }
702 block_on(diagnostics_tx.send(LspEvent::DiagnosticsUpdate(params))).ok();
703 if !has_disk_based_diagnostic_progress_token {
704 block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
705 }
706 }
707 })
708 .detach();
709
710 // Listen for `Progress` notifications. Send an event when the language server
711 // transitions between running jobs and not running any jobs.
712 let mut running_jobs_for_this_server: i32 = 0;
713 language_server
714 .on_notification::<lsp::notification::Progress, _>(move |params| {
715 let token = match params.token {
716 lsp::NumberOrString::Number(_) => None,
717 lsp::NumberOrString::String(token) => Some(token),
718 };
719
720 if token == disk_based_diagnostics_progress_token {
721 match params.value {
722 lsp::ProgressParamsValue::WorkDone(progress) => match progress {
723 lsp::WorkDoneProgress::Begin(_) => {
724 running_jobs_for_this_server += 1;
725 if running_jobs_for_this_server == 1 {
726 block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
727 }
728 }
729 lsp::WorkDoneProgress::End(_) => {
730 running_jobs_for_this_server -= 1;
731 if running_jobs_for_this_server == 0 {
732 block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
733 }
734 }
735 _ => {}
736 },
737 }
738 }
739 })
740 .detach();
741
742 // Process all the LSP events.
743 cx.spawn_weak(|this, mut cx| async move {
744 while let Ok(message) = diagnostics_rx.recv().await {
745 let this = cx.read(|cx| this.upgrade(cx))?;
746 match message {
747 LspEvent::DiagnosticsStart => {
748 let send = this.update(&mut cx, |this, cx| {
749 this.disk_based_diagnostics_started(cx);
750 this.remote_id().map(|project_id| {
751 rpc.send(proto::DiskBasedDiagnosticsUpdating { project_id })
752 })
753 });
754 if let Some(send) = send {
755 send.await.log_err();
756 }
757 }
758 LspEvent::DiagnosticsUpdate(params) => {
759 this.update(&mut cx, |this, cx| {
760 this.update_diagnostics(params, &disk_based_sources, cx)
761 .log_err();
762 });
763 }
764 LspEvent::DiagnosticsFinish => {
765 let send = this.update(&mut cx, |this, cx| {
766 this.disk_based_diagnostics_finished(cx);
767 this.remote_id().map(|project_id| {
768 rpc.send(proto::DiskBasedDiagnosticsUpdated { project_id })
769 })
770 });
771 if let Some(send) = send {
772 send.await.log_err();
773 }
774 }
775 }
776 }
777 Some(())
778 })
779 .detach();
780
781 Some(language_server)
782 }
783
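    /// Translate an LSP `PublishDiagnostics` notification into diagnostic
    /// entries, grouping related information with its primary diagnostic, and
    /// apply them to the affected worktree and any matching open buffer.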
784 pub fn update_diagnostics(
785 &mut self,
786 params: lsp::PublishDiagnosticsParams,
787 disk_based_sources: &HashSet<String>,
788 cx: &mut ModelContext<Self>,
789 ) -> Result<()> {
790 let abs_path = params
791 .uri
792 .to_file_path()
793 .map_err(|_| anyhow!("URI is not a file"))?;
794 let mut next_group_id = 0;
795 let mut diagnostics = Vec::default();
796 let mut primary_diagnostic_group_ids = HashMap::default();
797 let mut sources_by_group_id = HashMap::default();
798 let mut supporting_diagnostic_severities = HashMap::default();
        for diagnostic in &params.diagnostics {
800 let source = diagnostic.source.as_ref();
801 let code = diagnostic.code.as_ref().map(|code| match code {
802 lsp::NumberOrString::Number(code) => code.to_string(),
803 lsp::NumberOrString::String(code) => code.clone(),
804 });
805 let range = range_from_lsp(diagnostic.range);
806 let is_supporting = diagnostic
807 .related_information
808 .as_ref()
809 .map_or(false, |infos| {
810 infos.iter().any(|info| {
811 primary_diagnostic_group_ids.contains_key(&(
812 source,
813 code.clone(),
814 range_from_lsp(info.location.range),
815 ))
816 })
817 });
818
819 if is_supporting {
820 if let Some(severity) = diagnostic.severity {
821 supporting_diagnostic_severities
822 .insert((source, code.clone(), range), severity);
823 }
824 } else {
825 let group_id = post_inc(&mut next_group_id);
826 let is_disk_based =
827 source.map_or(false, |source| disk_based_sources.contains(source));
828
829 sources_by_group_id.insert(group_id, source);
830 primary_diagnostic_group_ids
831 .insert((source, code.clone(), range.clone()), group_id);
832
833 diagnostics.push(DiagnosticEntry {
834 range,
835 diagnostic: Diagnostic {
836 code: code.clone(),
837 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
838 message: diagnostic.message.clone(),
839 group_id,
840 is_primary: true,
841 is_valid: true,
842 is_disk_based,
843 },
844 });
845 if let Some(infos) = &diagnostic.related_information {
846 for info in infos {
847 if info.location.uri == params.uri {
848 let range = range_from_lsp(info.location.range);
849 diagnostics.push(DiagnosticEntry {
850 range,
851 diagnostic: Diagnostic {
852 code: code.clone(),
853 severity: DiagnosticSeverity::INFORMATION,
854 message: info.message.clone(),
855 group_id,
856 is_primary: false,
857 is_valid: true,
858 is_disk_based,
859 },
860 });
861 }
862 }
863 }
864 }
865 }
866
867 for entry in &mut diagnostics {
868 let diagnostic = &mut entry.diagnostic;
869 if !diagnostic.is_primary {
870 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
871 if let Some(&severity) = supporting_diagnostic_severities.get(&(
872 source,
873 diagnostic.code.clone(),
874 entry.range.clone(),
875 )) {
876 diagnostic.severity = severity;
877 }
878 }
879 }
880
881 self.update_diagnostic_entries(abs_path, params.version, diagnostics, cx)?;
882 Ok(())
883 }
884
885 pub fn update_diagnostic_entries(
886 &mut self,
887 abs_path: PathBuf,
888 version: Option<i32>,
889 diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
890 cx: &mut ModelContext<Project>,
891 ) -> Result<(), anyhow::Error> {
892 let (worktree, relative_path) = self
893 .find_local_worktree(&abs_path, cx)
894 .ok_or_else(|| anyhow!("no worktree found for diagnostics"))?;
895 let project_path = ProjectPath {
896 worktree_id: worktree.read(cx).id(),
897 path: relative_path.into(),
898 };
899
900 for buffer in self.open_buffers.values() {
901 if let Some(buffer) = buffer.upgrade(cx) {
902 if buffer
903 .read(cx)
904 .file()
905 .map_or(false, |file| *file.path() == project_path.path)
906 {
907 buffer.update(cx, |buffer, cx| {
908 buffer.update_diagnostics(version, diagnostics.clone(), cx)
909 })?;
910 break;
911 }
912 }
913 }
914 worktree.update(cx, |worktree, cx| {
915 worktree
916 .as_local_mut()
917 .ok_or_else(|| anyhow!("not a local worktree"))?
918 .update_diagnostics(project_path.path.clone(), diagnostics, cx)
919 })?;
920 cx.emit(Event::DiagnosticsUpdated(project_path));
921 Ok(())
922 }
923
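    /// Query the buffer's language server for the definitions of the symbol at
    /// `position`, opening additional worktrees and buffers as needed to hold
    /// the targets. Only implemented for local projects so far.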
924 pub fn definition<T: ToOffset>(
925 &self,
926 source_buffer_handle: &ModelHandle<Buffer>,
927 position: T,
928 cx: &mut ModelContext<Self>,
929 ) -> Task<Result<Vec<Definition>>> {
930 let source_buffer_handle = source_buffer_handle.clone();
931 let buffer = source_buffer_handle.read(cx);
932 let worktree;
933 let buffer_abs_path;
934 if let Some(file) = File::from_dyn(buffer.file()) {
935 worktree = file.worktree.clone();
936 buffer_abs_path = file.abs_path();
937 } else {
938 return Task::ready(Err(anyhow!("buffer does not belong to any worktree")));
939 };
940
941 if worktree.read(cx).as_local().is_some() {
942 let point = buffer.offset_to_point_utf16(position.to_offset(buffer));
943 let buffer_abs_path = buffer_abs_path.unwrap();
944 let lang_name;
945 let lang_server;
946 if let Some(lang) = buffer.language() {
947 lang_name = lang.name().to_string();
948 if let Some(server) = self
949 .language_servers
950 .get(&(worktree.read(cx).id(), lang_name.clone()))
951 {
952 lang_server = server.clone();
953 } else {
954 return Task::ready(Err(anyhow!("buffer does not have a language server")));
955 };
956 } else {
957 return Task::ready(Err(anyhow!("buffer does not have a language")));
958 }
959
960 cx.spawn(|this, mut cx| async move {
961 let response = lang_server
962 .request::<lsp::request::GotoDefinition>(lsp::GotoDefinitionParams {
963 text_document_position_params: lsp::TextDocumentPositionParams {
964 text_document: lsp::TextDocumentIdentifier::new(
965 lsp::Url::from_file_path(&buffer_abs_path).unwrap(),
966 ),
967 position: lsp::Position::new(point.row, point.column),
968 },
969 work_done_progress_params: Default::default(),
970 partial_result_params: Default::default(),
971 })
972 .await?;
973
974 let mut definitions = Vec::new();
975 if let Some(response) = response {
976 let mut unresolved_locations = Vec::new();
977 match response {
978 lsp::GotoDefinitionResponse::Scalar(loc) => {
979 unresolved_locations.push((loc.uri, loc.range));
980 }
981 lsp::GotoDefinitionResponse::Array(locs) => {
982 unresolved_locations.extend(locs.into_iter().map(|l| (l.uri, l.range)));
983 }
984 lsp::GotoDefinitionResponse::Link(links) => {
985 unresolved_locations.extend(
986 links
987 .into_iter()
988 .map(|l| (l.target_uri, l.target_selection_range)),
989 );
990 }
991 }
992
993 for (target_uri, target_range) in unresolved_locations {
994 let abs_path = target_uri
995 .to_file_path()
996 .map_err(|_| anyhow!("invalid target path"))?;
997
998 let (worktree, relative_path) = if let Some(result) =
999 this.read_with(&cx, |this, cx| this.find_local_worktree(&abs_path, cx))
1000 {
1001 result
1002 } else {
1003 let worktree = this
1004 .update(&mut cx, |this, cx| {
1005 this.create_local_worktree(&abs_path, true, cx)
1006 })
1007 .await?;
1008 this.update(&mut cx, |this, cx| {
1009 this.language_servers.insert(
1010 (worktree.read(cx).id(), lang_name.clone()),
1011 lang_server.clone(),
1012 );
1013 });
1014 (worktree, PathBuf::new())
1015 };
1016
1017 let project_path = ProjectPath {
1018 worktree_id: worktree.read_with(&cx, |worktree, _| worktree.id()),
1019 path: relative_path.into(),
1020 };
1021 let target_buffer_handle = this
1022 .update(&mut cx, |this, cx| this.open_buffer(project_path, cx))
1023 .await?;
1024 cx.read(|cx| {
1025 let target_buffer = target_buffer_handle.read(cx);
1026 let target_start = target_buffer
1027 .clip_point_utf16(target_range.start.to_point_utf16(), Bias::Left);
1028 let target_end = target_buffer
1029 .clip_point_utf16(target_range.end.to_point_utf16(), Bias::Left);
1030 definitions.push(Definition {
1031 target_buffer: target_buffer_handle,
1032 target_range: target_buffer.anchor_after(target_start)
1033 ..target_buffer.anchor_before(target_end),
1034 });
1035 });
1036 }
1037 }
1038
1039 Ok(definitions)
1040 })
1041 } else {
1042 log::info!("go to definition is not yet implemented for guests");
1043 Task::ready(Ok(Default::default()))
1044 }
1045 }
1046
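    /// Find the worktree containing `abs_path` and return it along with the
    /// path relative to its root, creating a new local worktree if none
    /// contains the path.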
1047 pub fn find_or_create_local_worktree(
1048 &self,
1049 abs_path: impl AsRef<Path>,
1050 weak: bool,
1051 cx: &mut ModelContext<Self>,
1052 ) -> Task<Result<(ModelHandle<Worktree>, PathBuf)>> {
1053 let abs_path = abs_path.as_ref();
1054 if let Some((tree, relative_path)) = self.find_local_worktree(abs_path, cx) {
1055 Task::ready(Ok((tree.clone(), relative_path.into())))
1056 } else {
1057 let worktree = self.create_local_worktree(abs_path, weak, cx);
1058 cx.foreground()
1059 .spawn(async move { Ok((worktree.await?, PathBuf::new())) })
1060 }
1061 }
1062
1063 fn find_local_worktree(
1064 &self,
1065 abs_path: &Path,
1066 cx: &AppContext,
1067 ) -> Option<(ModelHandle<Worktree>, PathBuf)> {
1068 for tree in self.worktrees(cx) {
1069 if let Some(relative_path) = tree
1070 .read(cx)
1071 .as_local()
1072 .and_then(|t| abs_path.strip_prefix(t.abs_path()).ok())
1073 {
1074 return Some((tree.clone(), relative_path.into()));
1075 }
1076 }
1077 None
1078 }
1079
1080 pub fn is_shared(&self) -> bool {
1081 match &self.client_state {
1082 ProjectClientState::Local { is_shared, .. } => *is_shared,
1083 ProjectClientState::Remote { .. } => false,
1084 }
1085 }
1086
1087 fn create_local_worktree(
1088 &self,
1089 abs_path: impl AsRef<Path>,
1090 weak: bool,
1091 cx: &mut ModelContext<Self>,
1092 ) -> Task<Result<ModelHandle<Worktree>>> {
1093 let fs = self.fs.clone();
1094 let client = self.client.clone();
1095 let path = Arc::from(abs_path.as_ref());
1096 cx.spawn(|project, mut cx| async move {
1097 let worktree = Worktree::local(client.clone(), path, weak, fs, &mut cx).await?;
1098
1099 let (remote_project_id, is_shared) = project.update(&mut cx, |project, cx| {
1100 project.add_worktree(&worktree, cx);
1101 (project.remote_id(), project.is_shared())
1102 });
1103
1104 if let Some(project_id) = remote_project_id {
1105 worktree
1106 .update(&mut cx, |worktree, cx| {
1107 worktree.as_local_mut().unwrap().register(project_id, cx)
1108 })
1109 .await?;
1110 if is_shared {
1111 worktree
1112 .update(&mut cx, |worktree, cx| {
1113 worktree.as_local_mut().unwrap().share(project_id, cx)
1114 })
1115 .await?;
1116 }
1117 }
1118
1119 Ok(worktree)
1120 })
1121 }
1122
1123 pub fn remove_worktree(&mut self, id: WorktreeId, cx: &mut ModelContext<Self>) {
1124 self.worktrees.retain(|worktree| {
1125 worktree
1126 .upgrade(cx)
1127 .map_or(false, |w| w.read(cx).id() != id)
1128 });
1129 cx.notify();
1130 }
1131
1132 fn add_worktree(&mut self, worktree: &ModelHandle<Worktree>, cx: &mut ModelContext<Self>) {
1133 cx.observe(&worktree, |_, _, cx| cx.notify()).detach();
1134 cx.subscribe(&worktree, |this, worktree, _, cx| {
1135 this.update_open_buffers(worktree, cx)
1136 })
1137 .detach();
1138
1139 let push_weak_handle = {
1140 let worktree = worktree.read(cx);
1141 worktree.is_local() && worktree.is_weak()
1142 };
1143 if push_weak_handle {
1144 cx.observe_release(&worktree, |this, cx| {
1145 this.worktrees
1146 .retain(|worktree| worktree.upgrade(cx).is_some());
1147 cx.notify();
1148 })
1149 .detach();
1150 self.worktrees
1151 .push(WorktreeHandle::Weak(worktree.downgrade()));
1152 } else {
1153 self.worktrees
1154 .push(WorktreeHandle::Strong(worktree.clone()));
1155 }
1156 cx.notify();
1157 }
1158
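    /// When a worktree changes, refresh the `File` associated with each open
    /// buffer that belongs to it, and drop entries whose buffers have been
    /// released.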
1159 fn update_open_buffers(
1160 &mut self,
1161 worktree_handle: ModelHandle<Worktree>,
1162 cx: &mut ModelContext<Self>,
1163 ) {
1164 let local = worktree_handle.read(cx).is_local();
1165 let snapshot = worktree_handle.read(cx).snapshot();
1166 let worktree_path = snapshot.abs_path();
1167 let mut buffers_to_delete = Vec::new();
1168 for (buffer_id, buffer) in &self.open_buffers {
1169 if let OpenBuffer::Loaded(buffer) = buffer {
1170 if let Some(buffer) = buffer.upgrade(cx) {
1171 buffer.update(cx, |buffer, cx| {
1172 if let Some(old_file) = File::from_dyn(buffer.file()) {
1173 if old_file.worktree != worktree_handle {
1174 return;
1175 }
1176
1177 let new_file = if let Some(entry) = old_file
1178 .entry_id
1179 .and_then(|entry_id| snapshot.entry_for_id(entry_id))
1180 {
1181 File {
1182 is_local: local,
1183 worktree_path: worktree_path.clone(),
1184 entry_id: Some(entry.id),
1185 mtime: entry.mtime,
1186 path: entry.path.clone(),
1187 worktree: worktree_handle.clone(),
1188 }
1189 } else if let Some(entry) =
1190 snapshot.entry_for_path(old_file.path().as_ref())
1191 {
1192 File {
1193 is_local: local,
1194 worktree_path: worktree_path.clone(),
1195 entry_id: Some(entry.id),
1196 mtime: entry.mtime,
1197 path: entry.path.clone(),
1198 worktree: worktree_handle.clone(),
1199 }
1200 } else {
1201 File {
1202 is_local: local,
1203 worktree_path: worktree_path.clone(),
1204 entry_id: None,
1205 path: old_file.path().clone(),
1206 mtime: old_file.mtime(),
1207 worktree: worktree_handle.clone(),
1208 }
1209 };
1210
1211 if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
1212 task.detach();
1213 }
1214 }
1215 });
1216 } else {
1217 buffers_to_delete.push(*buffer_id);
1218 }
1219 }
1220 }
1221
1222 for buffer_id in buffers_to_delete {
1223 self.open_buffers.remove(&buffer_id);
1224 }
1225 }
1226
1227 pub fn set_active_path(&mut self, entry: Option<ProjectPath>, cx: &mut ModelContext<Self>) {
1228 let new_active_entry = entry.and_then(|project_path| {
1229 let worktree = self.worktree_for_id(project_path.worktree_id, cx)?;
1230 let entry = worktree.read(cx).entry_for_path(project_path.path)?;
1231 Some(ProjectEntry {
1232 worktree_id: project_path.worktree_id,
1233 entry_id: entry.id,
1234 })
1235 });
1236 if new_active_entry != self.active_entry {
1237 self.active_entry = new_active_entry;
1238 cx.emit(Event::ActiveEntryChanged(new_active_entry));
1239 }
1240 }
1241
1242 pub fn is_running_disk_based_diagnostics(&self) -> bool {
1243 self.language_servers_with_diagnostics_running > 0
1244 }
1245
1246 pub fn diagnostic_summary(&self, cx: &AppContext) -> DiagnosticSummary {
1247 let mut summary = DiagnosticSummary::default();
1248 for (_, path_summary) in self.diagnostic_summaries(cx) {
1249 summary.error_count += path_summary.error_count;
1250 summary.warning_count += path_summary.warning_count;
1251 summary.info_count += path_summary.info_count;
1252 summary.hint_count += path_summary.hint_count;
1253 }
1254 summary
1255 }
1256
1257 pub fn diagnostic_summaries<'a>(
1258 &'a self,
1259 cx: &'a AppContext,
1260 ) -> impl Iterator<Item = (ProjectPath, DiagnosticSummary)> + 'a {
1261 self.worktrees(cx).flat_map(move |worktree| {
1262 let worktree = worktree.read(cx);
1263 let worktree_id = worktree.id();
1264 worktree
1265 .diagnostic_summaries()
1266 .map(move |(path, summary)| (ProjectPath { worktree_id, path }, summary))
1267 })
1268 }
1269
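    /// Disk-based diagnostics are tracked with a counter of running language
    /// servers; the started/finished events fire only on the 0 -> 1 and
    /// 1 -> 0 transitions, while `DiskBasedDiagnosticsUpdated` is emitted for
    /// every completed run.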
1270 pub fn disk_based_diagnostics_started(&mut self, cx: &mut ModelContext<Self>) {
1271 self.language_servers_with_diagnostics_running += 1;
1272 if self.language_servers_with_diagnostics_running == 1 {
1273 cx.emit(Event::DiskBasedDiagnosticsStarted);
1274 }
1275 }
1276
1277 pub fn disk_based_diagnostics_finished(&mut self, cx: &mut ModelContext<Self>) {
1278 cx.emit(Event::DiskBasedDiagnosticsUpdated);
1279 self.language_servers_with_diagnostics_running -= 1;
1280 if self.language_servers_with_diagnostics_running == 0 {
1281 cx.emit(Event::DiskBasedDiagnosticsFinished);
1282 }
1283 }
1284
1285 pub fn active_entry(&self) -> Option<ProjectEntry> {
1286 self.active_entry
1287 }
1288
1289 // RPC message handlers
1290
1291 fn handle_unshare_project(
1292 &mut self,
1293 _: TypedEnvelope<proto::UnshareProject>,
1294 _: Arc<Client>,
1295 cx: &mut ModelContext<Self>,
1296 ) -> Result<()> {
1297 if let ProjectClientState::Remote {
1298 sharing_has_stopped,
1299 ..
1300 } = &mut self.client_state
1301 {
1302 *sharing_has_stopped = true;
1303 self.collaborators.clear();
1304 cx.notify();
1305 Ok(())
1306 } else {
1307 unreachable!()
1308 }
1309 }
1310
1311 fn handle_add_collaborator(
1312 &mut self,
1313 mut envelope: TypedEnvelope<proto::AddProjectCollaborator>,
1314 _: Arc<Client>,
1315 cx: &mut ModelContext<Self>,
1316 ) -> Result<()> {
1317 let user_store = self.user_store.clone();
1318 let collaborator = envelope
1319 .payload
1320 .collaborator
1321 .take()
1322 .ok_or_else(|| anyhow!("empty collaborator"))?;
1323
1324 cx.spawn(|this, mut cx| {
1325 async move {
1326 let collaborator =
1327 Collaborator::from_proto(collaborator, &user_store, &mut cx).await?;
1328 this.update(&mut cx, |this, cx| {
1329 this.collaborators
1330 .insert(collaborator.peer_id, collaborator);
1331 cx.notify();
1332 });
1333 Ok(())
1334 }
1335 .log_err()
1336 })
1337 .detach();
1338
1339 Ok(())
1340 }
1341
1342 fn handle_remove_collaborator(
1343 &mut self,
1344 envelope: TypedEnvelope<proto::RemoveProjectCollaborator>,
1345 _: Arc<Client>,
1346 cx: &mut ModelContext<Self>,
1347 ) -> Result<()> {
1348 let peer_id = PeerId(envelope.payload.peer_id);
1349 let replica_id = self
1350 .collaborators
1351 .remove(&peer_id)
1352 .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?
1353 .replica_id;
1354 self.shared_buffers.remove(&peer_id);
1355 for (_, buffer) in &self.open_buffers {
1356 if let OpenBuffer::Loaded(buffer) = buffer {
1357 if let Some(buffer) = buffer.upgrade(cx) {
1358 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1359 }
1360 }
1361 }
1362 cx.notify();
1363 Ok(())
1364 }
1365
1366 fn handle_share_worktree(
1367 &mut self,
1368 envelope: TypedEnvelope<proto::ShareWorktree>,
1369 client: Arc<Client>,
1370 cx: &mut ModelContext<Self>,
1371 ) -> Result<()> {
1372 let remote_id = self.remote_id().ok_or_else(|| anyhow!("invalid project"))?;
1373 let replica_id = self.replica_id();
1374 let worktree = envelope
1375 .payload
1376 .worktree
1377 .ok_or_else(|| anyhow!("invalid worktree"))?;
1378 cx.spawn(|this, mut cx| {
1379 async move {
1380 let worktree =
1381 Worktree::remote(remote_id, replica_id, worktree, client, &mut cx).await?;
1382 this.update(&mut cx, |this, cx| this.add_worktree(&worktree, cx));
1383 Ok(())
1384 }
1385 .log_err()
1386 })
1387 .detach();
1388 Ok(())
1389 }
1390
1391 fn handle_unregister_worktree(
1392 &mut self,
1393 envelope: TypedEnvelope<proto::UnregisterWorktree>,
1394 _: Arc<Client>,
1395 cx: &mut ModelContext<Self>,
1396 ) -> Result<()> {
1397 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1398 self.remove_worktree(worktree_id, cx);
1399 Ok(())
1400 }
1401
1402 fn handle_update_worktree(
1403 &mut self,
1404 envelope: TypedEnvelope<proto::UpdateWorktree>,
1405 _: Arc<Client>,
1406 cx: &mut ModelContext<Self>,
1407 ) -> Result<()> {
1408 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1409 if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
1410 worktree.update(cx, |worktree, cx| {
1411 let worktree = worktree.as_remote_mut().unwrap();
1412 worktree.update_from_remote(envelope, cx)
1413 })?;
1414 }
1415 Ok(())
1416 }
1417
1418 fn handle_update_diagnostic_summary(
1419 &mut self,
1420 envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
1421 _: Arc<Client>,
1422 cx: &mut ModelContext<Self>,
1423 ) -> Result<()> {
1424 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1425 if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
1426 if let Some(summary) = envelope.payload.summary {
1427 let project_path = ProjectPath {
1428 worktree_id,
1429 path: Path::new(&summary.path).into(),
1430 };
1431 worktree.update(cx, |worktree, _| {
1432 worktree
1433 .as_remote_mut()
1434 .unwrap()
1435 .update_diagnostic_summary(project_path.path.clone(), &summary);
1436 });
1437 cx.emit(Event::DiagnosticsUpdated(project_path));
1438 }
1439 }
1440 Ok(())
1441 }
1442
1443 fn handle_disk_based_diagnostics_updating(
1444 &mut self,
1445 _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdating>,
1446 _: Arc<Client>,
1447 cx: &mut ModelContext<Self>,
1448 ) -> Result<()> {
1449 self.disk_based_diagnostics_started(cx);
1450 Ok(())
1451 }
1452
1453 fn handle_disk_based_diagnostics_updated(
1454 &mut self,
1455 _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdated>,
1456 _: Arc<Client>,
1457 cx: &mut ModelContext<Self>,
1458 ) -> Result<()> {
1459 self.disk_based_diagnostics_finished(cx);
1460 Ok(())
1461 }
1462
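    /// Apply buffer operations received over RPC. If the buffer isn't open
    /// locally yet, queue the operations so they can be applied once it is.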
1463 pub fn handle_update_buffer(
1464 &mut self,
1465 envelope: TypedEnvelope<proto::UpdateBuffer>,
1466 _: Arc<Client>,
1467 cx: &mut ModelContext<Self>,
1468 ) -> Result<()> {
1469 let payload = envelope.payload.clone();
1470 let buffer_id = payload.buffer_id as usize;
1471 let ops = payload
1472 .operations
1473 .into_iter()
1474 .map(|op| language::proto::deserialize_operation(op))
1475 .collect::<Result<Vec<_>, _>>()?;
1476 match self.open_buffers.get_mut(&buffer_id) {
1477 Some(OpenBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
1478 Some(OpenBuffer::Loaded(buffer)) => {
1479 if let Some(buffer) = buffer.upgrade(cx) {
1480 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
1481 } else {
1482 self.open_buffers
1483 .insert(buffer_id, OpenBuffer::Operations(ops));
1484 }
1485 }
1486 None => {
1487 self.open_buffers
1488 .insert(buffer_id, OpenBuffer::Operations(ops));
1489 }
1490 }
1491 Ok(())
1492 }
1493
1494 pub fn handle_save_buffer(
1495 &mut self,
1496 envelope: TypedEnvelope<proto::SaveBuffer>,
1497 rpc: Arc<Client>,
1498 cx: &mut ModelContext<Self>,
1499 ) -> Result<()> {
1500 let sender_id = envelope.original_sender_id()?;
1501 let project_id = self.remote_id().ok_or_else(|| anyhow!("not connected"))?;
1502 let buffer = self
1503 .shared_buffers
1504 .get(&sender_id)
1505 .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
1506 .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
1507 let receipt = envelope.receipt();
1508 let buffer_id = envelope.payload.buffer_id;
1509 let save = cx.spawn(|_, mut cx| async move {
1510 buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
1511 });
1512
1513 cx.background()
1514 .spawn(
1515 async move {
1516 let (version, mtime) = save.await?;
1517
1518 rpc.respond(
1519 receipt,
1520 proto::BufferSaved {
1521 project_id,
1522 buffer_id,
1523 version: (&version).into(),
1524 mtime: Some(mtime.into()),
1525 },
1526 )
1527 .await?;
1528
1529 Ok(())
1530 }
1531 .log_err(),
1532 )
1533 .detach();
1534 Ok(())
1535 }
1536
1537 pub fn handle_format_buffer(
1538 &mut self,
1539 envelope: TypedEnvelope<proto::FormatBuffer>,
1540 rpc: Arc<Client>,
1541 cx: &mut ModelContext<Self>,
1542 ) -> Result<()> {
1543 let receipt = envelope.receipt();
1544 let sender_id = envelope.original_sender_id()?;
1545 let buffer = self
1546 .shared_buffers
1547 .get(&sender_id)
1548 .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
1549 .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
1550 cx.spawn(|_, mut cx| async move {
1551 let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
1552 // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
1553 // associated with formatting.
1554 cx.spawn(|_| async move {
1555 match format {
1556 Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
1557 Err(error) => {
1558 rpc.respond_with_error(
1559 receipt,
1560 proto::Error {
1561 message: error.to_string(),
1562 },
1563 )
1564 .await?
1565 }
1566 }
1567 Ok::<_, anyhow::Error>(())
1568 })
1569 .await
1570 .log_err();
1571 })
1572 .detach();
1573 Ok(())
1574 }
1575
1576 pub fn handle_open_buffer(
1577 &mut self,
1578 envelope: TypedEnvelope<proto::OpenBuffer>,
1579 rpc: Arc<Client>,
1580 cx: &mut ModelContext<Self>,
1581 ) -> anyhow::Result<()> {
1582 let receipt = envelope.receipt();
1583 let peer_id = envelope.original_sender_id()?;
1584 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1585 let open_buffer = self.open_buffer(
1586 ProjectPath {
1587 worktree_id,
1588 path: PathBuf::from(envelope.payload.path).into(),
1589 },
1590 cx,
1591 );
1592 cx.spawn(|this, mut cx| {
1593 async move {
1594 let buffer = open_buffer.await?;
1595 this.update(&mut cx, |this, _| {
1596 this.shared_buffers
1597 .entry(peer_id)
1598 .or_default()
1599 .insert(buffer.id() as u64, buffer.clone());
1600 });
1601 let message = buffer.read_with(&cx, |buffer, _| buffer.to_proto());
1602 rpc.respond(
1603 receipt,
1604 proto::OpenBufferResponse {
1605 buffer: Some(message),
1606 },
1607 )
1608 .await
1609 }
1610 .log_err()
1611 })
1612 .detach();
1613 Ok(())
1614 }
1615
1616 pub fn handle_close_buffer(
1617 &mut self,
1618 envelope: TypedEnvelope<proto::CloseBuffer>,
1619 _: Arc<Client>,
1620 cx: &mut ModelContext<Self>,
1621 ) -> anyhow::Result<()> {
1622 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1623 shared_buffers.remove(&envelope.payload.buffer_id);
1624 cx.notify();
1625 }
1626 Ok(())
1627 }
1628
1629 pub fn handle_buffer_saved(
1630 &mut self,
1631 envelope: TypedEnvelope<proto::BufferSaved>,
1632 _: Arc<Client>,
1633 cx: &mut ModelContext<Self>,
1634 ) -> Result<()> {
1635 let payload = envelope.payload.clone();
1636 let buffer = self
1637 .open_buffers
1638 .get(&(payload.buffer_id as usize))
1639 .and_then(|buf| {
1640 if let OpenBuffer::Loaded(buffer) = buf {
1641 buffer.upgrade(cx)
1642 } else {
1643 None
1644 }
1645 });
1646 if let Some(buffer) = buffer {
1647 buffer.update(cx, |buffer, cx| {
1648 let version = payload.version.try_into()?;
1649 let mtime = payload
1650 .mtime
1651 .ok_or_else(|| anyhow!("missing mtime"))?
1652 .into();
1653 buffer.did_save(version, mtime, None, cx);
1654 Result::<_, anyhow::Error>::Ok(())
1655 })?;
1656 }
1657 Ok(())
1658 }
1659
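    /// Fuzzy-match `query` against the file paths of all non-weak worktrees,
    /// prefixing matches with the worktree root name when the project has more
    /// than one worktree.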
1660 pub fn match_paths<'a>(
1661 &self,
1662 query: &'a str,
1663 include_ignored: bool,
1664 smart_case: bool,
1665 max_results: usize,
1666 cancel_flag: &'a AtomicBool,
1667 cx: &AppContext,
1668 ) -> impl 'a + Future<Output = Vec<PathMatch>> {
1669 let worktrees = self
1670 .worktrees(cx)
1671 .filter(|worktree| !worktree.read(cx).is_weak())
1672 .collect::<Vec<_>>();
1673 let include_root_name = worktrees.len() > 1;
1674 let candidate_sets = worktrees
1675 .into_iter()
1676 .map(|worktree| CandidateSet {
1677 snapshot: worktree.read(cx).snapshot(),
1678 include_ignored,
1679 include_root_name,
1680 })
1681 .collect::<Vec<_>>();
1682
1683 let background = cx.background().clone();
1684 async move {
1685 fuzzy::match_paths(
1686 candidate_sets.as_slice(),
1687 query,
1688 smart_case,
1689 max_results,
1690 cancel_flag,
1691 background,
1692 )
1693 .await
1694 }
1695 }
1696}
1697
1698impl WorktreeHandle {
1699 pub fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Worktree>> {
1700 match self {
1701 WorktreeHandle::Strong(handle) => Some(handle.clone()),
1702 WorktreeHandle::Weak(handle) => handle.upgrade(cx),
1703 }
1704 }
1705}
1706
1707struct CandidateSet {
1708 snapshot: Snapshot,
1709 include_ignored: bool,
1710 include_root_name: bool,
1711}
1712
1713impl<'a> PathMatchCandidateSet<'a> for CandidateSet {
1714 type Candidates = CandidateSetIter<'a>;
1715
1716 fn id(&self) -> usize {
1717 self.snapshot.id().to_usize()
1718 }
1719
1720 fn len(&self) -> usize {
1721 if self.include_ignored {
1722 self.snapshot.file_count()
1723 } else {
1724 self.snapshot.visible_file_count()
1725 }
1726 }
1727
1728 fn prefix(&self) -> Arc<str> {
1729 if self.snapshot.root_entry().map_or(false, |e| e.is_file()) {
1730 self.snapshot.root_name().into()
1731 } else if self.include_root_name {
1732 format!("{}/", self.snapshot.root_name()).into()
1733 } else {
1734 "".into()
1735 }
1736 }
1737
1738 fn candidates(&'a self, start: usize) -> Self::Candidates {
1739 CandidateSetIter {
1740 traversal: self.snapshot.files(self.include_ignored, start),
1741 }
1742 }
1743}
1744
1745struct CandidateSetIter<'a> {
1746 traversal: Traversal<'a>,
1747}
1748
1749impl<'a> Iterator for CandidateSetIter<'a> {
1750 type Item = PathMatchCandidate<'a>;
1751
1752 fn next(&mut self) -> Option<Self::Item> {
1753 self.traversal.next().map(|entry| {
1754 if let EntryKind::File(char_bag) = entry.kind {
1755 PathMatchCandidate {
1756 path: &entry.path,
1757 char_bag,
1758 }
1759 } else {
1760 unreachable!()
1761 }
1762 })
1763 }
1764}
1765
1766impl Entity for Project {
1767 type Event = Event;
1768
1769 fn release(&mut self, cx: &mut gpui::MutableAppContext) {
1770 match &self.client_state {
1771 ProjectClientState::Local { remote_id_rx, .. } => {
1772 if let Some(project_id) = *remote_id_rx.borrow() {
1773 let rpc = self.client.clone();
1774 cx.spawn(|_| async move {
1775 if let Err(err) = rpc.send(proto::UnregisterProject { project_id }).await {
1776 log::error!("error unregistering project: {}", err);
1777 }
1778 })
1779 .detach();
1780 }
1781 }
1782 ProjectClientState::Remote { remote_id, .. } => {
1783 let rpc = self.client.clone();
1784 let project_id = *remote_id;
1785 cx.spawn(|_| async move {
1786 if let Err(err) = rpc.send(proto::LeaveProject { project_id }).await {
1787 log::error!("error leaving project: {}", err);
1788 }
1789 })
1790 .detach();
1791 }
1792 }
1793 }
1794
1795 fn app_will_quit(
1796 &mut self,
1797 _: &mut MutableAppContext,
1798 ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
1799 use futures::FutureExt;
1800
1801 let shutdown_futures = self
1802 .language_servers
1803 .drain()
1804 .filter_map(|(_, server)| server.shutdown())
1805 .collect::<Vec<_>>();
1806 Some(
1807 async move {
1808 futures::future::join_all(shutdown_futures).await;
1809 }
1810 .boxed(),
1811 )
1812 }
1813}
1814
1815impl Collaborator {
1816 fn from_proto(
1817 message: proto::Collaborator,
1818 user_store: &ModelHandle<UserStore>,
1819 cx: &mut AsyncAppContext,
1820 ) -> impl Future<Output = Result<Self>> {
1821 let user = user_store.update(cx, |user_store, cx| {
1822 user_store.fetch_user(message.user_id, cx)
1823 });
1824
1825 async move {
1826 Ok(Self {
1827 peer_id: PeerId(message.peer_id),
1828 user: user.await?,
1829 replica_id: message.replica_id as ReplicaId,
1830 })
1831 }
1832 }
1833}
1834
1835impl<P: AsRef<Path>> From<(WorktreeId, P)> for ProjectPath {
1836 fn from((worktree_id, path): (WorktreeId, P)) -> Self {
1837 Self {
1838 worktree_id,
1839 path: path.as_ref().into(),
1840 }
1841 }
1842}
1843
1844impl OpenBuffer {
1845 fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Buffer>> {
1846 match self {
1847 OpenBuffer::Loaded(buffer) => buffer.upgrade(cx),
1848 OpenBuffer::Operations(_) => None,
1849 }
1850 }
1851}
1852
1853#[cfg(test)]
1854mod tests {
1855 use super::{Event, *};
1856 use client::test::FakeHttpClient;
1857 use fs::RealFs;
1858 use futures::StreamExt;
1859 use gpui::{test::subscribe, TestAppContext};
1860 use language::{
1861 tree_sitter_rust, AnchorRangeExt, Diagnostic, LanguageConfig, LanguageRegistry,
1862 LanguageServerConfig, Point,
1863 };
1864 use lsp::Url;
1865 use serde_json::json;
1866 use std::{cell::RefCell, os::unix, path::PathBuf, rc::Rc};
1867 use unindent::Unindent as _;
1868 use util::test::temp_tree;
1869 use worktree::WorktreeHandle as _;
1870
1871 #[gpui::test]
1872 async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
1873 let dir = temp_tree(json!({
1874 "root": {
1875 "apple": "",
1876 "banana": {
1877 "carrot": {
1878 "date": "",
1879 "endive": "",
1880 }
1881 },
1882 "fennel": {
1883 "grape": "",
1884 }
1885 }
1886 }));
1887
1888 let root_link_path = dir.path().join("root_link");
1889 unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
1890 unix::fs::symlink(
1891 &dir.path().join("root/fennel"),
1892 &dir.path().join("root/finnochio"),
1893 )
1894 .unwrap();
1895
1896 let project = build_project(Arc::new(RealFs), &mut cx);
1897
1898 let (tree, _) = project
1899 .update(&mut cx, |project, cx| {
1900 project.find_or_create_local_worktree(&root_link_path, false, cx)
1901 })
1902 .await
1903 .unwrap();
1904
1905 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
1906 .await;
1907 cx.read(|cx| {
1908 let tree = tree.read(cx);
1909 assert_eq!(tree.file_count(), 5);
1910 assert_eq!(
1911 tree.inode_for_path("fennel/grape"),
1912 tree.inode_for_path("finnochio/grape")
1913 );
1914 });
1915
1916 let cancel_flag = Default::default();
1917 let results = project
1918 .read_with(&cx, |project, cx| {
1919 project.match_paths("bna", false, false, 10, &cancel_flag, cx)
1920 })
1921 .await;
1922 assert_eq!(
1923 results
1924 .into_iter()
1925 .map(|result| result.path)
1926 .collect::<Vec<Arc<Path>>>(),
1927 vec![
1928 PathBuf::from("banana/carrot/date").into(),
1929 PathBuf::from("banana/carrot/endive").into(),
1930 ]
1931 );
1932 }
1933
1934 #[gpui::test]
1935 async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
1936 let (language_server_config, mut fake_server) =
1937 LanguageServerConfig::fake(cx.background()).await;
1938 let progress_token = language_server_config
1939 .disk_based_diagnostics_progress_token
1940 .clone()
1941 .unwrap();
1942
1943 let mut languages = LanguageRegistry::new();
1944 languages.add(Arc::new(Language::new(
1945 LanguageConfig {
1946 name: "Rust".to_string(),
1947 path_suffixes: vec!["rs".to_string()],
1948 language_server: Some(language_server_config),
1949 ..Default::default()
1950 },
1951 Some(tree_sitter_rust::language()),
1952 )));
1953
1954 let dir = temp_tree(json!({
1955 "a.rs": "fn a() { A }",
1956 "b.rs": "const y: i32 = 1",
1957 }));
1958
1959 let http_client = FakeHttpClient::with_404_response();
1960 let client = Client::new(http_client.clone());
1961 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
1962
1963 let project = cx.update(|cx| {
1964 Project::local(
1965 client,
1966 user_store,
1967 Arc::new(languages),
1968 Arc::new(RealFs),
1969 cx,
1970 )
1971 });
1972
1973 let (tree, _) = project
1974 .update(&mut cx, |project, cx| {
1975 project.find_or_create_local_worktree(dir.path(), false, cx)
1976 })
1977 .await
1978 .unwrap();
1979 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
1980
1981 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
1982 .await;
1983
1984 // Cause worktree to start the fake language server
1985 let _buffer = project
1986 .update(&mut cx, |project, cx| {
1987 project.open_buffer(
1988 ProjectPath {
1989 worktree_id,
1990 path: Path::new("b.rs").into(),
1991 },
1992 cx,
1993 )
1994 })
1995 .await
1996 .unwrap();
1997
1998 let mut events = subscribe(&project, &mut cx);
1999
2000 fake_server.start_progress(&progress_token).await;
2001 assert_eq!(
2002 events.next().await.unwrap(),
2003 Event::DiskBasedDiagnosticsStarted
2004 );
2005
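        // Simulate overlapping progress reports for the same token; on their own they should not produce additional events.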
2006 fake_server.start_progress(&progress_token).await;
2007 fake_server.end_progress(&progress_token).await;
2008 fake_server.start_progress(&progress_token).await;
2009
2010 fake_server
2011 .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
2012 uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
2013 version: None,
2014 diagnostics: vec![lsp::Diagnostic {
2015 range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
2016 severity: Some(lsp::DiagnosticSeverity::ERROR),
2017 message: "undefined variable 'A'".to_string(),
2018 ..Default::default()
2019 }],
2020 })
2021 .await;
2022 assert_eq!(
2023 events.next().await.unwrap(),
2024 Event::DiagnosticsUpdated(ProjectPath {
2025 worktree_id,
2026 path: Arc::from(Path::new("a.rs"))
2027 })
2028 );
2029
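        // The disk-based diagnostics are only reported as updated and finished once every outstanding progress report has ended.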
2030 fake_server.end_progress(&progress_token).await;
2031 fake_server.end_progress(&progress_token).await;
2032 assert_eq!(
2033 events.next().await.unwrap(),
2034 Event::DiskBasedDiagnosticsUpdated
2035 );
2036 assert_eq!(
2037 events.next().await.unwrap(),
2038 Event::DiskBasedDiagnosticsFinished
2039 );
2040
2041 let buffer = project
2042 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
2043 .await
2044 .unwrap();
2045
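        // The diagnostic published for a.rs should now appear in the buffer.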
2046 buffer.read_with(&cx, |buffer, _| {
2047 let snapshot = buffer.snapshot();
2048 let diagnostics = snapshot
2049 .diagnostics_in_range::<_, Point>(0..buffer.len())
2050 .collect::<Vec<_>>();
2051 assert_eq!(
2052 diagnostics,
2053 &[DiagnosticEntry {
2054 range: Point::new(0, 9)..Point::new(0, 10),
2055 diagnostic: Diagnostic {
2056 severity: lsp::DiagnosticSeverity::ERROR,
2057 message: "undefined variable 'A'".to_string(),
2058 group_id: 0,
2059 is_primary: true,
2060 ..Default::default()
2061 }
2062 }]
2063 )
2064 });
2065 }
2066
2067 #[gpui::test]
2068 async fn test_search_worktree_without_files(mut cx: gpui::TestAppContext) {
2069 let dir = temp_tree(json!({
2070 "root": {
2071 "dir1": {},
2072 "dir2": {
2073 "dir3": {}
2074 }
2075 }
2076 }));
2077
2078 let project = build_project(Arc::new(RealFs), &mut cx);
2079 let (tree, _) = project
2080 .update(&mut cx, |project, cx| {
2081                 project.find_or_create_local_worktree(dir.path(), false, cx)
2082 })
2083 .await
2084 .unwrap();
2085
2086 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2087 .await;
2088
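        // A worktree that contains only directories should produce no path matches.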
2089 let cancel_flag = Default::default();
2090 let results = project
2091 .read_with(&cx, |project, cx| {
2092 project.match_paths("dir", false, false, 10, &cancel_flag, cx)
2093 })
2094 .await;
2095
2096 assert!(results.is_empty());
2097 }
2098
2099 #[gpui::test]
2100 async fn test_definition(mut cx: gpui::TestAppContext) {
2101 let (language_server_config, mut fake_server) =
2102 LanguageServerConfig::fake(cx.background()).await;
2103
2104 let mut languages = LanguageRegistry::new();
2105 languages.add(Arc::new(Language::new(
2106 LanguageConfig {
2107 name: "Rust".to_string(),
2108 path_suffixes: vec!["rs".to_string()],
2109 language_server: Some(language_server_config),
2110 ..Default::default()
2111 },
2112 Some(tree_sitter_rust::language()),
2113 )));
2114
2115 let dir = temp_tree(json!({
2116 "a.rs": "const fn a() { A }",
2117 "b.rs": "const y: i32 = crate::a()",
2118 }));
2119
2120 let http_client = FakeHttpClient::with_404_response();
2121 let client = Client::new(http_client.clone());
2122 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
2123 let project = cx.update(|cx| {
2124 Project::local(
2125 client,
2126 user_store,
2127 Arc::new(languages),
2128 Arc::new(RealFs),
2129 cx,
2130 )
2131 });
2132
2133 let (tree, _) = project
2134 .update(&mut cx, |project, cx| {
2135 project.find_or_create_local_worktree(dir.path().join("b.rs"), false, cx)
2136 })
2137 .await
2138 .unwrap();
2139 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
2140 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2141 .await;
2142
2143         // Cause the worktree to start the fake language server
2144 let buffer = project
2145 .update(&mut cx, |project, cx| {
2146 project.open_buffer(
2147 ProjectPath {
2148 worktree_id,
2149 path: Path::new("").into(),
2150 },
2151 cx,
2152 )
2153 })
2154 .await
2155 .unwrap();
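        // Request the definition of `a` (offset 22 within b.rs) and answer it via the fake language server.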
2156 let definitions =
2157 project.update(&mut cx, |project, cx| project.definition(&buffer, 22, cx));
2158 let (request_id, request) = fake_server
2159 .receive_request::<lsp::request::GotoDefinition>()
2160 .await;
2161 let request_params = request.text_document_position_params;
2162 assert_eq!(
2163 request_params.text_document.uri.to_file_path().unwrap(),
2164 dir.path().join("b.rs")
2165 );
2166 assert_eq!(request_params.position, lsp::Position::new(0, 22));
2167
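        // Reply with a location in a.rs, which lies outside the existing worktree; the project should open it in a new, weak worktree.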
2168 fake_server
2169 .respond(
2170 request_id,
2171 Some(lsp::GotoDefinitionResponse::Scalar(lsp::Location::new(
2172 lsp::Url::from_file_path(dir.path().join("a.rs")).unwrap(),
2173 lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
2174 ))),
2175 )
2176 .await;
2177 let mut definitions = definitions.await.unwrap();
2178 assert_eq!(definitions.len(), 1);
2179 let definition = definitions.pop().unwrap();
2180 cx.update(|cx| {
2181 let target_buffer = definition.target_buffer.read(cx);
2182 assert_eq!(
2183 target_buffer.file().unwrap().abs_path(),
2184 Some(dir.path().join("a.rs"))
2185 );
2186 assert_eq!(definition.target_range.to_offset(target_buffer), 9..10);
2187 assert_eq!(
2188 list_worktrees(&project, cx),
2189 [
2190 (dir.path().join("b.rs"), false),
2191 (dir.path().join("a.rs"), true)
2192 ]
2193 );
2194
2195 drop(definition);
2196 });
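        // Once the definition (and the buffer handle it holds) is dropped, the weak worktree created for a.rs is released.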
2197 cx.read(|cx| {
2198 assert_eq!(
2199 list_worktrees(&project, cx),
2200 [(dir.path().join("b.rs"), false)]
2201 );
2202 });
2203
2204 fn list_worktrees(project: &ModelHandle<Project>, cx: &AppContext) -> Vec<(PathBuf, bool)> {
2205 project
2206 .read(cx)
2207 .worktrees(cx)
2208 .map(|worktree| {
2209 let worktree = worktree.read(cx);
2210 (
2211 worktree.as_local().unwrap().abs_path().to_path_buf(),
2212 worktree.is_weak(),
2213 )
2214 })
2215 .collect::<Vec<_>>()
2216 }
2217 }
2218
2219 #[gpui::test]
2220 async fn test_save_file(mut cx: gpui::TestAppContext) {
2221 let fs = Arc::new(FakeFs::new());
2222 fs.insert_tree(
2223 "/dir",
2224 json!({
2225 "file1": "the old contents",
2226 }),
2227 )
2228 .await;
2229
2230 let project = build_project(fs.clone(), &mut cx);
2231 let worktree_id = project
2232 .update(&mut cx, |p, cx| {
2233 p.find_or_create_local_worktree("/dir", false, cx)
2234 })
2235 .await
2236 .unwrap()
2237 .0
2238 .read_with(&cx, |tree, _| tree.id());
2239
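        // Open the buffer, insert a large amount of text, and save it back through the fake filesystem.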
2240 let buffer = project
2241 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
2242 .await
2243 .unwrap();
2244 buffer
2245 .update(&mut cx, |buffer, cx| {
2246 assert_eq!(buffer.text(), "the old contents");
2247 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
2248 buffer.save(cx)
2249 })
2250 .await
2251 .unwrap();
2252
2253 let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
2254 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
2255 }
2256
2257 #[gpui::test]
2258 async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
2259 let fs = Arc::new(FakeFs::new());
2260 fs.insert_tree(
2261 "/dir",
2262 json!({
2263 "file1": "the old contents",
2264 }),
2265 )
2266 .await;
2267
2268 let project = build_project(fs.clone(), &mut cx);
2269 let worktree_id = project
2270 .update(&mut cx, |p, cx| {
2271 p.find_or_create_local_worktree("/dir/file1", false, cx)
2272 })
2273 .await
2274 .unwrap()
2275 .0
2276 .read_with(&cx, |tree, _| tree.id());
2277
2278 let buffer = project
2279 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, ""), cx))
2280 .await
2281 .unwrap();
2282 buffer
2283 .update(&mut cx, |buffer, cx| {
2284 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
2285 buffer.save(cx)
2286 })
2287 .await
2288 .unwrap();
2289
2290 let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
2291 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
2292 }
2293
2294 #[gpui::test]
2295 async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
2296 let dir = temp_tree(json!({
2297 "a": {
2298 "file1": "",
2299 "file2": "",
2300 "file3": "",
2301 },
2302 "b": {
2303 "c": {
2304 "file4": "",
2305 "file5": "",
2306 }
2307 }
2308 }));
2309
2310 let project = build_project(Arc::new(RealFs), &mut cx);
2311 let rpc = project.read_with(&cx, |p, _| p.client.clone());
2312
2313 let (tree, _) = project
2314 .update(&mut cx, |p, cx| {
2315 p.find_or_create_local_worktree(dir.path(), false, cx)
2316 })
2317 .await
2318 .unwrap();
2319 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
2320
2321 let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
2322 let buffer = project.update(cx, |p, cx| p.open_buffer((worktree_id, path), cx));
2323 async move { buffer.await.unwrap() }
2324 };
2325 let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
2326 tree.read_with(cx, |tree, _| {
2327 tree.entry_for_path(path)
2328                     .unwrap_or_else(|| panic!("no entry for path {}", path))
2329 .id
2330 })
2331 };
2332
2333 let buffer2 = buffer_for_path("a/file2", &mut cx).await;
2334 let buffer3 = buffer_for_path("a/file3", &mut cx).await;
2335 let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
2336 let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
2337
2338 let file2_id = id_for_path("a/file2", &cx);
2339 let file3_id = id_for_path("a/file3", &cx);
2340 let file4_id = id_for_path("b/c/file4", &cx);
2341
2342 // Wait for the initial scan.
2343 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2344 .await;
2345
2346 // Create a remote copy of this worktree.
2347 let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
2348 let remote = Worktree::remote(
2349 1,
2350 1,
2351 initial_snapshot.to_proto(&Default::default(), Default::default()),
2352 rpc.clone(),
2353 &mut cx.to_async(),
2354 )
2355 .await
2356 .unwrap();
2357
2358 cx.read(|cx| {
2359 assert!(!buffer2.read(cx).is_dirty());
2360 assert!(!buffer3.read(cx).is_dirty());
2361 assert!(!buffer4.read(cx).is_dirty());
2362 assert!(!buffer5.read(cx).is_dirty());
2363 });
2364
2365 // Rename and delete files and directories.
2366 tree.flush_fs_events(&cx).await;
2367 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
2368 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
2369 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
2370 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
2371 tree.flush_fs_events(&cx).await;
2372
2373 let expected_paths = vec![
2374 "a",
2375 "a/file1",
2376 "a/file2.new",
2377 "b",
2378 "d",
2379 "d/file3",
2380 "d/file4",
2381 ];
2382
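        // The worktree paths, entry ids, and buffer files should all reflect the renames and deletions above.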
2383 cx.read(|app| {
2384 assert_eq!(
2385 tree.read(app)
2386 .paths()
2387 .map(|p| p.to_str().unwrap())
2388 .collect::<Vec<_>>(),
2389 expected_paths
2390 );
2391
2392 assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
2393 assert_eq!(id_for_path("d/file3", &cx), file3_id);
2394 assert_eq!(id_for_path("d/file4", &cx), file4_id);
2395
2396 assert_eq!(
2397 buffer2.read(app).file().unwrap().path().as_ref(),
2398 Path::new("a/file2.new")
2399 );
2400 assert_eq!(
2401 buffer3.read(app).file().unwrap().path().as_ref(),
2402 Path::new("d/file3")
2403 );
2404 assert_eq!(
2405 buffer4.read(app).file().unwrap().path().as_ref(),
2406 Path::new("d/file4")
2407 );
2408 assert_eq!(
2409 buffer5.read(app).file().unwrap().path().as_ref(),
2410 Path::new("b/c/file5")
2411 );
2412
2413 assert!(!buffer2.read(app).file().unwrap().is_deleted());
2414 assert!(!buffer3.read(app).file().unwrap().is_deleted());
2415 assert!(!buffer4.read(app).file().unwrap().is_deleted());
2416 assert!(buffer5.read(app).file().unwrap().is_deleted());
2417 });
2418
2419 // Update the remote worktree. Check that it becomes consistent with the
2420 // local worktree.
2421 remote.update(&mut cx, |remote, cx| {
2422 let update_message =
2423 tree.read(cx)
2424 .snapshot()
2425 .build_update(&initial_snapshot, 1, 1, true);
2426 remote
2427 .as_remote_mut()
2428 .unwrap()
2429 .snapshot
2430 .apply_update(update_message)
2431 .unwrap();
2432
2433 assert_eq!(
2434 remote
2435 .paths()
2436 .map(|p| p.to_str().unwrap())
2437 .collect::<Vec<_>>(),
2438 expected_paths
2439 );
2440 });
2441 }
2442
2443 #[gpui::test]
2444 async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
2445 let fs = Arc::new(FakeFs::new());
2446 fs.insert_tree(
2447 "/the-dir",
2448 json!({
2449 "a.txt": "a-contents",
2450 "b.txt": "b-contents",
2451 }),
2452 )
2453 .await;
2454
2455 let project = build_project(fs.clone(), &mut cx);
2456 let worktree_id = project
2457 .update(&mut cx, |p, cx| {
2458 p.find_or_create_local_worktree("/the-dir", false, cx)
2459 })
2460 .await
2461 .unwrap()
2462 .0
2463 .read_with(&cx, |tree, _| tree.id());
2464
2465 // Spawn multiple tasks to open paths, repeating some paths.
2466 let (buffer_a_1, buffer_b, buffer_a_2) = project.update(&mut cx, |p, cx| {
2467 (
2468 p.open_buffer((worktree_id, "a.txt"), cx),
2469 p.open_buffer((worktree_id, "b.txt"), cx),
2470 p.open_buffer((worktree_id, "a.txt"), cx),
2471 )
2472 });
2473
2474 let buffer_a_1 = buffer_a_1.await.unwrap();
2475 let buffer_a_2 = buffer_a_2.await.unwrap();
2476 let buffer_b = buffer_b.await.unwrap();
2477 assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
2478 assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
2479
2480 // There is only one buffer per path.
2481 let buffer_a_id = buffer_a_1.id();
2482 assert_eq!(buffer_a_2.id(), buffer_a_id);
2483
2484 // Open the same path again while it is still open.
2485 drop(buffer_a_1);
2486 let buffer_a_3 = project
2487 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))
2488 .await
2489 .unwrap();
2490
2491 // There's still only one buffer per path.
2492 assert_eq!(buffer_a_3.id(), buffer_a_id);
2493 }
2494
2495 #[gpui::test]
2496 async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
2497 use std::fs;
2498
2499 let dir = temp_tree(json!({
2500 "file1": "abc",
2501 "file2": "def",
2502 "file3": "ghi",
2503 }));
2504
2505 let project = build_project(Arc::new(RealFs), &mut cx);
2506 let (worktree, _) = project
2507 .update(&mut cx, |p, cx| {
2508 p.find_or_create_local_worktree(dir.path(), false, cx)
2509 })
2510 .await
2511 .unwrap();
2512 let worktree_id = worktree.read_with(&cx, |worktree, _| worktree.id());
2513
2514 worktree.flush_fs_events(&cx).await;
2515 worktree
2516 .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
2517 .await;
2518
2519 let buffer1 = project
2520 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
2521 .await
2522 .unwrap();
2523 let events = Rc::new(RefCell::new(Vec::new()));
2524
2525 // initially, the buffer isn't dirty.
2526 buffer1.update(&mut cx, |buffer, cx| {
2527 cx.subscribe(&buffer1, {
2528 let events = events.clone();
2529 move |_, _, event, _| events.borrow_mut().push(event.clone())
2530 })
2531 .detach();
2532
2533 assert!(!buffer.is_dirty());
2534 assert!(events.borrow().is_empty());
2535
2536 buffer.edit(vec![1..2], "", cx);
2537 });
2538
2539 // after the first edit, the buffer is dirty, and emits a dirtied event.
2540 buffer1.update(&mut cx, |buffer, cx| {
2541 assert!(buffer.text() == "ac");
2542 assert!(buffer.is_dirty());
2543 assert_eq!(
2544 *events.borrow(),
2545 &[language::Event::Edited, language::Event::Dirtied]
2546 );
2547 events.borrow_mut().clear();
2548 buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
2549 });
2550
2551 // after saving, the buffer is not dirty, and emits a saved event.
2552 buffer1.update(&mut cx, |buffer, cx| {
2553 assert!(!buffer.is_dirty());
2554 assert_eq!(*events.borrow(), &[language::Event::Saved]);
2555 events.borrow_mut().clear();
2556
2557 buffer.edit(vec![1..1], "B", cx);
2558 buffer.edit(vec![2..2], "D", cx);
2559 });
2560
2561         // after editing again, the buffer is dirty, and emits another dirtied event.
2562 buffer1.update(&mut cx, |buffer, cx| {
2563 assert!(buffer.text() == "aBDc");
2564 assert!(buffer.is_dirty());
2565 assert_eq!(
2566 *events.borrow(),
2567 &[
2568 language::Event::Edited,
2569 language::Event::Dirtied,
2570 language::Event::Edited,
2571 ],
2572 );
2573 events.borrow_mut().clear();
2574
2575 // TODO - currently, after restoring the buffer to its
2576             // previously-saved state, the buffer is still considered dirty.
2577 buffer.edit([1..3], "", cx);
2578 assert!(buffer.text() == "ac");
2579 assert!(buffer.is_dirty());
2580 });
2581
2582 assert_eq!(*events.borrow(), &[language::Event::Edited]);
2583
2584 // When a file is deleted, the buffer is considered dirty.
2585 let events = Rc::new(RefCell::new(Vec::new()));
2586 let buffer2 = project
2587 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file2"), cx))
2588 .await
2589 .unwrap();
2590 buffer2.update(&mut cx, |_, cx| {
2591 cx.subscribe(&buffer2, {
2592 let events = events.clone();
2593 move |_, _, event, _| events.borrow_mut().push(event.clone())
2594 })
2595 .detach();
2596 });
2597
2598 fs::remove_file(dir.path().join("file2")).unwrap();
2599 buffer2.condition(&cx, |b, _| b.is_dirty()).await;
2600 assert_eq!(
2601 *events.borrow(),
2602 &[language::Event::Dirtied, language::Event::FileHandleChanged]
2603 );
2604
2605         // When a file that is already dirty is deleted, no additional Dirtied event is emitted.
2606 let events = Rc::new(RefCell::new(Vec::new()));
2607 let buffer3 = project
2608 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file3"), cx))
2609 .await
2610 .unwrap();
2611 buffer3.update(&mut cx, |_, cx| {
2612 cx.subscribe(&buffer3, {
2613 let events = events.clone();
2614 move |_, _, event, _| events.borrow_mut().push(event.clone())
2615 })
2616 .detach();
2617 });
2618
2619 worktree.flush_fs_events(&cx).await;
2620 buffer3.update(&mut cx, |buffer, cx| {
2621 buffer.edit(Some(0..0), "x", cx);
2622 });
2623 events.borrow_mut().clear();
2624 fs::remove_file(dir.path().join("file3")).unwrap();
2625 buffer3
2626 .condition(&cx, |_, _| !events.borrow().is_empty())
2627 .await;
2628 assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
2629 cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
2630 }
2631
2632 #[gpui::test]
2633 async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
2634 use std::fs;
2635
2636 let initial_contents = "aaa\nbbbbb\nc\n";
2637 let dir = temp_tree(json!({ "the-file": initial_contents }));
2638
2639 let project = build_project(Arc::new(RealFs), &mut cx);
2640 let (worktree, _) = project
2641 .update(&mut cx, |p, cx| {
2642 p.find_or_create_local_worktree(dir.path(), false, cx)
2643 })
2644 .await
2645 .unwrap();
2646 let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());
2647
2648 worktree
2649 .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
2650 .await;
2651
2652 let abs_path = dir.path().join("the-file");
2653 let buffer = project
2654 .update(&mut cx, |p, cx| {
2655 p.open_buffer((worktree_id, "the-file"), cx)
2656 })
2657 .await
2658 .unwrap();
2659
2660 // TODO
2661 // Add a cursor on each row.
2662 // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
2663 // assert!(!buffer.is_dirty());
2664 // buffer.add_selection_set(
2665 // &(0..3)
2666 // .map(|row| Selection {
2667 // id: row as usize,
2668 // start: Point::new(row, 1),
2669 // end: Point::new(row, 1),
2670 // reversed: false,
2671 // goal: SelectionGoal::None,
2672 // })
2673 // .collect::<Vec<_>>(),
2674 // cx,
2675 // )
2676 // });
2677
2678 // Change the file on disk, adding two new lines of text, and removing
2679 // one line.
2680 buffer.read_with(&cx, |buffer, _| {
2681 assert!(!buffer.is_dirty());
2682 assert!(!buffer.has_conflict());
2683 });
2684 let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
2685 fs::write(&abs_path, new_contents).unwrap();
2686
2687 // Because the buffer was not modified, it is reloaded from disk. Its
2688 // contents are edited according to the diff between the old and new
2689 // file contents.
2690 buffer
2691 .condition(&cx, |buffer, _| buffer.text() == new_contents)
2692 .await;
2693
2694 buffer.update(&mut cx, |buffer, _| {
2695 assert_eq!(buffer.text(), new_contents);
2696 assert!(!buffer.is_dirty());
2697 assert!(!buffer.has_conflict());
2698
2699 // TODO
2700 // let cursor_positions = buffer
2701 // .selection_set(selection_set_id)
2702 // .unwrap()
2703 // .selections::<Point>(&*buffer)
2704 // .map(|selection| {
2705 // assert_eq!(selection.start, selection.end);
2706 // selection.start
2707 // })
2708 // .collect::<Vec<_>>();
2709 // assert_eq!(
2710 // cursor_positions,
2711 // [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
2712 // );
2713 });
2714
2715 // Modify the buffer
2716 buffer.update(&mut cx, |buffer, cx| {
2717 buffer.edit(vec![0..0], " ", cx);
2718 assert!(buffer.is_dirty());
2719 assert!(!buffer.has_conflict());
2720 });
2721
2722 // Change the file on disk again, adding blank lines to the beginning.
2723 fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();
2724
2725 // Because the buffer is modified, it doesn't reload from disk, but is
2726 // marked as having a conflict.
2727 buffer
2728 .condition(&cx, |buffer, _| buffer.has_conflict())
2729 .await;
2730 }
2731
2732 #[gpui::test]
2733 async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
2734 let fs = Arc::new(FakeFs::new());
2735 fs.insert_tree(
2736 "/the-dir",
2737 json!({
2738 "a.rs": "
2739 fn foo(mut v: Vec<usize>) {
2740 for x in &v {
2741 v.push(1);
2742 }
2743 }
2744 "
2745 .unindent(),
2746 }),
2747 )
2748 .await;
2749
2750 let project = build_project(fs.clone(), &mut cx);
2751 let (worktree, _) = project
2752 .update(&mut cx, |p, cx| {
2753 p.find_or_create_local_worktree("/the-dir", false, cx)
2754 })
2755 .await
2756 .unwrap();
2757 let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());
2758
2759 let buffer = project
2760 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
2761 .await
2762 .unwrap();
2763
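        // Publish two diagnostic groups: a warning with one related hint, and an error with two related hints.
        // The related-information entries become supporting (non-primary) diagnostics within each group.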
2764 let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
2765 let message = lsp::PublishDiagnosticsParams {
2766 uri: buffer_uri.clone(),
2767 diagnostics: vec![
2768 lsp::Diagnostic {
2769 range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
2770 severity: Some(DiagnosticSeverity::WARNING),
2771 message: "error 1".to_string(),
2772 related_information: Some(vec![lsp::DiagnosticRelatedInformation {
2773 location: lsp::Location {
2774 uri: buffer_uri.clone(),
2775 range: lsp::Range::new(
2776 lsp::Position::new(1, 8),
2777 lsp::Position::new(1, 9),
2778 ),
2779 },
2780 message: "error 1 hint 1".to_string(),
2781 }]),
2782 ..Default::default()
2783 },
2784 lsp::Diagnostic {
2785 range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
2786 severity: Some(DiagnosticSeverity::HINT),
2787 message: "error 1 hint 1".to_string(),
2788 related_information: Some(vec![lsp::DiagnosticRelatedInformation {
2789 location: lsp::Location {
2790 uri: buffer_uri.clone(),
2791 range: lsp::Range::new(
2792 lsp::Position::new(1, 8),
2793 lsp::Position::new(1, 9),
2794 ),
2795 },
2796 message: "original diagnostic".to_string(),
2797 }]),
2798 ..Default::default()
2799 },
2800 lsp::Diagnostic {
2801 range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
2802 severity: Some(DiagnosticSeverity::ERROR),
2803 message: "error 2".to_string(),
2804 related_information: Some(vec![
2805 lsp::DiagnosticRelatedInformation {
2806 location: lsp::Location {
2807 uri: buffer_uri.clone(),
2808 range: lsp::Range::new(
2809 lsp::Position::new(1, 13),
2810 lsp::Position::new(1, 15),
2811 ),
2812 },
2813 message: "error 2 hint 1".to_string(),
2814 },
2815 lsp::DiagnosticRelatedInformation {
2816 location: lsp::Location {
2817 uri: buffer_uri.clone(),
2818 range: lsp::Range::new(
2819 lsp::Position::new(1, 13),
2820 lsp::Position::new(1, 15),
2821 ),
2822 },
2823 message: "error 2 hint 2".to_string(),
2824 },
2825 ]),
2826 ..Default::default()
2827 },
2828 lsp::Diagnostic {
2829 range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
2830 severity: Some(DiagnosticSeverity::HINT),
2831 message: "error 2 hint 1".to_string(),
2832 related_information: Some(vec![lsp::DiagnosticRelatedInformation {
2833 location: lsp::Location {
2834 uri: buffer_uri.clone(),
2835 range: lsp::Range::new(
2836 lsp::Position::new(2, 8),
2837 lsp::Position::new(2, 17),
2838 ),
2839 },
2840 message: "original diagnostic".to_string(),
2841 }]),
2842 ..Default::default()
2843 },
2844 lsp::Diagnostic {
2845 range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
2846 severity: Some(DiagnosticSeverity::HINT),
2847 message: "error 2 hint 2".to_string(),
2848 related_information: Some(vec![lsp::DiagnosticRelatedInformation {
2849 location: lsp::Location {
2850 uri: buffer_uri.clone(),
2851 range: lsp::Range::new(
2852 lsp::Position::new(2, 8),
2853 lsp::Position::new(2, 17),
2854 ),
2855 },
2856 message: "original diagnostic".to_string(),
2857 }]),
2858 ..Default::default()
2859 },
2860 ],
2861 version: None,
2862 };
2863
2864 project
2865 .update(&mut cx, |p, cx| {
2866 p.update_diagnostics(message, &Default::default(), cx)
2867 })
2868 .unwrap();
2869 let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());
2870
2871 assert_eq!(
2872 buffer
2873 .diagnostics_in_range::<_, Point>(0..buffer.len())
2874 .collect::<Vec<_>>(),
2875 &[
2876 DiagnosticEntry {
2877 range: Point::new(1, 8)..Point::new(1, 9),
2878 diagnostic: Diagnostic {
2879 severity: DiagnosticSeverity::WARNING,
2880 message: "error 1".to_string(),
2881 group_id: 0,
2882 is_primary: true,
2883 ..Default::default()
2884 }
2885 },
2886 DiagnosticEntry {
2887 range: Point::new(1, 8)..Point::new(1, 9),
2888 diagnostic: Diagnostic {
2889 severity: DiagnosticSeverity::HINT,
2890 message: "error 1 hint 1".to_string(),
2891 group_id: 0,
2892 is_primary: false,
2893 ..Default::default()
2894 }
2895 },
2896 DiagnosticEntry {
2897 range: Point::new(1, 13)..Point::new(1, 15),
2898 diagnostic: Diagnostic {
2899 severity: DiagnosticSeverity::HINT,
2900 message: "error 2 hint 1".to_string(),
2901 group_id: 1,
2902 is_primary: false,
2903 ..Default::default()
2904 }
2905 },
2906 DiagnosticEntry {
2907 range: Point::new(1, 13)..Point::new(1, 15),
2908 diagnostic: Diagnostic {
2909 severity: DiagnosticSeverity::HINT,
2910 message: "error 2 hint 2".to_string(),
2911 group_id: 1,
2912 is_primary: false,
2913 ..Default::default()
2914 }
2915 },
2916 DiagnosticEntry {
2917 range: Point::new(2, 8)..Point::new(2, 17),
2918 diagnostic: Diagnostic {
2919 severity: DiagnosticSeverity::ERROR,
2920 message: "error 2".to_string(),
2921 group_id: 1,
2922 is_primary: true,
2923 ..Default::default()
2924 }
2925 }
2926 ]
2927 );
2928
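        // Each group can also be fetched on its own by group id.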
2929 assert_eq!(
2930 buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
2931 &[
2932 DiagnosticEntry {
2933 range: Point::new(1, 8)..Point::new(1, 9),
2934 diagnostic: Diagnostic {
2935 severity: DiagnosticSeverity::WARNING,
2936 message: "error 1".to_string(),
2937 group_id: 0,
2938 is_primary: true,
2939 ..Default::default()
2940 }
2941 },
2942 DiagnosticEntry {
2943 range: Point::new(1, 8)..Point::new(1, 9),
2944 diagnostic: Diagnostic {
2945 severity: DiagnosticSeverity::HINT,
2946 message: "error 1 hint 1".to_string(),
2947 group_id: 0,
2948 is_primary: false,
2949 ..Default::default()
2950 }
2951 },
2952 ]
2953 );
2954 assert_eq!(
2955 buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
2956 &[
2957 DiagnosticEntry {
2958 range: Point::new(1, 13)..Point::new(1, 15),
2959 diagnostic: Diagnostic {
2960 severity: DiagnosticSeverity::HINT,
2961 message: "error 2 hint 1".to_string(),
2962 group_id: 1,
2963 is_primary: false,
2964 ..Default::default()
2965 }
2966 },
2967 DiagnosticEntry {
2968 range: Point::new(1, 13)..Point::new(1, 15),
2969 diagnostic: Diagnostic {
2970 severity: DiagnosticSeverity::HINT,
2971 message: "error 2 hint 2".to_string(),
2972 group_id: 1,
2973 is_primary: false,
2974 ..Default::default()
2975 }
2976 },
2977 DiagnosticEntry {
2978 range: Point::new(2, 8)..Point::new(2, 17),
2979 diagnostic: Diagnostic {
2980 severity: DiagnosticSeverity::ERROR,
2981 message: "error 2".to_string(),
2982 group_id: 1,
2983 is_primary: true,
2984 ..Default::default()
2985 }
2986 }
2987 ]
2988 );
2989 }
2990
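    // Constructs a local project with an empty language registry and a fake HTTP client that serves 404 responses.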
2991 fn build_project(fs: Arc<dyn Fs>, cx: &mut TestAppContext) -> ModelHandle<Project> {
2992 let languages = Arc::new(LanguageRegistry::new());
2993 let http_client = FakeHttpClient::with_404_response();
2994 let client = client::Client::new(http_client.clone());
2995 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
2996 cx.update(|cx| Project::local(client, user_store, languages, fs, cx))
2997 }
2998}