1pub mod fs;
2mod ignore;
3pub mod worktree;
4
5use anyhow::{anyhow, Result};
6use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore};
7use clock::ReplicaId;
8use collections::{hash_map, HashMap, HashSet};
9use futures::Future;
10use fuzzy::{PathMatch, PathMatchCandidate, PathMatchCandidateSet};
11use gpui::{
12 AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task,
13 WeakModelHandle,
14};
15use language::{
16 range_from_lsp, Bias, Buffer, Diagnostic, DiagnosticEntry, File as _, Language,
17 LanguageRegistry, Operation, PointUtf16, ToOffset, ToPointUtf16,
18};
19use lsp::{DiagnosticSeverity, LanguageServer};
20use postage::{prelude::Stream, watch};
21use smol::block_on;
22use std::{
23 convert::TryInto,
24 ops::Range,
25 path::{Path, PathBuf},
26 sync::{atomic::AtomicBool, Arc},
27};
28use util::{post_inc, ResultExt, TryFutureExt as _};
29
30pub use fs::*;
31pub use worktree::*;
32
/// Root model for a set of worktrees being edited together, either hosted
/// locally (`ProjectClientState::Local`) or joined as a guest replica of a
/// remote host's project (`ProjectClientState::Remote`).
pub struct Project {
    // Worktrees may be held strongly or weakly; weak entries are skipped
    // once their model can no longer be upgraded.
    worktrees: Vec<WorktreeHandle>,
    // The most recently activated project entry, if any.
    active_entry: Option<ProjectEntry>,
    languages: Arc<LanguageRegistry>,
    // At most one language server per (worktree, language name) pair.
    language_servers: HashMap<(WorktreeId, String), Arc<LanguageServer>>,
    client: Arc<client::Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    client_state: ProjectClientState,
    // Other connected peers, keyed by their peer id.
    collaborators: HashMap<PeerId, Collaborator>,
    subscriptions: Vec<client::Subscription>,
    // Count of language servers currently producing disk-based diagnostics;
    // signed so a transient over-decrement cannot underflow.
    language_servers_with_diagnostics_running: isize,
    // Buffers the project knows about, keyed by buffer id.
    open_buffers: HashMap<usize, OpenBuffer>,
    // In-flight buffer loads keyed by path; concurrent callers for the same
    // path all await the same watch channel instead of loading twice.
    loading_buffers: HashMap<
        ProjectPath,
        postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
    >,
    // Buffers that have been shared with each remote peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
}
52
/// State of a buffer slot in `Project::open_buffers`.
enum OpenBuffer {
    // Operations queued for a buffer that isn't loaded yet.
    Operations(Vec<Operation>),
    // A loaded buffer, held weakly so the project doesn't keep it alive.
    Loaded(WeakModelHandle<Buffer>),
}
57
/// A worktree reference that the project holds either strongly (keeping the
/// worktree alive) or weakly (letting it drop when unused elsewhere).
enum WorktreeHandle {
    Strong(ModelHandle<Worktree>),
    Weak(WeakModelHandle<Worktree>),
}
62
/// Whether this replica hosts the project or joined someone else's.
enum ProjectClientState {
    Local {
        // True once `share` has been called (and until `unshare`).
        is_shared: bool,
        // Server-assigned project id, published to interested tasks; `None`
        // while disconnected/unregistered.
        remote_id_tx: watch::Sender<Option<u64>>,
        remote_id_rx: watch::Receiver<Option<u64>>,
        // Keeps the background registration task alive for the project's
        // lifetime (see `Project::local`).
        _maintain_remote_id_task: Task<Option<()>>,
    },
    Remote {
        // Set when the host stops sharing; makes the project read-only.
        sharing_has_stopped: bool,
        remote_id: u64,
        replica_id: ReplicaId,
    },
}
76
/// Another peer participating in this project, with the replica id their
/// buffer operations are attributed to.
#[derive(Clone, Debug)]
pub struct Collaborator {
    pub user: Arc<User>,
    pub peer_id: PeerId,
    pub replica_id: ReplicaId,
}
83
/// Events emitted by `Project` for observers (e.g. UI panes).
#[derive(Clone, Debug, PartialEq)]
pub enum Event {
    ActiveEntryChanged(Option<ProjectEntry>),
    WorktreeRemoved(WorktreeId),
    // Bracketing events for a language server's disk-based diagnostics pass.
    DiskBasedDiagnosticsStarted,
    DiskBasedDiagnosticsUpdated,
    DiskBasedDiagnosticsFinished,
    // Diagnostics changed for the given path.
    DiagnosticsUpdated(ProjectPath),
}
93
/// A path to a file, addressed relative to a specific worktree.
#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct ProjectPath {
    pub worktree_id: WorktreeId,
    // Worktree-relative path.
    pub path: Arc<Path>,
}
99
/// Per-file tally of primary diagnostics, bucketed by severity.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct DiagnosticSummary {
    pub error_count: usize,
    pub warning_count: usize,
    pub info_count: usize,
    pub hint_count: usize,
}
107
/// A go-to-definition result: the buffer containing the definition and the
/// anchored range of the definition within it.
#[derive(Debug)]
pub struct Definition {
    pub target_buffer: ModelHandle<Buffer>,
    pub target_range: Range<language::Anchor>,
}
113
114impl DiagnosticSummary {
115 fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
116 let mut this = Self {
117 error_count: 0,
118 warning_count: 0,
119 info_count: 0,
120 hint_count: 0,
121 };
122
123 for entry in diagnostics {
124 if entry.diagnostic.is_primary {
125 match entry.diagnostic.severity {
126 DiagnosticSeverity::ERROR => this.error_count += 1,
127 DiagnosticSeverity::WARNING => this.warning_count += 1,
128 DiagnosticSeverity::INFORMATION => this.info_count += 1,
129 DiagnosticSeverity::HINT => this.hint_count += 1,
130 _ => {}
131 }
132 }
133 }
134
135 this
136 }
137
138 pub fn to_proto(&self, path: Arc<Path>) -> proto::DiagnosticSummary {
139 proto::DiagnosticSummary {
140 path: path.to_string_lossy().to_string(),
141 error_count: self.error_count as u32,
142 warning_count: self.warning_count as u32,
143 info_count: self.info_count as u32,
144 hint_count: self.hint_count as u32,
145 }
146 }
147}
148
/// Identifies a single entry (file or directory) within a worktree.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ProjectEntry {
    pub worktree_id: WorktreeId,
    pub entry_id: usize,
}
154
155impl Project {
    /// Creates a project that edits worktrees on the local filesystem.
    ///
    /// Spawns a background task that watches the client's connection status:
    /// on every (re)connect it registers the project with the server to
    /// obtain a project id, re-registers all current worktrees under that id,
    /// and publishes the id via `set_remote_id`; on disconnect the id is
    /// cleared. The task is held in `_maintain_remote_id_task` so it lives as
    /// long as the project.
    pub fn local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut MutableAppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let (remote_id_tx, remote_id_rx) = watch::channel();
            let _maintain_remote_id_task = cx.spawn_weak({
                let rpc = client.clone();
                move |this, mut cx| {
                    async move {
                        let mut status = rpc.status();
                        while let Some(status) = status.recv().await {
                            // Stop quietly once the project itself is gone.
                            if let Some(this) = this.upgrade(&cx) {
                                let remote_id = if let client::Status::Connected { .. } = status {
                                    let response = rpc.request(proto::RegisterProject {}).await?;
                                    Some(response.project_id)
                                } else {
                                    None
                                };

                                if let Some(project_id) = remote_id {
                                    // Collect the registration futures inside
                                    // the update closure, then await them
                                    // outside it (can't await while holding
                                    // the model borrow).
                                    let mut registrations = Vec::new();
                                    this.update(&mut cx, |this, cx| {
                                        for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                                            registrations.push(worktree.update(
                                                cx,
                                                |worktree, cx| {
                                                    let worktree = worktree.as_local_mut().unwrap();
                                                    worktree.register(project_id, cx)
                                                },
                                            ));
                                        }
                                    });
                                    for registration in registrations {
                                        registration.await?;
                                    }
                                }
                                this.update(&mut cx, |this, cx| this.set_remote_id(remote_id, cx));
                            }
                        }
                        Ok(())
                    }
                    .log_err()
                }
            });

            Self {
                worktrees: Default::default(),
                collaborators: Default::default(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                client_state: ProjectClientState::Local {
                    is_shared: false,
                    remote_id_tx,
                    remote_id_rx,
                    _maintain_remote_id_task,
                },
                subscriptions: Vec::new(),
                active_entry: None,
                languages,
                client,
                user_store,
                fs,
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            }
        })
    }
228
    /// Joins an existing project hosted by another peer, producing a guest
    /// replica.
    ///
    /// Connects and authenticates the client, requests to join `remote_id`,
    /// reconstructs the host's worktrees from the response, loads the
    /// collaborators' user records, and subscribes to all server messages
    /// needed to keep this replica in sync.
    ///
    /// # Errors
    /// Fails if connecting, the join request, worktree construction, or user
    /// loading fails.
    pub async fn remote(
        remote_id: u64,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        client.authenticate_and_connect(&cx).await?;

        let response = client
            .request(proto::JoinProject {
                project_id: remote_id,
            })
            .await?;

        // The server assigns this replica's id for CRDT operations.
        let replica_id = response.replica_id as ReplicaId;

        let mut worktrees = Vec::new();
        for worktree in response.worktrees {
            worktrees
                .push(Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx).await?);
        }

        // Fetch user records for every collaborator before building the
        // collaborator map.
        let user_ids = response
            .collaborators
            .iter()
            .map(|peer| peer.user_id)
            .collect();
        user_store
            .update(cx, |user_store, cx| user_store.load_users(user_ids, cx))
            .await?;
        let mut collaborators = HashMap::default();
        for message in response.collaborators {
            let collaborator = Collaborator::from_proto(message, &user_store, cx).await?;
            collaborators.insert(collaborator.peer_id, collaborator);
        }

        Ok(cx.add_model(|cx| {
            let mut this = Self {
                worktrees: Vec::new(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                active_entry: None,
                collaborators,
                languages,
                user_store,
                fs,
                subscriptions: vec![
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unshare_project),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_share_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unregister_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_update_diagnostic_summary,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updating,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updated,
                    ),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
                ],
                client,
                client_state: ProjectClientState::Remote {
                    sharing_has_stopped: false,
                    remote_id,
                    replica_id,
                },
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            };
            // Add worktrees via `add_worktree` so observers/subscriptions are
            // wired up for each of them.
            for worktree in worktrees {
                this.add_worktree(&worktree, cx);
            }
            this
        }))
    }
318
    /// Publishes the server-assigned project id (or clears it) and rebuilds
    /// the message subscriptions that only apply while registered.
    fn set_remote_id(&mut self, remote_id: Option<u64>, cx: &mut ModelContext<Self>) {
        if let ProjectClientState::Local { remote_id_tx, .. } = &mut self.client_state {
            *remote_id_tx.borrow_mut() = remote_id;
        }

        // Dropping the old subscriptions unsubscribes them; only resubscribe
        // when we actually have an id to be addressed by.
        self.subscriptions.clear();
        if let Some(remote_id) = remote_id {
            let client = &self.client;
            self.subscriptions.extend([
                client.subscribe_to_entity(remote_id, cx, Self::handle_open_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_close_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
                client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
                client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
                client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_save_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
                client.subscribe_to_entity(remote_id, cx, Self::handle_format_buffer),
            ]);
        }
    }
340
341 pub fn remote_id(&self) -> Option<u64> {
342 match &self.client_state {
343 ProjectClientState::Local { remote_id_rx, .. } => *remote_id_rx.borrow(),
344 ProjectClientState::Remote { remote_id, .. } => Some(*remote_id),
345 }
346 }
347
348 pub fn next_remote_id(&self) -> impl Future<Output = u64> {
349 let mut id = None;
350 let mut watch = None;
351 match &self.client_state {
352 ProjectClientState::Local { remote_id_rx, .. } => watch = Some(remote_id_rx.clone()),
353 ProjectClientState::Remote { remote_id, .. } => id = Some(*remote_id),
354 }
355
356 async move {
357 if let Some(id) = id {
358 return id;
359 }
360 let mut watch = watch.unwrap();
361 loop {
362 let id = *watch.borrow();
363 if let Some(id) = id {
364 return id;
365 }
366 watch.recv().await;
367 }
368 }
369 }
370
371 pub fn replica_id(&self) -> ReplicaId {
372 match &self.client_state {
373 ProjectClientState::Local { .. } => 0,
374 ProjectClientState::Remote { replica_id, .. } => *replica_id,
375 }
376 }
377
    /// The peers currently collaborating on this project, keyed by peer id.
    pub fn collaborators(&self) -> &HashMap<PeerId, Collaborator> {
        &self.collaborators
    }
381
    /// Iterates over the project's live worktrees, skipping weak handles
    /// whose models have already been dropped.
    pub fn worktrees<'a>(
        &'a self,
        cx: &'a AppContext,
    ) -> impl 'a + Iterator<Item = ModelHandle<Worktree>> {
        self.worktrees
            .iter()
            .filter_map(move |worktree| worktree.upgrade(cx))
    }
390
391 pub fn worktree_for_id(
392 &self,
393 id: WorktreeId,
394 cx: &AppContext,
395 ) -> Option<ModelHandle<Worktree>> {
396 self.worktrees(cx)
397 .find(|worktree| worktree.read(cx).id() == id)
398 }
399
    /// Starts sharing this local project with collaborators.
    ///
    /// Marks the project shared, tells the server via `ShareProject`, then
    /// shares every worktree. Fails for remote projects, or when the project
    /// has not yet received an id from the server.
    pub fn share(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
        let rpc = self.client.clone();
        cx.spawn(|this, mut cx| async move {
            let project_id = this.update(&mut cx, |this, _| {
                if let ProjectClientState::Local {
                    is_shared,
                    remote_id_rx,
                    ..
                } = &mut this.client_state
                {
                    *is_shared = true;
                    remote_id_rx
                        .borrow()
                        .ok_or_else(|| anyhow!("no project id"))
                } else {
                    Err(anyhow!("can't share a remote project"))
                }
            })?;

            rpc.request(proto::ShareProject { project_id }).await?;
            // Collect the per-worktree share futures inside the update
            // closure, then await them outside it.
            let mut tasks = Vec::new();
            this.update(&mut cx, |this, cx| {
                for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                    worktree.update(cx, |worktree, cx| {
                        let worktree = worktree.as_local_mut().unwrap();
                        tasks.push(worktree.share(project_id, cx));
                    });
                }
            });
            for task in tasks {
                task.await?;
            }
            this.update(&mut cx, |_, cx| cx.notify());
            Ok(())
        })
    }
436
437 pub fn unshare(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
438 let rpc = self.client.clone();
439 cx.spawn(|this, mut cx| async move {
440 let project_id = this.update(&mut cx, |this, _| {
441 if let ProjectClientState::Local {
442 is_shared,
443 remote_id_rx,
444 ..
445 } = &mut this.client_state
446 {
447 *is_shared = false;
448 remote_id_rx
449 .borrow()
450 .ok_or_else(|| anyhow!("no project id"))
451 } else {
452 Err(anyhow!("can't share a remote project"))
453 }
454 })?;
455
456 rpc.send(proto::UnshareProject { project_id }).await?;
457 this.update(&mut cx, |this, cx| {
458 this.collaborators.clear();
459 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
460 worktree.update(cx, |worktree, _| {
461 worktree.as_local_mut().unwrap().unshare();
462 });
463 }
464 cx.notify()
465 });
466 Ok(())
467 })
468 }
469
470 pub fn is_read_only(&self) -> bool {
471 match &self.client_state {
472 ProjectClientState::Local { .. } => false,
473 ProjectClientState::Remote {
474 sharing_has_stopped,
475 ..
476 } => *sharing_has_stopped,
477 }
478 }
479
480 pub fn is_local(&self) -> bool {
481 match &self.client_state {
482 ProjectClientState::Local { .. } => true,
483 ProjectClientState::Remote { .. } => false,
484 }
485 }
486
    /// Opens (or returns an already-open) buffer for `path`.
    ///
    /// Concurrent requests for the same path are coalesced: the first caller
    /// starts the load and records a watch channel in `loading_buffers`;
    /// every caller (including the first) awaits that channel, so at most one
    /// load is in flight per path.
    pub fn open_buffer(
        &mut self,
        path: impl Into<ProjectPath>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let project_path = path.into();
        let worktree = if let Some(worktree) = self.worktree_for_id(project_path.worktree_id, cx) {
            worktree
        } else {
            return Task::ready(Err(anyhow!("no such worktree")));
        };

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = self.get_open_buffer(&project_path, cx);
        if let Some(existing_buffer) = existing_buffer {
            return Task::ready(Ok(existing_buffer));
        }

        let mut loading_watch = match self.loading_buffers.entry(project_path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = worktree.update(cx, |worktree, cx| {
                    worktree.load_buffer(&project_path.path, cx)
                });

                cx.spawn(move |this, mut cx| async move {
                    let load_result = load_buffer.await;
                    // Publish the result (ok or error) to every waiter.
                    *tx.borrow_mut() = Some(this.update(&mut cx, |this, cx| {
                        // Record the fact that the buffer is no longer loading.
                        this.loading_buffers.remove(&project_path);
                        let buffer = load_result.map_err(Arc::new)?;
                        this.open_buffers.insert(
                            buffer.read(cx).remote_id() as usize,
                            OpenBuffer::Loaded(buffer.downgrade()),
                        );
                        this.assign_language_to_buffer(&worktree, &buffer, cx);
                        Ok(buffer)
                    }));
                })
                .detach();
                rx
            }
        };

        // Wait on the watch until the load task publishes a result.
        cx.foreground().spawn(async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    match result {
                        Ok(buffer) => return Ok(buffer.clone()),
                        // The error is shared between waiters via `Arc`, so
                        // re-wrap it per caller.
                        Err(error) => return Err(anyhow!("{}", error)),
                    }
                }
                loading_watch.recv().await;
            }
        })
    }
550
    /// Saves `buffer` to a new absolute path.
    ///
    /// Finds (or creates, non-weak) a local worktree containing `abs_path`,
    /// writes the buffer there, then records the buffer as open and
    /// re-assigns its language based on the new file name.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        abs_path: PathBuf,
        cx: &mut ModelContext<Project>,
    ) -> Task<Result<()>> {
        let worktree_task = self.find_or_create_local_worktree(&abs_path, false, cx);
        cx.spawn(|this, mut cx| async move {
            let (worktree, path) = worktree_task.await?;
            worktree
                .update(&mut cx, |worktree, cx| {
                    worktree
                        .as_local_mut()
                        .unwrap()
                        .save_buffer_as(buffer.clone(), path, cx)
                })
                .await?;
            this.update(&mut cx, |this, cx| {
                // NOTE(review): keyed by `buffer.id()` here, while
                // `open_buffer` keys by the buffer's remote id — confirm the
                // two coincide for local buffers.
                this.open_buffers
                    .insert(buffer.id(), OpenBuffer::Loaded(buffer.downgrade()));
                this.assign_language_to_buffer(&worktree, &buffer, cx);
            });
            Ok(())
        })
    }
576
577 #[cfg(any(test, feature = "test-support"))]
578 pub fn has_open_buffer(&self, path: impl Into<ProjectPath>, cx: &AppContext) -> bool {
579 let path = path.into();
580 if let Some(worktree) = self.worktree_for_id(path.worktree_id, cx) {
581 self.open_buffers.iter().any(|(_, buffer)| {
582 if let Some(buffer) = buffer.upgrade(cx) {
583 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
584 if file.worktree == worktree && file.path() == &path.path {
585 return true;
586 }
587 }
588 }
589 false
590 })
591 } else {
592 false
593 }
594 }
595
    /// Returns the open buffer for `path`, if any, pruning dead entries from
    /// `open_buffers` as a side effect.
    fn get_open_buffer(
        &mut self,
        path: &ProjectPath,
        cx: &mut ModelContext<Self>,
    ) -> Option<ModelHandle<Buffer>> {
        let mut result = None;
        let worktree = self.worktree_for_id(path.worktree_id, cx)?;
        // NOTE(review): this `retain` removes entries that fail to upgrade,
        // but it also removes `OpenBuffer::Operations` entries (the `if let`
        // falls through to `false`) — confirm that discarding queued
        // operations here is intended.
        self.open_buffers.retain(|_, buffer| {
            if let OpenBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                        if file.worktree == worktree && file.path() == &path.path {
                            result = Some(buffer);
                        }
                    }
                    return true;
                }
            }
            false
        });
        result
    }
618
    /// Picks a language for `buffer` from its file name and wires it up.
    ///
    /// If the registry selects a language, it is set on the buffer; for local
    /// worktrees a language server is started (or reused) for the
    /// (worktree, language) pair and attached to the buffer. Any diagnostics
    /// the worktree already stored for the path are applied regardless of
    /// language. Returns `None` only as an early-exit convenience (the
    /// return value carries no meaning to callers).
    fn assign_language_to_buffer(
        &mut self,
        worktree: &ModelHandle<Worktree>,
        buffer: &ModelHandle<Buffer>,
        cx: &mut ModelContext<Self>,
    ) -> Option<()> {
        let (path, full_path) = {
            let file = buffer.read(cx).file()?;
            (file.path().clone(), file.full_path())
        };

        // If the buffer has a language, set it and start/assign the language server
        if let Some(language) = self.languages.select_language(&full_path) {
            buffer.update(cx, |buffer, cx| {
                buffer.set_language(Some(language.clone()), cx);
            });

            // For local worktrees, start a language server if needed.
            // Also assign the language server and any previously stored diagnostics to the buffer.
            if let Some(local_worktree) = worktree.read(cx).as_local() {
                let worktree_id = local_worktree.id();
                let worktree_abs_path = local_worktree.abs_path().clone();

                // Reuse an existing server for this worktree+language, or
                // start one and cache it on success.
                let language_server = match self
                    .language_servers
                    .entry((worktree_id, language.name().to_string()))
                {
                    hash_map::Entry::Occupied(e) => Some(e.get().clone()),
                    hash_map::Entry::Vacant(e) => Self::start_language_server(
                        self.client.clone(),
                        language.clone(),
                        &worktree_abs_path,
                        cx,
                    )
                    .map(|server| e.insert(server).clone()),
                };

                buffer.update(cx, |buffer, cx| {
                    buffer.set_language_server(language_server, cx);
                });
            }
        }

        // Apply any diagnostics the worktree has already collected for this
        // path (happens even when no language was selected).
        if let Some(local_worktree) = worktree.read(cx).as_local() {
            if let Some(diagnostics) = local_worktree.diagnostics_for_path(&path) {
                buffer.update(cx, |buffer, cx| {
                    buffer.update_diagnostics(None, diagnostics, cx).log_err();
                });
            }
        }

        None
    }
672
    /// Starts a language server for `language` rooted at `worktree_path` and
    /// wires its diagnostic notifications into the project.
    ///
    /// Events flow through an internal channel: `PublishDiagnostics` becomes
    /// a `DiagnosticsUpdate`; when the server exposes a disk-based-diagnostic
    /// progress token, `Progress` notifications bracket the updates with
    /// start/finish events, otherwise each publish is bracketed individually.
    /// Returns `None` if the server fails to start (the error is logged).
    fn start_language_server(
        rpc: Arc<Client>,
        language: Arc<Language>,
        worktree_path: &Path,
        cx: &mut ModelContext<Self>,
    ) -> Option<Arc<LanguageServer>> {
        // Internal protocol between the LSP notification callbacks (which run
        // off the main thread) and the event-processing task below.
        enum LspEvent {
            DiagnosticsStart,
            DiagnosticsUpdate(lsp::PublishDiagnosticsParams),
            DiagnosticsFinish,
        }

        let language_server = language
            .start_server(worktree_path, cx)
            .log_err()
            .flatten()?;
        let disk_based_sources = language
            .disk_based_diagnostic_sources()
            .cloned()
            .unwrap_or_default();
        let disk_based_diagnostics_progress_token =
            language.disk_based_diagnostics_progress_token().cloned();
        let has_disk_based_diagnostic_progress_token =
            disk_based_diagnostics_progress_token.is_some();
        let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();

        // Listen for `PublishDiagnostics` notifications.
        language_server
            .on_notification::<lsp::notification::PublishDiagnostics, _>({
                let diagnostics_tx = diagnostics_tx.clone();
                move |params| {
                    // Without a progress token there is no other signal for
                    // start/finish, so bracket each publish individually.
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                    }
                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsUpdate(params))).ok();
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                    }
                }
            })
            .detach();

        // Listen for `Progress` notifications. Send an event when the language server
        // transitions between running jobs and not running any jobs.
        let mut running_jobs_for_this_server: i32 = 0;
        language_server
            .on_notification::<lsp::notification::Progress, _>(move |params| {
                let token = match params.token {
                    lsp::NumberOrString::Number(_) => None,
                    lsp::NumberOrString::String(token) => Some(token),
                };

                // Only the disk-based-diagnostics token is of interest.
                if token == disk_based_diagnostics_progress_token {
                    match params.value {
                        lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                            lsp::WorkDoneProgress::Begin(_) => {
                                running_jobs_for_this_server += 1;
                                // First job started: diagnostics pass begins.
                                if running_jobs_for_this_server == 1 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                                }
                            }
                            lsp::WorkDoneProgress::End(_) => {
                                running_jobs_for_this_server -= 1;
                                // Last job ended: diagnostics pass finished.
                                if running_jobs_for_this_server == 0 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                                }
                            }
                            _ => {}
                        },
                    }
                }
            })
            .detach();

        // Process all the LSP events.
        cx.spawn_weak(|this, mut cx| async move {
            while let Ok(message) = diagnostics_rx.recv().await {
                // Stop processing once the project is dropped.
                let this = cx.read(|cx| this.upgrade(cx))?;
                match message {
                    LspEvent::DiagnosticsStart => {
                        let send = this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_started(cx);
                            // Forward the transition to the server when the
                            // project is registered.
                            this.remote_id().map(|project_id| {
                                rpc.send(proto::DiskBasedDiagnosticsUpdating { project_id })
                            })
                        });
                        if let Some(send) = send {
                            send.await.log_err();
                        }
                    }
                    LspEvent::DiagnosticsUpdate(params) => {
                        this.update(&mut cx, |this, cx| {
                            this.update_diagnostics(params, &disk_based_sources, cx)
                                .log_err();
                        });
                    }
                    LspEvent::DiagnosticsFinish => {
                        let send = this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_finished(cx);
                            this.remote_id().map(|project_id| {
                                rpc.send(proto::DiskBasedDiagnosticsUpdated { project_id })
                            })
                        });
                        if let Some(send) = send {
                            send.await.log_err();
                        }
                    }
                }
            }
            Some(())
        })
        .detach();

        Some(language_server)
    }
788
789 pub fn update_diagnostics(
790 &mut self,
791 params: lsp::PublishDiagnosticsParams,
792 disk_based_sources: &HashSet<String>,
793 cx: &mut ModelContext<Self>,
794 ) -> Result<()> {
795 let abs_path = params
796 .uri
797 .to_file_path()
798 .map_err(|_| anyhow!("URI is not a file"))?;
799 let mut next_group_id = 0;
800 let mut diagnostics = Vec::default();
801 let mut primary_diagnostic_group_ids = HashMap::default();
802 let mut sources_by_group_id = HashMap::default();
803 let mut supporting_diagnostic_severities = HashMap::default();
804 for diagnostic in ¶ms.diagnostics {
805 let source = diagnostic.source.as_ref();
806 let code = diagnostic.code.as_ref().map(|code| match code {
807 lsp::NumberOrString::Number(code) => code.to_string(),
808 lsp::NumberOrString::String(code) => code.clone(),
809 });
810 let range = range_from_lsp(diagnostic.range);
811 let is_supporting = diagnostic
812 .related_information
813 .as_ref()
814 .map_or(false, |infos| {
815 infos.iter().any(|info| {
816 primary_diagnostic_group_ids.contains_key(&(
817 source,
818 code.clone(),
819 range_from_lsp(info.location.range),
820 ))
821 })
822 });
823
824 if is_supporting {
825 if let Some(severity) = diagnostic.severity {
826 supporting_diagnostic_severities
827 .insert((source, code.clone(), range), severity);
828 }
829 } else {
830 let group_id = post_inc(&mut next_group_id);
831 let is_disk_based =
832 source.map_or(false, |source| disk_based_sources.contains(source));
833
834 sources_by_group_id.insert(group_id, source);
835 primary_diagnostic_group_ids
836 .insert((source, code.clone(), range.clone()), group_id);
837
838 diagnostics.push(DiagnosticEntry {
839 range,
840 diagnostic: Diagnostic {
841 code: code.clone(),
842 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
843 message: diagnostic.message.clone(),
844 group_id,
845 is_primary: true,
846 is_valid: true,
847 is_disk_based,
848 },
849 });
850 if let Some(infos) = &diagnostic.related_information {
851 for info in infos {
852 if info.location.uri == params.uri {
853 let range = range_from_lsp(info.location.range);
854 diagnostics.push(DiagnosticEntry {
855 range,
856 diagnostic: Diagnostic {
857 code: code.clone(),
858 severity: DiagnosticSeverity::INFORMATION,
859 message: info.message.clone(),
860 group_id,
861 is_primary: false,
862 is_valid: true,
863 is_disk_based,
864 },
865 });
866 }
867 }
868 }
869 }
870 }
871
872 for entry in &mut diagnostics {
873 let diagnostic = &mut entry.diagnostic;
874 if !diagnostic.is_primary {
875 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
876 if let Some(&severity) = supporting_diagnostic_severities.get(&(
877 source,
878 diagnostic.code.clone(),
879 entry.range.clone(),
880 )) {
881 diagnostic.severity = severity;
882 }
883 }
884 }
885
886 self.update_diagnostic_entries(abs_path, params.version, diagnostics, cx)?;
887 Ok(())
888 }
889
    /// Stores `diagnostics` for the file at `abs_path`: updates the matching
    /// open buffer (if any), persists the entries on the owning local
    /// worktree, and emits `Event::DiagnosticsUpdated`.
    ///
    /// # Errors
    /// Fails when no worktree contains `abs_path`, when the worktree is not
    /// local, or when the buffer rejects the diagnostics.
    pub fn update_diagnostic_entries(
        &mut self,
        abs_path: PathBuf,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Project>,
    ) -> Result<(), anyhow::Error> {
        let (worktree, relative_path) = self
            .find_local_worktree(&abs_path, cx)
            .ok_or_else(|| anyhow!("no worktree found for diagnostics"))?;
        let project_path = ProjectPath {
            worktree_id: worktree.read(cx).id(),
            path: relative_path.into(),
        };

        // Apply to the first open buffer whose file path matches.
        // NOTE(review): only the relative path is compared (not the
        // worktree), and the search stops at the first match — confirm that
        // paths are unique enough across worktrees here.
        for buffer in self.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == project_path.path)
                {
                    buffer.update(cx, |buffer, cx| {
                        buffer.update_diagnostics(version, diagnostics.clone(), cx)
                    })?;
                    break;
                }
            }
        }
        worktree.update(cx, |worktree, cx| {
            worktree
                .as_local_mut()
                .ok_or_else(|| anyhow!("not a local worktree"))?
                .update_diagnostics(project_path.path.clone(), diagnostics, cx)
        })?;
        cx.emit(Event::DiagnosticsUpdated(project_path));
        Ok(())
    }
928
    /// Resolves go-to-definition for `position` in the given buffer via the
    /// buffer's language server.
    ///
    /// Only implemented for local worktrees: sends an LSP `GotoDefinition`
    /// request, then for every returned location opens (creating a weak
    /// worktree if necessary) the target buffer and converts the LSP range
    /// into buffer anchors. Guests currently get an empty result.
    ///
    /// # Errors
    /// Fails when the buffer has no worktree, no language, or no language
    /// server, or when the request/target resolution fails.
    pub fn definition<T: ToOffset>(
        &self,
        source_buffer_handle: &ModelHandle<Buffer>,
        position: T,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<Vec<Definition>>> {
        let source_buffer_handle = source_buffer_handle.clone();
        let buffer = source_buffer_handle.read(cx);
        let worktree;
        let buffer_abs_path;
        if let Some(file) = File::from_dyn(buffer.file()) {
            worktree = file.worktree.clone();
            buffer_abs_path = file.abs_path();
        } else {
            return Task::ready(Err(anyhow!("buffer does not belong to any worktree")));
        };

        if worktree.read(cx).as_local().is_some() {
            // LSP positions are UTF-16 based.
            let point = buffer.offset_to_point_utf16(position.to_offset(buffer));
            let buffer_abs_path = buffer_abs_path.unwrap();
            let lang_name;
            let lang_server;
            if let Some(lang) = buffer.language() {
                lang_name = lang.name().to_string();
                if let Some(server) = self
                    .language_servers
                    .get(&(worktree.read(cx).id(), lang_name.clone()))
                {
                    lang_server = server.clone();
                } else {
                    return Task::ready(Err(anyhow!("buffer does not have a language server")));
                };
            } else {
                return Task::ready(Err(anyhow!("buffer does not have a language")));
            }

            cx.spawn(|this, mut cx| async move {
                let response = lang_server
                    .request::<lsp::request::GotoDefinition>(lsp::GotoDefinitionParams {
                        text_document_position_params: lsp::TextDocumentPositionParams {
                            text_document: lsp::TextDocumentIdentifier::new(
                                lsp::Url::from_file_path(&buffer_abs_path).unwrap(),
                            ),
                            position: lsp::Position::new(point.row, point.column),
                        },
                        work_done_progress_params: Default::default(),
                        partial_result_params: Default::default(),
                    })
                    .await?;

                let mut definitions = Vec::new();
                if let Some(response) = response {
                    // Normalize the three LSP response shapes into a flat
                    // list of (uri, range) pairs.
                    let mut unresolved_locations = Vec::new();
                    match response {
                        lsp::GotoDefinitionResponse::Scalar(loc) => {
                            unresolved_locations.push((loc.uri, loc.range));
                        }
                        lsp::GotoDefinitionResponse::Array(locs) => {
                            unresolved_locations.extend(locs.into_iter().map(|l| (l.uri, l.range)));
                        }
                        lsp::GotoDefinitionResponse::Link(links) => {
                            unresolved_locations.extend(
                                links
                                    .into_iter()
                                    .map(|l| (l.target_uri, l.target_selection_range)),
                            );
                        }
                    }

                    for (target_uri, target_range) in unresolved_locations {
                        let abs_path = target_uri
                            .to_file_path()
                            .map_err(|_| anyhow!("invalid target path"))?;

                        // Reuse a worktree containing the target; otherwise
                        // create a weak one and reuse this language server
                        // for it.
                        let (worktree, relative_path) = if let Some(result) =
                            this.read_with(&cx, |this, cx| this.find_local_worktree(&abs_path, cx))
                        {
                            result
                        } else {
                            let worktree = this
                                .update(&mut cx, |this, cx| {
                                    this.create_local_worktree(&abs_path, true, cx)
                                })
                                .await?;
                            this.update(&mut cx, |this, cx| {
                                this.language_servers.insert(
                                    (worktree.read(cx).id(), lang_name.clone()),
                                    lang_server.clone(),
                                );
                            });
                            // A worktree created for the file itself has an
                            // empty relative path.
                            (worktree, PathBuf::new())
                        };

                        let project_path = ProjectPath {
                            worktree_id: worktree.read_with(&cx, |worktree, _| worktree.id()),
                            path: relative_path.into(),
                        };
                        let target_buffer_handle = this
                            .update(&mut cx, |this, cx| this.open_buffer(project_path, cx))
                            .await?;
                        // Convert the LSP range into clipped anchors in the
                        // target buffer.
                        cx.read(|cx| {
                            let target_buffer = target_buffer_handle.read(cx);
                            let target_start = target_buffer
                                .clip_point_utf16(target_range.start.to_point_utf16(), Bias::Left);
                            let target_end = target_buffer
                                .clip_point_utf16(target_range.end.to_point_utf16(), Bias::Left);
                            definitions.push(Definition {
                                target_buffer: target_buffer_handle,
                                target_range: target_buffer.anchor_after(target_start)
                                    ..target_buffer.anchor_before(target_end),
                            });
                        });
                    }
                }

                Ok(definitions)
            })
        } else {
            log::info!("go to definition is not yet implemented for guests");
            Task::ready(Ok(Default::default()))
        }
    }
1051
1052 pub fn find_or_create_local_worktree(
1053 &self,
1054 abs_path: impl AsRef<Path>,
1055 weak: bool,
1056 cx: &mut ModelContext<Self>,
1057 ) -> Task<Result<(ModelHandle<Worktree>, PathBuf)>> {
1058 let abs_path = abs_path.as_ref();
1059 if let Some((tree, relative_path)) = self.find_local_worktree(abs_path, cx) {
1060 Task::ready(Ok((tree.clone(), relative_path.into())))
1061 } else {
1062 let worktree = self.create_local_worktree(abs_path, weak, cx);
1063 cx.foreground()
1064 .spawn(async move { Ok((worktree.await?, PathBuf::new())) })
1065 }
1066 }
1067
1068 fn find_local_worktree(
1069 &self,
1070 abs_path: &Path,
1071 cx: &AppContext,
1072 ) -> Option<(ModelHandle<Worktree>, PathBuf)> {
1073 for tree in self.worktrees(cx) {
1074 if let Some(relative_path) = tree
1075 .read(cx)
1076 .as_local()
1077 .and_then(|t| abs_path.strip_prefix(t.abs_path()).ok())
1078 {
1079 return Some((tree.clone(), relative_path.into()));
1080 }
1081 }
1082 None
1083 }
1084
1085 pub fn is_shared(&self) -> bool {
1086 match &self.client_state {
1087 ProjectClientState::Local { is_shared, .. } => *is_shared,
1088 ProjectClientState::Remote { .. } => false,
1089 }
1090 }
1091
    /// Creates a new local worktree rooted at `abs_path` and adds it to the
    /// project.
    ///
    /// If the project already has a server-assigned id, the new worktree is
    /// registered under it; if the project is also currently shared, the
    /// worktree is shared as well.
    fn create_local_worktree(
        &self,
        abs_path: impl AsRef<Path>,
        weak: bool,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Worktree>>> {
        let fs = self.fs.clone();
        let client = self.client.clone();
        let path = Arc::from(abs_path.as_ref());
        cx.spawn(|project, mut cx| async move {
            let worktree = Worktree::local(client.clone(), path, weak, fs, &mut cx).await?;

            // Snapshot the registration/sharing state while holding the
            // model borrow; perform the async calls afterwards.
            let (remote_project_id, is_shared) = project.update(&mut cx, |project, cx| {
                project.add_worktree(&worktree, cx);
                (project.remote_id(), project.is_shared())
            });

            if let Some(project_id) = remote_project_id {
                worktree
                    .update(&mut cx, |worktree, cx| {
                        worktree.as_local_mut().unwrap().register(project_id, cx)
                    })
                    .await?;
                if is_shared {
                    worktree
                        .update(&mut cx, |worktree, cx| {
                            worktree.as_local_mut().unwrap().share(project_id, cx)
                        })
                        .await?;
                }
            }

            Ok(worktree)
        })
    }
1127
1128 pub fn remove_worktree(&mut self, id: WorktreeId, cx: &mut ModelContext<Self>) {
1129 self.worktrees.retain(|worktree| {
1130 worktree
1131 .upgrade(cx)
1132 .map_or(false, |w| w.read(cx).id() != id)
1133 });
1134 cx.notify();
1135 }
1136
    /// Start tracking a worktree: observe it for changes, subscribe to its
    /// events to keep open buffers in sync, and store either a weak or strong
    /// handle depending on the worktree's kind.
    fn add_worktree(&mut self, worktree: &ModelHandle<Worktree>, cx: &mut ModelContext<Self>) {
        cx.observe(&worktree, |_, _, cx| cx.notify()).detach();
        cx.subscribe(&worktree, |this, worktree, _, cx| {
            this.update_open_buffers(worktree, cx)
        })
        .detach();

        // Weak local worktrees must not be kept alive by the project itself;
        // they live only as long as something else (e.g. an open buffer)
        // retains them.
        let push_weak_handle = {
            let worktree = worktree.read(cx);
            worktree.is_local() && worktree.is_weak()
        };
        if push_weak_handle {
            // Prune dead weak handles once the worktree model is released.
            cx.observe_release(&worktree, |this, cx| {
                this.worktrees
                    .retain(|worktree| worktree.upgrade(cx).is_some());
                cx.notify();
            })
            .detach();
            self.worktrees
                .push(WorktreeHandle::Weak(worktree.downgrade()));
        } else {
            self.worktrees
                .push(WorktreeHandle::Strong(worktree.clone()));
        }
        cx.notify();
    }
1163
    /// Reconcile every open buffer's `File` with the latest snapshot of the
    /// given worktree, and forget buffers whose models have been dropped.
    fn update_open_buffers(
        &mut self,
        worktree_handle: ModelHandle<Worktree>,
        cx: &mut ModelContext<Self>,
    ) {
        let local = worktree_handle.read(cx).is_local();
        let snapshot = worktree_handle.read(cx).snapshot();
        let worktree_path = snapshot.abs_path();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in &self.open_buffers {
            if let OpenBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| {
                        if let Some(old_file) = File::from_dyn(buffer.file()) {
                            // Only buffers belonging to this worktree are
                            // affected.
                            if old_file.worktree != worktree_handle {
                                return;
                            }

                            // Resolve the buffer's file against the new
                            // snapshot: first by stable entry id, then by
                            // path (the entry may have been recreated), and
                            // otherwise treat the file as deleted by clearing
                            // its entry id while keeping the old path/mtime.
                            let new_file = if let Some(entry) = old_file
                                .entry_id
                                .and_then(|entry_id| snapshot.entry_for_id(entry_id))
                            {
                                File {
                                    is_local: local,
                                    worktree_path: worktree_path.clone(),
                                    entry_id: Some(entry.id),
                                    mtime: entry.mtime,
                                    path: entry.path.clone(),
                                    worktree: worktree_handle.clone(),
                                }
                            } else if let Some(entry) =
                                snapshot.entry_for_path(old_file.path().as_ref())
                            {
                                File {
                                    is_local: local,
                                    worktree_path: worktree_path.clone(),
                                    entry_id: Some(entry.id),
                                    mtime: entry.mtime,
                                    path: entry.path.clone(),
                                    worktree: worktree_handle.clone(),
                                }
                            } else {
                                File {
                                    is_local: local,
                                    worktree_path: worktree_path.clone(),
                                    entry_id: None,
                                    path: old_file.path().clone(),
                                    mtime: old_file.mtime(),
                                    worktree: worktree_handle.clone(),
                                }
                            };

                            if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                                task.detach();
                            }
                        }
                    });
                } else {
                    // The buffer model was dropped; clean up its entry after
                    // the iteration (can't mutate the map while borrowing it).
                    buffers_to_delete.push(*buffer_id);
                }
            }
        }

        for buffer_id in buffers_to_delete {
            self.open_buffers.remove(&buffer_id);
        }
    }
1231
1232 pub fn set_active_path(&mut self, entry: Option<ProjectPath>, cx: &mut ModelContext<Self>) {
1233 let new_active_entry = entry.and_then(|project_path| {
1234 let worktree = self.worktree_for_id(project_path.worktree_id, cx)?;
1235 let entry = worktree.read(cx).entry_for_path(project_path.path)?;
1236 Some(ProjectEntry {
1237 worktree_id: project_path.worktree_id,
1238 entry_id: entry.id,
1239 })
1240 });
1241 if new_active_entry != self.active_entry {
1242 self.active_entry = new_active_entry;
1243 cx.emit(Event::ActiveEntryChanged(new_active_entry));
1244 }
1245 }
1246
    /// True while at least one language server is running disk-based
    /// diagnostics (see `disk_based_diagnostics_started` / `_finished`).
    pub fn is_running_disk_based_diagnostics(&self) -> bool {
        self.language_servers_with_diagnostics_running > 0
    }
1250
1251 pub fn diagnostic_summary(&self, cx: &AppContext) -> DiagnosticSummary {
1252 let mut summary = DiagnosticSummary::default();
1253 for (_, path_summary) in self.diagnostic_summaries(cx) {
1254 summary.error_count += path_summary.error_count;
1255 summary.warning_count += path_summary.warning_count;
1256 summary.info_count += path_summary.info_count;
1257 summary.hint_count += path_summary.hint_count;
1258 }
1259 summary
1260 }
1261
    /// Iterate over the per-path diagnostic summaries of every worktree,
    /// pairing each summary with its project-relative path.
    pub fn diagnostic_summaries<'a>(
        &'a self,
        cx: &'a AppContext,
    ) -> impl Iterator<Item = (ProjectPath, DiagnosticSummary)> + 'a {
        self.worktrees(cx).flat_map(move |worktree| {
            let worktree = worktree.read(cx);
            let worktree_id = worktree.id();
            worktree
                .diagnostic_summaries()
                .map(move |(path, summary)| (ProjectPath { worktree_id, path }, summary))
        })
    }
1274
1275 pub fn disk_based_diagnostics_started(&mut self, cx: &mut ModelContext<Self>) {
1276 self.language_servers_with_diagnostics_running += 1;
1277 if self.language_servers_with_diagnostics_running == 1 {
1278 cx.emit(Event::DiskBasedDiagnosticsStarted);
1279 }
1280 }
1281
    /// Record that one language server finished a round of disk-based
    /// diagnostics. `DiskBasedDiagnosticsUpdated` is emitted unconditionally
    /// (before the counter is decremented); `DiskBasedDiagnosticsFinished` is
    /// emitted only when the last running server finishes.
    pub fn disk_based_diagnostics_finished(&mut self, cx: &mut ModelContext<Self>) {
        cx.emit(Event::DiskBasedDiagnosticsUpdated);
        self.language_servers_with_diagnostics_running -= 1;
        if self.language_servers_with_diagnostics_running == 0 {
            cx.emit(Event::DiskBasedDiagnosticsFinished);
        }
    }
1289
    /// The most recently activated project entry, if any
    /// (see `set_active_path`).
    pub fn active_entry(&self) -> Option<ProjectEntry> {
        self.active_entry
    }
1293
1294 // RPC message handlers
1295
    /// Handle the host revoking sharing of this (remote) project: mark
    /// sharing as stopped and drop all collaborators.
    fn handle_unshare_project(
        &mut self,
        _: TypedEnvelope<proto::UnshareProject>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        if let ProjectClientState::Remote {
            sharing_has_stopped,
            ..
        } = &mut self.client_state
        {
            *sharing_has_stopped = true;
            self.collaborators.clear();
            cx.notify();
            Ok(())
        } else {
            // NOTE(review): this assumes UnshareProject is only ever routed
            // to remote (guest) projects; a local project receiving it panics
            // here — confirm the routing invariant upstream.
            unreachable!()
        }
    }
1315
    /// Handle a collaborator joining the project: resolve their user record
    /// asynchronously and insert them into `collaborators`.
    fn handle_add_collaborator(
        &mut self,
        mut envelope: TypedEnvelope<proto::AddProjectCollaborator>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let user_store = self.user_store.clone();
        let collaborator = envelope
            .payload
            .collaborator
            .take()
            .ok_or_else(|| anyhow!("empty collaborator"))?;

        cx.spawn(|this, mut cx| {
            async move {
                // Resolving the user may require a round-trip to the user
                // store, so it runs off the message-handling path; failures
                // are logged rather than propagated.
                let collaborator =
                    Collaborator::from_proto(collaborator, &user_store, &mut cx).await?;
                this.update(&mut cx, |this, cx| {
                    this.collaborators
                        .insert(collaborator.peer_id, collaborator);
                    cx.notify();
                });
                Ok(())
            }
            .log_err()
        })
        .detach();

        Ok(())
    }
1346
    /// Handle a collaborator leaving the project: drop the buffers shared
    /// with them and remove their replica from every open buffer.
    fn handle_remove_collaborator(
        &mut self,
        envelope: TypedEnvelope<proto::RemoveProjectCollaborator>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let peer_id = PeerId(envelope.payload.peer_id);
        let replica_id = self
            .collaborators
            .remove(&peer_id)
            .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?
            .replica_id;
        self.shared_buffers.remove(&peer_id);
        // Strip the departed peer's replica from all live, loaded buffers.
        for (_, buffer) in &self.open_buffers {
            if let OpenBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
                }
            }
        }
        cx.notify();
        Ok(())
    }
1370
    /// Handle the host sharing a worktree with this (remote) project: build a
    /// remote worktree from the payload and add it to the project.
    fn handle_share_worktree(
        &mut self,
        envelope: TypedEnvelope<proto::ShareWorktree>,
        client: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let remote_id = self.remote_id().ok_or_else(|| anyhow!("invalid project"))?;
        let replica_id = self.replica_id();
        let worktree = envelope
            .payload
            .worktree
            .ok_or_else(|| anyhow!("invalid worktree"))?;
        cx.spawn(|this, mut cx| {
            async move {
                // Constructing the remote worktree is async; failures are
                // logged rather than surfaced to the sender.
                let worktree =
                    Worktree::remote(remote_id, replica_id, worktree, client, &mut cx).await?;
                this.update(&mut cx, |this, cx| this.add_worktree(&worktree, cx));
                Ok(())
            }
            .log_err()
        })
        .detach();
        Ok(())
    }
1395
1396 fn handle_unregister_worktree(
1397 &mut self,
1398 envelope: TypedEnvelope<proto::UnregisterWorktree>,
1399 _: Arc<Client>,
1400 cx: &mut ModelContext<Self>,
1401 ) -> Result<()> {
1402 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1403 self.remove_worktree(worktree_id, cx);
1404 Ok(())
1405 }
1406
1407 fn handle_update_worktree(
1408 &mut self,
1409 envelope: TypedEnvelope<proto::UpdateWorktree>,
1410 _: Arc<Client>,
1411 cx: &mut ModelContext<Self>,
1412 ) -> Result<()> {
1413 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1414 if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
1415 worktree.update(cx, |worktree, cx| {
1416 let worktree = worktree.as_remote_mut().unwrap();
1417 worktree.update_from_remote(envelope, cx)
1418 })?;
1419 }
1420 Ok(())
1421 }
1422
    /// Handle a diagnostic-summary update for a path in a remote worktree,
    /// storing it on the worktree and re-emitting it as a
    /// `DiagnosticsUpdated` event.
    fn handle_update_diagnostic_summary(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
            if let Some(summary) = envelope.payload.summary {
                let project_path = ProjectPath {
                    worktree_id,
                    path: Path::new(&summary.path).into(),
                };
                worktree.update(cx, |worktree, _| {
                    worktree
                        .as_remote_mut()
                        .unwrap()
                        .update_diagnostic_summary(project_path.path.clone(), &summary);
                });
                cx.emit(Event::DiagnosticsUpdated(project_path));
            }
        }
        Ok(())
    }
1447
    /// Mirror the host's "disk-based diagnostics started" notification on
    /// this replica.
    fn handle_disk_based_diagnostics_updating(
        &mut self,
        _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdating>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        self.disk_based_diagnostics_started(cx);
        Ok(())
    }
1457
    /// Mirror the host's "disk-based diagnostics finished" notification on
    /// this replica.
    fn handle_disk_based_diagnostics_updated(
        &mut self,
        _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdated>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        self.disk_based_diagnostics_finished(cx);
        Ok(())
    }
1467
1468 pub fn handle_update_buffer(
1469 &mut self,
1470 envelope: TypedEnvelope<proto::UpdateBuffer>,
1471 _: Arc<Client>,
1472 cx: &mut ModelContext<Self>,
1473 ) -> Result<()> {
1474 let payload = envelope.payload.clone();
1475 let buffer_id = payload.buffer_id as usize;
1476 let ops = payload
1477 .operations
1478 .into_iter()
1479 .map(|op| language::proto::deserialize_operation(op))
1480 .collect::<Result<Vec<_>, _>>()?;
1481 match self.open_buffers.get_mut(&buffer_id) {
1482 Some(OpenBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
1483 Some(OpenBuffer::Loaded(buffer)) => {
1484 if let Some(buffer) = buffer.upgrade(cx) {
1485 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
1486 } else {
1487 self.open_buffers
1488 .insert(buffer_id, OpenBuffer::Operations(ops));
1489 }
1490 }
1491 None => {
1492 self.open_buffers
1493 .insert(buffer_id, OpenBuffer::Operations(ops));
1494 }
1495 }
1496 Ok(())
1497 }
1498
    /// Handle a guest's request to save one of the buffers shared with them:
    /// save it on this host and respond with the resulting version and mtime.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let project_id = self.remote_id().ok_or_else(|| anyhow!("not connected"))?;
        // Guests may only save buffers that were previously shared with them.
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let receipt = envelope.receipt();
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
        });

        // Await the save and reply off the main thread; errors are logged.
        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();
        Ok(())
    }
1541
    /// Handle a guest's request to format one of the buffers shared with
    /// them, acknowledging success or reporting the failure back.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        // Guests may only format buffers that were shared with them.
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();
        Ok(())
    }
1580
    /// Handle a guest's request to open a buffer by path: open it locally,
    /// record it as shared with that peer, and respond with the buffer's
    /// serialized state.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();
        let peer_id = envelope.original_sender_id()?;
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        let open_buffer = self.open_buffer(
            ProjectPath {
                worktree_id,
                path: PathBuf::from(envelope.payload.path).into(),
            },
            cx,
        );
        cx.spawn(|this, mut cx| {
            async move {
                let buffer = open_buffer.await?;
                // Remember the buffer as shared with this peer so later
                // save/format/close requests from them can resolve it.
                this.update(&mut cx, |this, _| {
                    this.shared_buffers
                        .entry(peer_id)
                        .or_default()
                        .insert(buffer.id() as u64, buffer.clone());
                });
                let message = buffer.read_with(&cx, |buffer, _| buffer.to_proto());
                rpc.respond(
                    receipt,
                    proto::OpenBufferResponse {
                        buffer: Some(message),
                    },
                )
                .await
            }
            .log_err()
        })
        .detach();
        Ok(())
    }
1620
1621 pub fn handle_close_buffer(
1622 &mut self,
1623 envelope: TypedEnvelope<proto::CloseBuffer>,
1624 _: Arc<Client>,
1625 cx: &mut ModelContext<Self>,
1626 ) -> anyhow::Result<()> {
1627 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1628 shared_buffers.remove(&envelope.payload.buffer_id);
1629 cx.notify();
1630 }
1631 Ok(())
1632 }
1633
1634 pub fn handle_buffer_saved(
1635 &mut self,
1636 envelope: TypedEnvelope<proto::BufferSaved>,
1637 _: Arc<Client>,
1638 cx: &mut ModelContext<Self>,
1639 ) -> Result<()> {
1640 let payload = envelope.payload.clone();
1641 let buffer = self
1642 .open_buffers
1643 .get(&(payload.buffer_id as usize))
1644 .and_then(|buf| {
1645 if let OpenBuffer::Loaded(buffer) = buf {
1646 buffer.upgrade(cx)
1647 } else {
1648 None
1649 }
1650 });
1651 if let Some(buffer) = buffer {
1652 buffer.update(cx, |buffer, cx| {
1653 let version = payload.version.try_into()?;
1654 let mtime = payload
1655 .mtime
1656 .ok_or_else(|| anyhow!("missing mtime"))?
1657 .into();
1658 buffer.did_save(version, mtime, None, cx);
1659 Result::<_, anyhow::Error>::Ok(())
1660 })?;
1661 }
1662 Ok(())
1663 }
1664
    /// Fuzzy-match `query` against the paths of all non-weak worktrees,
    /// returning at most `max_results` matches. The match itself runs on the
    /// background executor and can be cancelled via `cancel_flag`.
    pub fn match_paths<'a>(
        &self,
        query: &'a str,
        include_ignored: bool,
        smart_case: bool,
        max_results: usize,
        cancel_flag: &'a AtomicBool,
        cx: &AppContext,
    ) -> impl 'a + Future<Output = Vec<PathMatch>> {
        // Weak worktrees (e.g. transiently-opened files) are excluded from
        // project-wide search.
        let worktrees = self
            .worktrees(cx)
            .filter(|worktree| !worktree.read(cx).is_weak())
            .collect::<Vec<_>>();
        // Only prefix candidates with the root name when there are multiple
        // worktrees, so single-root matches stay short.
        let include_root_name = worktrees.len() > 1;
        // Snapshots are taken up front so the returned future does not borrow
        // the app context.
        let candidate_sets = worktrees
            .into_iter()
            .map(|worktree| CandidateSet {
                snapshot: worktree.read(cx).snapshot(),
                include_ignored,
                include_root_name,
            })
            .collect::<Vec<_>>();

        let background = cx.background().clone();
        async move {
            fuzzy::match_paths(
                candidate_sets.as_slice(),
                query,
                smart_case,
                max_results,
                cancel_flag,
                background,
            )
            .await
        }
    }
1701}
1702
1703impl WorktreeHandle {
1704 pub fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Worktree>> {
1705 match self {
1706 WorktreeHandle::Strong(handle) => Some(handle.clone()),
1707 WorktreeHandle::Weak(handle) => handle.upgrade(cx),
1708 }
1709 }
1710}
1711
/// A single worktree snapshot presented to the fuzzy matcher as a set of
/// path-match candidates.
struct CandidateSet {
    snapshot: Snapshot,
    // Whether ignored files are offered as candidates.
    include_ignored: bool,
    // Whether candidate paths are prefixed with the worktree's root name.
    include_root_name: bool,
}
1717
1718impl<'a> PathMatchCandidateSet<'a> for CandidateSet {
1719 type Candidates = CandidateSetIter<'a>;
1720
1721 fn id(&self) -> usize {
1722 self.snapshot.id().to_usize()
1723 }
1724
1725 fn len(&self) -> usize {
1726 if self.include_ignored {
1727 self.snapshot.file_count()
1728 } else {
1729 self.snapshot.visible_file_count()
1730 }
1731 }
1732
1733 fn prefix(&self) -> Arc<str> {
1734 if self.snapshot.root_entry().map_or(false, |e| e.is_file()) {
1735 self.snapshot.root_name().into()
1736 } else if self.include_root_name {
1737 format!("{}/", self.snapshot.root_name()).into()
1738 } else {
1739 "".into()
1740 }
1741 }
1742
1743 fn candidates(&'a self, start: usize) -> Self::Candidates {
1744 CandidateSetIter {
1745 traversal: self.snapshot.files(self.include_ignored, start),
1746 }
1747 }
1748}
1749
/// Iterator over the file entries of a worktree snapshot, adapting each entry
/// into a `PathMatchCandidate` for the fuzzy matcher.
struct CandidateSetIter<'a> {
    traversal: Traversal<'a>,
}
1753
1754impl<'a> Iterator for CandidateSetIter<'a> {
1755 type Item = PathMatchCandidate<'a>;
1756
1757 fn next(&mut self) -> Option<Self::Item> {
1758 self.traversal.next().map(|entry| {
1759 if let EntryKind::File(char_bag) = entry.kind {
1760 PathMatchCandidate {
1761 path: &entry.path,
1762 char_bag,
1763 }
1764 } else {
1765 unreachable!()
1766 }
1767 })
1768 }
1769}
1770
impl Entity for Project {
    type Event = Event;

    /// When the project model is released, tell the server the project is
    /// going away: hosts unregister it, guests leave it. Errors are only
    /// logged, since the model is already being dropped.
    fn release(&mut self, cx: &mut gpui::MutableAppContext) {
        match &self.client_state {
            ProjectClientState::Local { remote_id_rx, .. } => {
                // Only unregister if the project was actually registered
                // (i.e. it was assigned a remote id).
                if let Some(project_id) = *remote_id_rx.borrow() {
                    let rpc = self.client.clone();
                    cx.spawn(|_| async move {
                        if let Err(err) = rpc.send(proto::UnregisterProject { project_id }).await {
                            log::error!("error unregistering project: {}", err);
                        }
                    })
                    .detach();
                }
            }
            ProjectClientState::Remote { remote_id, .. } => {
                let rpc = self.client.clone();
                let project_id = *remote_id;
                cx.spawn(|_| async move {
                    if let Err(err) = rpc.send(proto::LeaveProject { project_id }).await {
                        log::error!("error leaving project: {}", err);
                    }
                })
                .detach();
            }
        }
    }

    /// Before the app exits, shut down all language servers concurrently and
    /// wait for them to finish.
    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        let shutdown_futures = self
            .language_servers
            .drain()
            .filter_map(|(_, server)| server.shutdown())
            .collect::<Vec<_>>();
        Some(
            async move {
                futures::future::join_all(shutdown_futures).await;
            }
            .boxed(),
        )
    }
}
1819
1820impl Collaborator {
1821 fn from_proto(
1822 message: proto::Collaborator,
1823 user_store: &ModelHandle<UserStore>,
1824 cx: &mut AsyncAppContext,
1825 ) -> impl Future<Output = Result<Self>> {
1826 let user = user_store.update(cx, |user_store, cx| {
1827 user_store.fetch_user(message.user_id, cx)
1828 });
1829
1830 async move {
1831 Ok(Self {
1832 peer_id: PeerId(message.peer_id),
1833 user: user.await?,
1834 replica_id: message.replica_id as ReplicaId,
1835 })
1836 }
1837 }
1838}
1839
1840impl<P: AsRef<Path>> From<(WorktreeId, P)> for ProjectPath {
1841 fn from((worktree_id, path): (WorktreeId, P)) -> Self {
1842 Self {
1843 worktree_id,
1844 path: path.as_ref().into(),
1845 }
1846 }
1847}
1848
1849impl OpenBuffer {
1850 fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Buffer>> {
1851 match self {
1852 OpenBuffer::Loaded(buffer) => buffer.upgrade(cx),
1853 OpenBuffer::Operations(_) => None,
1854 }
1855 }
1856}
1857
1858#[cfg(test)]
1859mod tests {
1860 use super::{Event, *};
1861 use client::test::FakeHttpClient;
1862 use fs::RealFs;
1863 use futures::StreamExt;
1864 use gpui::{test::subscribe, TestAppContext};
1865 use language::{
1866 tree_sitter_rust, AnchorRangeExt, Diagnostic, LanguageConfig, LanguageRegistry,
1867 LanguageServerConfig, Point,
1868 };
1869 use lsp::Url;
1870 use serde_json::json;
1871 use std::{cell::RefCell, os::unix, path::PathBuf, rc::Rc};
1872 use unindent::Unindent as _;
1873 use util::test::temp_tree;
1874 use worktree::WorktreeHandle as _;
1875
    // Scans a worktree that is itself reached through a symlink and contains
    // an internal symlinked directory, then verifies that fuzzy path search
    // finds the expected entries.
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        // Access the tree through a symlinked root, and symlink one of its
        // subdirectories to another name inside the tree.
        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let project = build_project(Arc::new(RealFs), &mut cx);

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(&root_link_path, false, cx)
            })
            .await
            .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert_eq!(tree.file_count(), 5);
            // Both names of the symlinked directory resolve to the same inode.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );
        });

        let cancel_flag = Default::default();
        let results = project
            .read_with(&cx, |project, cx| {
                project.match_paths("bna", false, false, 10, &cancel_flag, cx)
            })
            .await;
        assert_eq!(
            results
                .into_iter()
                .map(|result| result.path)
                .collect::<Vec<Arc<Path>>>(),
            vec![
                PathBuf::from("banana/carrot/date").into(),
                PathBuf::from("banana/carrot/endive").into(),
            ]
        );
    }
1938
    // Drives a fake language server through overlapping disk-based
    // diagnostic progress cycles and verifies the project emits the expected
    // started/updated/finished events and stores the published diagnostics.
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();

        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        path: Path::new("b.rs").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();

        let mut events = subscribe(&project, &mut cx);

        // First progress start transitions the project from idle to running.
        fake_server.start_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsStarted
        );

        // Overlapping start/end cycles must not emit extra started events.
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiagnosticsUpdated(ProjectPath {
                worktree_id,
                path: Arc::from(Path::new("a.rs"))
            })
        );

        // Ending the remaining progress tokens finishes the running state.
        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdated
        );
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsFinished
        );

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
2071
    // A worktree containing only directories should produce no path-match
    // candidates at all.
    #[gpui::test]
    async fn test_search_worktree_without_files(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "dir1": {},
                "dir2": {
                    "dir3": {}
                }
            }
        }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(&dir.path(), false, cx)
            })
            .await
            .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let cancel_flag = Default::default();
        let results = project
            .read_with(&cx, |project, cx| {
                project.match_paths("dir", false, false, 10, &cancel_flag, cx)
            })
            .await;

        assert!(results.is_empty());
    }
2103
    // Exercises go-to-definition across files: the definition target lives
    // outside the original (single-file) worktree, so the project opens it in
    // a new weak worktree that is dropped once the definition is released.
    #[gpui::test]
    async fn test_definition(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;

        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "const fn a() { A }",
            "b.rs": "const y: i32 = crate::a()",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        // The worktree is rooted at a single file, b.rs.
        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path().join("b.rs"), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        path: Path::new("").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();
        let definitions =
            project.update(&mut cx, |project, cx| project.definition(&buffer, 22, cx));
        let (request_id, request) = fake_server
            .receive_request::<lsp::request::GotoDefinition>()
            .await;
        let request_params = request.text_document_position_params;
        assert_eq!(
            request_params.text_document.uri.to_file_path().unwrap(),
            dir.path().join("b.rs")
        );
        assert_eq!(request_params.position, lsp::Position::new(0, 22));

        // The server answers with a location in a.rs, outside the worktree.
        fake_server
            .respond(
                request_id,
                Some(lsp::GotoDefinitionResponse::Scalar(lsp::Location::new(
                    lsp::Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                    lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                ))),
            )
            .await;
        let mut definitions = definitions.await.unwrap();
        assert_eq!(definitions.len(), 1);
        let definition = definitions.pop().unwrap();
        cx.update(|cx| {
            let target_buffer = definition.target_buffer.read(cx);
            assert_eq!(
                target_buffer.file().unwrap().abs_path(),
                Some(dir.path().join("a.rs"))
            );
            assert_eq!(definition.target_range.to_offset(target_buffer), 9..10);
            // The target file was opened in an additional, weak worktree.
            assert_eq!(
                list_worktrees(&project, cx),
                [
                    (dir.path().join("b.rs"), false),
                    (dir.path().join("a.rs"), true)
                ]
            );

            drop(definition);
        });
        // Dropping the definition releases the weak worktree.
        cx.read(|cx| {
            assert_eq!(
                list_worktrees(&project, cx),
                [(dir.path().join("b.rs"), false)]
            );
        });

        // Helper: each worktree's absolute path paired with its weakness.
        fn list_worktrees(project: &ModelHandle<Project>, cx: &AppContext) -> Vec<(PathBuf, bool)> {
            project
                .read(cx)
                .worktrees(cx)
                .map(|worktree| {
                    let worktree = worktree.read(cx);
                    (
                        worktree.as_local().unwrap().abs_path().to_path_buf(),
                        worktree.is_weak(),
                    )
                })
                .collect::<Vec<_>>()
        }
    }
2223
    // Edits a buffer opened from a fake filesystem and verifies that saving
    // writes the new contents back to the file.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/dir",
            json!({
                "file1": "the old contents",
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let worktree_id = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/dir", false, cx)
            })
            .await
            .unwrap()
            .0
            .read_with(&cx, |tree, _| tree.id());

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
            .await
            .unwrap();
        buffer
            .update(&mut cx, |buffer, cx| {
                assert_eq!(buffer.text(), "the old contents");
                buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
                buffer.save(cx)
            })
            .await
            .unwrap();

        let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
2261
2262 #[gpui::test]
2263 async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
2264 let fs = Arc::new(FakeFs::new());
2265 fs.insert_tree(
2266 "/dir",
2267 json!({
2268 "file1": "the old contents",
2269 }),
2270 )
2271 .await;
2272
2273 let project = build_project(fs.clone(), &mut cx);
2274 let worktree_id = project
2275 .update(&mut cx, |p, cx| {
2276 p.find_or_create_local_worktree("/dir/file1", false, cx)
2277 })
2278 .await
2279 .unwrap()
2280 .0
2281 .read_with(&cx, |tree, _| tree.id());
2282
2283 let buffer = project
2284 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, ""), cx))
2285 .await
2286 .unwrap();
2287 buffer
2288 .update(&mut cx, |buffer, cx| {
2289 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
2290 buffer.save(cx)
2291 })
2292 .await
2293 .unwrap();
2294
2295 let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
2296 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
2297 }
2298
2299 #[gpui::test]
2300 async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
2301 let dir = temp_tree(json!({
2302 "a": {
2303 "file1": "",
2304 "file2": "",
2305 "file3": "",
2306 },
2307 "b": {
2308 "c": {
2309 "file4": "",
2310 "file5": "",
2311 }
2312 }
2313 }));
2314
2315 let project = build_project(Arc::new(RealFs), &mut cx);
2316 let rpc = project.read_with(&cx, |p, _| p.client.clone());
2317
2318 let (tree, _) = project
2319 .update(&mut cx, |p, cx| {
2320 p.find_or_create_local_worktree(dir.path(), false, cx)
2321 })
2322 .await
2323 .unwrap();
2324 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
2325
2326 let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
2327 let buffer = project.update(cx, |p, cx| p.open_buffer((worktree_id, path), cx));
2328 async move { buffer.await.unwrap() }
2329 };
2330 let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
2331 tree.read_with(cx, |tree, _| {
2332 tree.entry_for_path(path)
2333 .expect(&format!("no entry for path {}", path))
2334 .id
2335 })
2336 };
2337
2338 let buffer2 = buffer_for_path("a/file2", &mut cx).await;
2339 let buffer3 = buffer_for_path("a/file3", &mut cx).await;
2340 let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
2341 let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
2342
2343 let file2_id = id_for_path("a/file2", &cx);
2344 let file3_id = id_for_path("a/file3", &cx);
2345 let file4_id = id_for_path("b/c/file4", &cx);
2346
2347 // Wait for the initial scan.
2348 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2349 .await;
2350
2351 // Create a remote copy of this worktree.
2352 let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
2353 let remote = Worktree::remote(
2354 1,
2355 1,
2356 initial_snapshot.to_proto(&Default::default(), Default::default()),
2357 rpc.clone(),
2358 &mut cx.to_async(),
2359 )
2360 .await
2361 .unwrap();
2362
2363 cx.read(|cx| {
2364 assert!(!buffer2.read(cx).is_dirty());
2365 assert!(!buffer3.read(cx).is_dirty());
2366 assert!(!buffer4.read(cx).is_dirty());
2367 assert!(!buffer5.read(cx).is_dirty());
2368 });
2369
2370 // Rename and delete files and directories.
2371 tree.flush_fs_events(&cx).await;
2372 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
2373 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
2374 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
2375 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
2376 tree.flush_fs_events(&cx).await;
2377
2378 let expected_paths = vec![
2379 "a",
2380 "a/file1",
2381 "a/file2.new",
2382 "b",
2383 "d",
2384 "d/file3",
2385 "d/file4",
2386 ];
2387
2388 cx.read(|app| {
2389 assert_eq!(
2390 tree.read(app)
2391 .paths()
2392 .map(|p| p.to_str().unwrap())
2393 .collect::<Vec<_>>(),
2394 expected_paths
2395 );
2396
2397 assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
2398 assert_eq!(id_for_path("d/file3", &cx), file3_id);
2399 assert_eq!(id_for_path("d/file4", &cx), file4_id);
2400
2401 assert_eq!(
2402 buffer2.read(app).file().unwrap().path().as_ref(),
2403 Path::new("a/file2.new")
2404 );
2405 assert_eq!(
2406 buffer3.read(app).file().unwrap().path().as_ref(),
2407 Path::new("d/file3")
2408 );
2409 assert_eq!(
2410 buffer4.read(app).file().unwrap().path().as_ref(),
2411 Path::new("d/file4")
2412 );
2413 assert_eq!(
2414 buffer5.read(app).file().unwrap().path().as_ref(),
2415 Path::new("b/c/file5")
2416 );
2417
2418 assert!(!buffer2.read(app).file().unwrap().is_deleted());
2419 assert!(!buffer3.read(app).file().unwrap().is_deleted());
2420 assert!(!buffer4.read(app).file().unwrap().is_deleted());
2421 assert!(buffer5.read(app).file().unwrap().is_deleted());
2422 });
2423
2424 // Update the remote worktree. Check that it becomes consistent with the
2425 // local worktree.
2426 remote.update(&mut cx, |remote, cx| {
2427 let update_message =
2428 tree.read(cx)
2429 .snapshot()
2430 .build_update(&initial_snapshot, 1, 1, true);
2431 remote
2432 .as_remote_mut()
2433 .unwrap()
2434 .snapshot
2435 .apply_update(update_message)
2436 .unwrap();
2437
2438 assert_eq!(
2439 remote
2440 .paths()
2441 .map(|p| p.to_str().unwrap())
2442 .collect::<Vec<_>>(),
2443 expected_paths
2444 );
2445 });
2446 }
2447
2448 #[gpui::test]
2449 async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
2450 let fs = Arc::new(FakeFs::new());
2451 fs.insert_tree(
2452 "/the-dir",
2453 json!({
2454 "a.txt": "a-contents",
2455 "b.txt": "b-contents",
2456 }),
2457 )
2458 .await;
2459
2460 let project = build_project(fs.clone(), &mut cx);
2461 let worktree_id = project
2462 .update(&mut cx, |p, cx| {
2463 p.find_or_create_local_worktree("/the-dir", false, cx)
2464 })
2465 .await
2466 .unwrap()
2467 .0
2468 .read_with(&cx, |tree, _| tree.id());
2469
2470 // Spawn multiple tasks to open paths, repeating some paths.
2471 let (buffer_a_1, buffer_b, buffer_a_2) = project.update(&mut cx, |p, cx| {
2472 (
2473 p.open_buffer((worktree_id, "a.txt"), cx),
2474 p.open_buffer((worktree_id, "b.txt"), cx),
2475 p.open_buffer((worktree_id, "a.txt"), cx),
2476 )
2477 });
2478
2479 let buffer_a_1 = buffer_a_1.await.unwrap();
2480 let buffer_a_2 = buffer_a_2.await.unwrap();
2481 let buffer_b = buffer_b.await.unwrap();
2482 assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
2483 assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
2484
2485 // There is only one buffer per path.
2486 let buffer_a_id = buffer_a_1.id();
2487 assert_eq!(buffer_a_2.id(), buffer_a_id);
2488
2489 // Open the same path again while it is still open.
2490 drop(buffer_a_1);
2491 let buffer_a_3 = project
2492 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))
2493 .await
2494 .unwrap();
2495
2496 // There's still only one buffer per path.
2497 assert_eq!(buffer_a_3.id(), buffer_a_id);
2498 }
2499
2500 #[gpui::test]
2501 async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
2502 use std::fs;
2503
2504 let dir = temp_tree(json!({
2505 "file1": "abc",
2506 "file2": "def",
2507 "file3": "ghi",
2508 }));
2509
2510 let project = build_project(Arc::new(RealFs), &mut cx);
2511 let (worktree, _) = project
2512 .update(&mut cx, |p, cx| {
2513 p.find_or_create_local_worktree(dir.path(), false, cx)
2514 })
2515 .await
2516 .unwrap();
2517 let worktree_id = worktree.read_with(&cx, |worktree, _| worktree.id());
2518
2519 worktree.flush_fs_events(&cx).await;
2520 worktree
2521 .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
2522 .await;
2523
2524 let buffer1 = project
2525 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
2526 .await
2527 .unwrap();
2528 let events = Rc::new(RefCell::new(Vec::new()));
2529
2530 // initially, the buffer isn't dirty.
2531 buffer1.update(&mut cx, |buffer, cx| {
2532 cx.subscribe(&buffer1, {
2533 let events = events.clone();
2534 move |_, _, event, _| events.borrow_mut().push(event.clone())
2535 })
2536 .detach();
2537
2538 assert!(!buffer.is_dirty());
2539 assert!(events.borrow().is_empty());
2540
2541 buffer.edit(vec![1..2], "", cx);
2542 });
2543
2544 // after the first edit, the buffer is dirty, and emits a dirtied event.
2545 buffer1.update(&mut cx, |buffer, cx| {
2546 assert!(buffer.text() == "ac");
2547 assert!(buffer.is_dirty());
2548 assert_eq!(
2549 *events.borrow(),
2550 &[language::Event::Edited, language::Event::Dirtied]
2551 );
2552 events.borrow_mut().clear();
2553 buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
2554 });
2555
2556 // after saving, the buffer is not dirty, and emits a saved event.
2557 buffer1.update(&mut cx, |buffer, cx| {
2558 assert!(!buffer.is_dirty());
2559 assert_eq!(*events.borrow(), &[language::Event::Saved]);
2560 events.borrow_mut().clear();
2561
2562 buffer.edit(vec![1..1], "B", cx);
2563 buffer.edit(vec![2..2], "D", cx);
2564 });
2565
2566 // after editing again, the buffer is dirty, and emits another dirty event.
2567 buffer1.update(&mut cx, |buffer, cx| {
2568 assert!(buffer.text() == "aBDc");
2569 assert!(buffer.is_dirty());
2570 assert_eq!(
2571 *events.borrow(),
2572 &[
2573 language::Event::Edited,
2574 language::Event::Dirtied,
2575 language::Event::Edited,
2576 ],
2577 );
2578 events.borrow_mut().clear();
2579
2580 // TODO - currently, after restoring the buffer to its
2581 // previously-saved state, the is still considered dirty.
2582 buffer.edit([1..3], "", cx);
2583 assert!(buffer.text() == "ac");
2584 assert!(buffer.is_dirty());
2585 });
2586
2587 assert_eq!(*events.borrow(), &[language::Event::Edited]);
2588
2589 // When a file is deleted, the buffer is considered dirty.
2590 let events = Rc::new(RefCell::new(Vec::new()));
2591 let buffer2 = project
2592 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file2"), cx))
2593 .await
2594 .unwrap();
2595 buffer2.update(&mut cx, |_, cx| {
2596 cx.subscribe(&buffer2, {
2597 let events = events.clone();
2598 move |_, _, event, _| events.borrow_mut().push(event.clone())
2599 })
2600 .detach();
2601 });
2602
2603 fs::remove_file(dir.path().join("file2")).unwrap();
2604 buffer2.condition(&cx, |b, _| b.is_dirty()).await;
2605 assert_eq!(
2606 *events.borrow(),
2607 &[language::Event::Dirtied, language::Event::FileHandleChanged]
2608 );
2609
2610 // When a file is already dirty when deleted, we don't emit a Dirtied event.
2611 let events = Rc::new(RefCell::new(Vec::new()));
2612 let buffer3 = project
2613 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file3"), cx))
2614 .await
2615 .unwrap();
2616 buffer3.update(&mut cx, |_, cx| {
2617 cx.subscribe(&buffer3, {
2618 let events = events.clone();
2619 move |_, _, event, _| events.borrow_mut().push(event.clone())
2620 })
2621 .detach();
2622 });
2623
2624 worktree.flush_fs_events(&cx).await;
2625 buffer3.update(&mut cx, |buffer, cx| {
2626 buffer.edit(Some(0..0), "x", cx);
2627 });
2628 events.borrow_mut().clear();
2629 fs::remove_file(dir.path().join("file3")).unwrap();
2630 buffer3
2631 .condition(&cx, |_, _| !events.borrow().is_empty())
2632 .await;
2633 assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
2634 cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
2635 }
2636
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        // Wait for the worktree's initial scan before touching the file,
        // so subsequent disk changes are observed as modifications.
        worktree
            .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = project
            .update(&mut cx, |p, cx| {
                p.open_buffer((worktree_id, "the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        // After the reload, the buffer is clean: no dirty flag, no conflict.
        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
2736
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/the-dir", false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        // Simulate an LSP `textDocument/publishDiagnostics` notification in
        // which diagnostics reference each other via `related_information`:
        // one warning with a single hint ("error 1"), and one error with two
        // hints ("error 2"). Each primary lists its hints, and each hint
        // points back to its primary ("original diagnostic"), which is how
        // the grouping below is inferred.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        project
            .update(&mut cx, |p, cx| {
                p.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All five entries appear in position order; the two primaries carry
        // `is_primary: true`, and group ids 0 and 1 tie hints to primaries.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Group 0 is the warning plus its single hint.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        // Group 1 is the error plus its two hints, in position order.
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
2995
2996 fn build_project(fs: Arc<dyn Fs>, cx: &mut TestAppContext) -> ModelHandle<Project> {
2997 let languages = Arc::new(LanguageRegistry::new());
2998 let http_client = FakeHttpClient::with_404_response();
2999 let client = client::Client::new(http_client.clone());
3000 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3001 cx.update(|cx| Project::local(client, user_store, languages, fs, cx))
3002 }
3003}