1pub mod fs;
2mod ignore;
3pub mod worktree;
4
5use anyhow::{anyhow, Result};
6use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore};
7use clock::ReplicaId;
8use collections::{hash_map, HashMap, HashSet};
9use futures::Future;
10use fuzzy::{PathMatch, PathMatchCandidate, PathMatchCandidateSet};
11use gpui::{
12 AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task,
13 WeakModelHandle,
14};
15use language::{
16 range_from_lsp, Bias, Buffer, Diagnostic, DiagnosticEntry, File as _, Language,
17 LanguageRegistry, Operation, PointUtf16, ToOffset, ToPointUtf16,
18};
19use lsp::{DiagnosticSeverity, LanguageServer};
20use postage::{prelude::Stream, watch};
21use smol::block_on;
22use std::{
23 convert::TryInto,
24 ops::Range,
25 path::{Path, PathBuf},
26 sync::{atomic::AtomicBool, Arc},
27};
28use util::{post_inc, ResultExt, TryFutureExt as _};
29
30pub use fs::*;
31pub use worktree::*;
32
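/// A collection of local or remote worktrees, together with the open buffers,
/// language servers, collaborator state, and client connection needed to work
/// on them.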
33pub struct Project {
34 worktrees: Vec<WorktreeHandle>,
35 active_entry: Option<ProjectEntry>,
36 languages: Arc<LanguageRegistry>,
37 language_servers: HashMap<(WorktreeId, String), Arc<LanguageServer>>,
38 client: Arc<client::Client>,
39 user_store: ModelHandle<UserStore>,
40 fs: Arc<dyn Fs>,
41 client_state: ProjectClientState,
42 collaborators: HashMap<PeerId, Collaborator>,
43 subscriptions: Vec<client::Subscription>,
44 language_servers_with_diagnostics_running: isize,
45 open_buffers: HashMap<usize, OpenBuffer>,
46 loading_buffers: HashMap<
47 ProjectPath,
48 postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
49 >,
50 shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
51}
52
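/// A buffer the project knows about: either a weak handle to a loaded buffer, or a
/// list of operations received for a buffer that hasn't been loaded on this replica
/// yet.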
53enum OpenBuffer {
54 Operations(Vec<Operation>),
55 Loaded(WeakModelHandle<Buffer>),
56}
57
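/// Most worktrees are held strongly, but weak worktrees (see `add_worktree`) are
/// only held through weak handles so they can be dropped once nothing else is
/// using them.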
58enum WorktreeHandle {
59 Strong(ModelHandle<Worktree>),
60 Weak(WeakModelHandle<Worktree>),
61}
62
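/// Per-project client state, depending on whether the project is hosted locally or
/// was joined from a remote host.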
63enum ProjectClientState {
64 Local {
65 is_shared: bool,
66 remote_id_tx: watch::Sender<Option<u64>>,
67 remote_id_rx: watch::Receiver<Option<u64>>,
68 _maintain_remote_id_task: Task<Option<()>>,
69 },
70 Remote {
71 sharing_has_stopped: bool,
72 remote_id: u64,
73 replica_id: ReplicaId,
74 },
75}
76
77#[derive(Clone, Debug)]
78pub struct Collaborator {
79 pub user: Arc<User>,
80 pub peer_id: PeerId,
81 pub replica_id: ReplicaId,
82}
83
84#[derive(Clone, Debug, PartialEq)]
85pub enum Event {
86 ActiveEntryChanged(Option<ProjectEntry>),
87 WorktreeRemoved(WorktreeId),
88 DiskBasedDiagnosticsStarted,
89 DiskBasedDiagnosticsUpdated,
90 DiskBasedDiagnosticsFinished,
91 DiagnosticsUpdated(ProjectPath),
92}
93
94#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
95pub struct ProjectPath {
96 pub worktree_id: WorktreeId,
97 pub path: Arc<Path>,
98}
99
100#[derive(Clone, Debug, Default, PartialEq)]
101pub struct DiagnosticSummary {
102 pub error_count: usize,
103 pub warning_count: usize,
104 pub info_count: usize,
105 pub hint_count: usize,
106}
107
108#[derive(Debug)]
109pub struct Definition {
110 pub target_buffer: ModelHandle<Buffer>,
111 pub target_range: Range<language::Anchor>,
112}
113
114impl DiagnosticSummary {
115 fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
116 let mut this = Self {
117 error_count: 0,
118 warning_count: 0,
119 info_count: 0,
120 hint_count: 0,
121 };
122
123 for entry in diagnostics {
124 if entry.diagnostic.is_primary {
125 match entry.diagnostic.severity {
126 DiagnosticSeverity::ERROR => this.error_count += 1,
127 DiagnosticSeverity::WARNING => this.warning_count += 1,
128 DiagnosticSeverity::INFORMATION => this.info_count += 1,
129 DiagnosticSeverity::HINT => this.hint_count += 1,
130 _ => {}
131 }
132 }
133 }
134
135 this
136 }
137
138 pub fn to_proto(&self, path: Arc<Path>) -> proto::DiagnosticSummary {
139 proto::DiagnosticSummary {
140 path: path.to_string_lossy().to_string(),
141 error_count: self.error_count as u32,
142 warning_count: self.warning_count as u32,
143 info_count: self.info_count as u32,
144 hint_count: self.hint_count as u32,
145 }
146 }
147}
148
149#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
150pub struct ProjectEntry {
151 pub worktree_id: WorktreeId,
152 pub entry_id: usize,
153}
154
155impl Project {
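    /// Creates a new local project. A background task registers the project (and
    /// re-registers its worktrees) with the server whenever the client's status
    /// becomes `Connected`.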
156 pub fn local(
157 client: Arc<Client>,
158 user_store: ModelHandle<UserStore>,
159 languages: Arc<LanguageRegistry>,
160 fs: Arc<dyn Fs>,
161 cx: &mut MutableAppContext,
162 ) -> ModelHandle<Self> {
163 cx.add_model(|cx: &mut ModelContext<Self>| {
164 let (remote_id_tx, remote_id_rx) = watch::channel();
165 let _maintain_remote_id_task = cx.spawn_weak({
166 let rpc = client.clone();
167 move |this, mut cx| {
168 async move {
169 let mut status = rpc.status();
170 while let Some(status) = status.recv().await {
171 if let Some(this) = this.upgrade(&cx) {
172 let remote_id = if let client::Status::Connected { .. } = status {
173 let response = rpc.request(proto::RegisterProject {}).await?;
174 Some(response.project_id)
175 } else {
176 None
177 };
178
179 if let Some(project_id) = remote_id {
180 let mut registrations = Vec::new();
181 this.update(&mut cx, |this, cx| {
182 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
183 registrations.push(worktree.update(
184 cx,
185 |worktree, cx| {
186 let worktree = worktree.as_local_mut().unwrap();
187 worktree.register(project_id, cx)
188 },
189 ));
190 }
191 });
192 for registration in registrations {
193 registration.await?;
194 }
195 }
196 this.update(&mut cx, |this, cx| this.set_remote_id(remote_id, cx));
197 }
198 }
199 Ok(())
200 }
201 .log_err()
202 }
203 });
204
205 Self {
206 worktrees: Default::default(),
207 collaborators: Default::default(),
208 open_buffers: Default::default(),
209 loading_buffers: Default::default(),
210 shared_buffers: Default::default(),
211 client_state: ProjectClientState::Local {
212 is_shared: false,
213 remote_id_tx,
214 remote_id_rx,
215 _maintain_remote_id_task,
216 },
217 subscriptions: Vec::new(),
218 active_entry: None,
219 languages,
220 client,
221 user_store,
222 fs,
223 language_servers_with_diagnostics_running: 0,
224 language_servers: Default::default(),
225 }
226 })
227 }
228
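    /// Joins the remote project with the given id, loading its worktrees and
    /// collaborators from the server and subscribing to its updates.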
229 pub async fn remote(
230 remote_id: u64,
231 client: Arc<Client>,
232 user_store: ModelHandle<UserStore>,
233 languages: Arc<LanguageRegistry>,
234 fs: Arc<dyn Fs>,
235 cx: &mut AsyncAppContext,
236 ) -> Result<ModelHandle<Self>> {
237 client.authenticate_and_connect(&cx).await?;
238
239 let response = client
240 .request(proto::JoinProject {
241 project_id: remote_id,
242 })
243 .await?;
244
245 let replica_id = response.replica_id as ReplicaId;
246
247 let mut worktrees = Vec::new();
248 for worktree in response.worktrees {
249 worktrees
250 .push(Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx).await?);
251 }
252
253 let user_ids = response
254 .collaborators
255 .iter()
256 .map(|peer| peer.user_id)
257 .collect();
258 user_store
259 .update(cx, |user_store, cx| user_store.load_users(user_ids, cx))
260 .await?;
261 let mut collaborators = HashMap::default();
262 for message in response.collaborators {
263 let collaborator = Collaborator::from_proto(message, &user_store, cx).await?;
264 collaborators.insert(collaborator.peer_id, collaborator);
265 }
266
267 Ok(cx.add_model(|cx| {
268 let mut this = Self {
269 worktrees: Vec::new(),
270 open_buffers: Default::default(),
271 loading_buffers: Default::default(),
272 shared_buffers: Default::default(),
273 active_entry: None,
274 collaborators,
275 languages,
276 user_store,
277 fs,
278 subscriptions: vec![
279 client.subscribe_to_entity(remote_id, cx, Self::handle_unshare_project),
280 client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
281 client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
282 client.subscribe_to_entity(remote_id, cx, Self::handle_share_worktree),
283 client.subscribe_to_entity(remote_id, cx, Self::handle_unregister_worktree),
284 client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
285 client.subscribe_to_entity(
286 remote_id,
287 cx,
288 Self::handle_update_diagnostic_summary,
289 ),
290 client.subscribe_to_entity(
291 remote_id,
292 cx,
293 Self::handle_disk_based_diagnostics_updating,
294 ),
295 client.subscribe_to_entity(
296 remote_id,
297 cx,
298 Self::handle_disk_based_diagnostics_updated,
299 ),
300 client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
301 client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
302 ],
303 client,
304 client_state: ProjectClientState::Remote {
305 sharing_has_stopped: false,
306 remote_id,
307 replica_id,
308 },
309 language_servers_with_diagnostics_running: 0,
310 language_servers: Default::default(),
311 };
312 for worktree in worktrees {
313 this.add_worktree(&worktree, cx);
314 }
315 this
316 }))
317 }
318
319 fn set_remote_id(&mut self, remote_id: Option<u64>, cx: &mut ModelContext<Self>) {
320 if let ProjectClientState::Local { remote_id_tx, .. } = &mut self.client_state {
321 *remote_id_tx.borrow_mut() = remote_id;
322 }
323
324 self.subscriptions.clear();
325 if let Some(remote_id) = remote_id {
326 let client = &self.client;
327 self.subscriptions.extend([
328 client.subscribe_to_entity(remote_id, cx, Self::handle_open_buffer),
329 client.subscribe_to_entity(remote_id, cx, Self::handle_close_buffer),
330 client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
331 client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
332 client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
333 client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
334 client.subscribe_to_entity(remote_id, cx, Self::handle_save_buffer),
335 client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
336 client.subscribe_to_entity(remote_id, cx, Self::handle_format_buffer),
337 ]);
338 }
339 }
340
341 pub fn remote_id(&self) -> Option<u64> {
342 match &self.client_state {
343 ProjectClientState::Local { remote_id_rx, .. } => *remote_id_rx.borrow(),
344 ProjectClientState::Remote { remote_id, .. } => Some(*remote_id),
345 }
346 }
347
348 pub fn next_remote_id(&self) -> impl Future<Output = u64> {
349 let mut id = None;
350 let mut watch = None;
351 match &self.client_state {
352 ProjectClientState::Local { remote_id_rx, .. } => watch = Some(remote_id_rx.clone()),
353 ProjectClientState::Remote { remote_id, .. } => id = Some(*remote_id),
354 }
355
356 async move {
357 if let Some(id) = id {
358 return id;
359 }
360 let mut watch = watch.unwrap();
361 loop {
362 let id = *watch.borrow();
363 if let Some(id) = id {
364 return id;
365 }
366 watch.recv().await;
367 }
368 }
369 }
370
371 pub fn replica_id(&self) -> ReplicaId {
372 match &self.client_state {
373 ProjectClientState::Local { .. } => 0,
374 ProjectClientState::Remote { replica_id, .. } => *replica_id,
375 }
376 }
377
378 pub fn collaborators(&self) -> &HashMap<PeerId, Collaborator> {
379 &self.collaborators
380 }
381
382 pub fn worktrees<'a>(
383 &'a self,
384 cx: &'a AppContext,
385 ) -> impl 'a + Iterator<Item = ModelHandle<Worktree>> {
386 self.worktrees
387 .iter()
388 .filter_map(move |worktree| worktree.upgrade(cx))
389 }
390
391 pub fn worktree_for_id(
392 &self,
393 id: WorktreeId,
394 cx: &AppContext,
395 ) -> Option<ModelHandle<Worktree>> {
396 self.worktrees(cx)
397 .find(|worktree| worktree.read(cx).id() == id)
398 }
399
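    /// Shares this local project with collaborators by registering each of its
    /// worktrees with the server. Returns an error if the project has no remote id
    /// or is itself a remote project.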
400 pub fn share(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
401 let rpc = self.client.clone();
402 cx.spawn(|this, mut cx| async move {
403 let project_id = this.update(&mut cx, |this, _| {
404 if let ProjectClientState::Local {
405 is_shared,
406 remote_id_rx,
407 ..
408 } = &mut this.client_state
409 {
410 *is_shared = true;
411 remote_id_rx
412 .borrow()
413 .ok_or_else(|| anyhow!("no project id"))
414 } else {
415 Err(anyhow!("can't share a remote project"))
416 }
417 })?;
418
419 rpc.request(proto::ShareProject { project_id }).await?;
420 let mut tasks = Vec::new();
421 this.update(&mut cx, |this, cx| {
422 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
423 worktree.update(cx, |worktree, cx| {
424 let worktree = worktree.as_local_mut().unwrap();
425 tasks.push(worktree.share(project_id, cx));
426 });
427 }
428 });
429 for task in tasks {
430 task.await?;
431 }
432 this.update(&mut cx, |_, cx| cx.notify());
433 Ok(())
434 })
435 }
436
437 pub fn unshare(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
438 let rpc = self.client.clone();
439 cx.spawn(|this, mut cx| async move {
440 let project_id = this.update(&mut cx, |this, _| {
441 if let ProjectClientState::Local {
442 is_shared,
443 remote_id_rx,
444 ..
445 } = &mut this.client_state
446 {
447 *is_shared = false;
448 remote_id_rx
449 .borrow()
450 .ok_or_else(|| anyhow!("no project id"))
451 } else {
452 Err(anyhow!("can't share a remote project"))
453 }
454 })?;
455
456 rpc.send(proto::UnshareProject { project_id }).await?;
457 this.update(&mut cx, |this, cx| {
458 this.collaborators.clear();
459 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
460 worktree.update(cx, |worktree, _| {
461 worktree.as_local_mut().unwrap().unshare();
462 });
463 }
464 cx.notify()
465 });
466 Ok(())
467 })
468 }
469
470 pub fn is_read_only(&self) -> bool {
471 match &self.client_state {
472 ProjectClientState::Local { .. } => false,
473 ProjectClientState::Remote {
474 sharing_has_stopped,
475 ..
476 } => *sharing_has_stopped,
477 }
478 }
479
480 pub fn is_local(&self) -> bool {
481 match &self.client_state {
482 ProjectClientState::Local { .. } => true,
483 ProjectClientState::Remote { .. } => false,
484 }
485 }
486
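    /// Opens a buffer for the given project path. If a buffer for that path is
    /// already open, the existing handle is returned; if a load for the same path is
    /// already in flight, the caller waits on it instead of starting another load.
    ///
    /// A sketch of a call site (cf. the tests at the bottom of this file):
    ///
    /// ```ignore
    /// let buffer = project
    ///     .update(&mut cx, |project, cx| project.open_buffer((worktree_id, "a.rs"), cx))
    ///     .await
    ///     .unwrap();
    /// ```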
487 pub fn open_buffer(
488 &mut self,
489 path: impl Into<ProjectPath>,
490 cx: &mut ModelContext<Self>,
491 ) -> Task<Result<ModelHandle<Buffer>>> {
492 let project_path = path.into();
493 let worktree = if let Some(worktree) = self.worktree_for_id(project_path.worktree_id, cx) {
494 worktree
495 } else {
496 return Task::ready(Err(anyhow!("no such worktree")));
497 };
498
499 // If there is already a buffer for the given path, then return it.
500 let existing_buffer = self.get_open_buffer(&project_path, cx);
501 if let Some(existing_buffer) = existing_buffer {
502 return Task::ready(Ok(existing_buffer));
503 }
504
505 let mut loading_watch = match self.loading_buffers.entry(project_path.clone()) {
506 // If the given path is already being loaded, then wait for that existing
507 // task to complete and return the same buffer.
508 hash_map::Entry::Occupied(e) => e.get().clone(),
509
510 // Otherwise, record the fact that this path is now being loaded.
511 hash_map::Entry::Vacant(entry) => {
512 let (mut tx, rx) = postage::watch::channel();
513 entry.insert(rx.clone());
514
515 let load_buffer = worktree.update(cx, |worktree, cx| {
516 worktree.load_buffer(&project_path.path, cx)
517 });
518
519 cx.spawn(move |this, mut cx| async move {
520 let load_result = load_buffer.await;
521 *tx.borrow_mut() = Some(this.update(&mut cx, |this, cx| {
522 // Record the fact that the buffer is no longer loading.
523 this.loading_buffers.remove(&project_path);
524 let buffer = load_result.map_err(Arc::new)?;
525 this.open_buffers.insert(
526 buffer.read(cx).remote_id() as usize,
527 OpenBuffer::Loaded(buffer.downgrade()),
528 );
529 this.assign_language_to_buffer(&worktree, &buffer, cx);
530 Ok(buffer)
531 }));
532 })
533 .detach();
534 rx
535 }
536 };
537
538 cx.foreground().spawn(async move {
539 loop {
540 if let Some(result) = loading_watch.borrow().as_ref() {
541 match result {
542 Ok(buffer) => return Ok(buffer.clone()),
543 Err(error) => return Err(anyhow!("{}", error)),
544 }
545 }
546 loading_watch.recv().await;
547 }
548 })
549 }
550
551 pub fn save_buffer_as(
552 &self,
553 buffer: ModelHandle<Buffer>,
554 abs_path: PathBuf,
555 cx: &mut ModelContext<Project>,
556 ) -> Task<Result<()>> {
557 let worktree_task = self.find_or_create_local_worktree(&abs_path, false, cx);
558 cx.spawn(|this, mut cx| async move {
559 let (worktree, path) = worktree_task.await?;
560 worktree
561 .update(&mut cx, |worktree, cx| {
562 worktree
563 .as_local_mut()
564 .unwrap()
565 .save_buffer_as(buffer.clone(), path, cx)
566 })
567 .await?;
568 this.update(&mut cx, |this, cx| {
569 this.open_buffers
570 .insert(buffer.id(), OpenBuffer::Loaded(buffer.downgrade()));
571 this.assign_language_to_buffer(&worktree, &buffer, cx);
572 });
573 Ok(())
574 })
575 }
576
577 #[cfg(any(test, feature = "test-support"))]
578 pub fn has_open_buffer(&self, path: impl Into<ProjectPath>, cx: &AppContext) -> bool {
579 let path = path.into();
580 if let Some(worktree) = self.worktree_for_id(path.worktree_id, cx) {
581 self.open_buffers.iter().any(|(_, buffer)| {
582 if let Some(buffer) = buffer.upgrade(cx) {
583 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
584 if file.worktree == worktree && file.path() == &path.path {
585 return true;
586 }
587 }
588 }
589 false
590 })
591 } else {
592 false
593 }
594 }
595
596 fn get_open_buffer(
597 &mut self,
598 path: &ProjectPath,
599 cx: &mut ModelContext<Self>,
600 ) -> Option<ModelHandle<Buffer>> {
601 let mut result = None;
602 let worktree = self.worktree_for_id(path.worktree_id, cx)?;
603 self.open_buffers.retain(|_, buffer| {
604 if let OpenBuffer::Loaded(buffer) = buffer {
605 if let Some(buffer) = buffer.upgrade(cx) {
606 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
607 if file.worktree == worktree && file.path() == &path.path {
608 result = Some(buffer);
609 }
610 }
611 return true;
612 }
613 }
614 false
615 });
616 result
617 }
618
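    /// Detects the buffer's language from its path and, for local worktrees, starts
    /// or reuses the language server for that (worktree, language) pair. Any
    /// diagnostics previously stored for the path are also applied to the buffer.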
619 fn assign_language_to_buffer(
620 &mut self,
621 worktree: &ModelHandle<Worktree>,
622 buffer: &ModelHandle<Buffer>,
623 cx: &mut ModelContext<Self>,
624 ) -> Option<()> {
625 let (path, full_path) = {
626 let file = buffer.read(cx).file()?;
627 (file.path().clone(), file.full_path(cx))
628 };
629
630 // If the buffer has a language, set it and start/assign the language server
631 if let Some(language) = self.languages.select_language(&full_path) {
632 buffer.update(cx, |buffer, cx| {
633 buffer.set_language(Some(language.clone()), cx);
634 });
635
636 // For local worktrees, start a language server if needed.
637 // Also assign the language server and any previously stored diagnostics to the buffer.
638 if let Some(local_worktree) = worktree.read(cx).as_local() {
639 let worktree_id = local_worktree.id();
640 let worktree_abs_path = local_worktree.abs_path().clone();
641
642 let language_server = match self
643 .language_servers
644 .entry((worktree_id, language.name().to_string()))
645 {
646 hash_map::Entry::Occupied(e) => Some(e.get().clone()),
647 hash_map::Entry::Vacant(e) => Self::start_language_server(
648 self.client.clone(),
649 language.clone(),
650 &worktree_abs_path,
651 cx,
652 )
653 .map(|server| e.insert(server).clone()),
654 };
655
656 buffer.update(cx, |buffer, cx| {
657 buffer.set_language_server(language_server, cx);
658 });
659 }
660 }
661
662 if let Some(local_worktree) = worktree.read(cx).as_local() {
663 if let Some(diagnostics) = local_worktree.diagnostics_for_path(&path) {
664 buffer.update(cx, |buffer, cx| {
665 buffer.update_diagnostics(None, diagnostics, cx).log_err();
666 });
667 }
668 }
669
670 None
671 }
672
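    /// Starts a language server for the given language, rooted at the worktree path,
    /// and spawns a task that forwards its diagnostic and progress notifications to
    /// the project and, when the project has a remote id, to the server.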
673 fn start_language_server(
674 rpc: Arc<Client>,
675 language: Arc<Language>,
676 worktree_path: &Path,
677 cx: &mut ModelContext<Self>,
678 ) -> Option<Arc<LanguageServer>> {
679 enum LspEvent {
680 DiagnosticsStart,
681 DiagnosticsUpdate(lsp::PublishDiagnosticsParams),
682 DiagnosticsFinish,
683 }
684
685 let language_server = language
686 .start_server(worktree_path, cx)
687 .log_err()
688 .flatten()?;
689 let disk_based_sources = language
690 .disk_based_diagnostic_sources()
691 .cloned()
692 .unwrap_or_default();
693 let disk_based_diagnostics_progress_token =
694 language.disk_based_diagnostics_progress_token().cloned();
695 let has_disk_based_diagnostic_progress_token =
696 disk_based_diagnostics_progress_token.is_some();
697 let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
698
699 // Listen for `PublishDiagnostics` notifications.
700 language_server
701 .on_notification::<lsp::notification::PublishDiagnostics, _>({
702 let diagnostics_tx = diagnostics_tx.clone();
703 move |params| {
704 if !has_disk_based_diagnostic_progress_token {
705 block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
706 }
707 block_on(diagnostics_tx.send(LspEvent::DiagnosticsUpdate(params))).ok();
708 if !has_disk_based_diagnostic_progress_token {
709 block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
710 }
711 }
712 })
713 .detach();
714
        // Listen for `Progress` notifications carrying the disk-based diagnostics
        // token. Send an event when the language server transitions between running
        // such jobs and not running any.
717 let mut running_jobs_for_this_server: i32 = 0;
718 language_server
719 .on_notification::<lsp::notification::Progress, _>(move |params| {
720 let token = match params.token {
721 lsp::NumberOrString::Number(_) => None,
722 lsp::NumberOrString::String(token) => Some(token),
723 };
724
725 if token == disk_based_diagnostics_progress_token {
726 match params.value {
727 lsp::ProgressParamsValue::WorkDone(progress) => match progress {
728 lsp::WorkDoneProgress::Begin(_) => {
729 running_jobs_for_this_server += 1;
730 if running_jobs_for_this_server == 1 {
731 block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
732 }
733 }
734 lsp::WorkDoneProgress::End(_) => {
735 running_jobs_for_this_server -= 1;
736 if running_jobs_for_this_server == 0 {
737 block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
738 }
739 }
740 _ => {}
741 },
742 }
743 }
744 })
745 .detach();
746
747 // Process all the LSP events.
748 cx.spawn_weak(|this, mut cx| async move {
749 while let Ok(message) = diagnostics_rx.recv().await {
750 let this = cx.read(|cx| this.upgrade(cx))?;
751 match message {
752 LspEvent::DiagnosticsStart => {
753 let send = this.update(&mut cx, |this, cx| {
754 this.disk_based_diagnostics_started(cx);
755 this.remote_id().map(|project_id| {
756 rpc.send(proto::DiskBasedDiagnosticsUpdating { project_id })
757 })
758 });
759 if let Some(send) = send {
760 send.await.log_err();
761 }
762 }
763 LspEvent::DiagnosticsUpdate(params) => {
764 this.update(&mut cx, |this, cx| {
765 this.update_diagnostics(params, &disk_based_sources, cx)
766 .log_err();
767 });
768 }
769 LspEvent::DiagnosticsFinish => {
770 let send = this.update(&mut cx, |this, cx| {
771 this.disk_based_diagnostics_finished(cx);
772 this.remote_id().map(|project_id| {
773 rpc.send(proto::DiskBasedDiagnosticsUpdated { project_id })
774 })
775 });
776 if let Some(send) = send {
777 send.await.log_err();
778 }
779 }
780 }
781 }
782 Some(())
783 })
784 .detach();
785
786 Some(language_server)
787 }
788
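    /// Converts an LSP `PublishDiagnostics` notification into grouped diagnostic
    /// entries for the file: each primary diagnostic starts a new group, its related
    /// information in the same file is added to that group as non-primary entries,
    /// and diagnostics that merely refer back to an existing primary only override
    /// the severity of the matching entry.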
789 pub fn update_diagnostics(
790 &mut self,
791 params: lsp::PublishDiagnosticsParams,
792 disk_based_sources: &HashSet<String>,
793 cx: &mut ModelContext<Self>,
794 ) -> Result<()> {
795 let abs_path = params
796 .uri
797 .to_file_path()
798 .map_err(|_| anyhow!("URI is not a file"))?;
799 let mut next_group_id = 0;
800 let mut diagnostics = Vec::default();
801 let mut primary_diagnostic_group_ids = HashMap::default();
802 let mut sources_by_group_id = HashMap::default();
803 let mut supporting_diagnostic_severities = HashMap::default();
        for diagnostic in &params.diagnostics {
805 let source = diagnostic.source.as_ref();
806 let code = diagnostic.code.as_ref().map(|code| match code {
807 lsp::NumberOrString::Number(code) => code.to_string(),
808 lsp::NumberOrString::String(code) => code.clone(),
809 });
810 let range = range_from_lsp(diagnostic.range);
811 let is_supporting = diagnostic
812 .related_information
813 .as_ref()
814 .map_or(false, |infos| {
815 infos.iter().any(|info| {
816 primary_diagnostic_group_ids.contains_key(&(
817 source,
818 code.clone(),
819 range_from_lsp(info.location.range),
820 ))
821 })
822 });
823
824 if is_supporting {
825 if let Some(severity) = diagnostic.severity {
826 supporting_diagnostic_severities
827 .insert((source, code.clone(), range), severity);
828 }
829 } else {
830 let group_id = post_inc(&mut next_group_id);
831 let is_disk_based =
832 source.map_or(false, |source| disk_based_sources.contains(source));
833
834 sources_by_group_id.insert(group_id, source);
835 primary_diagnostic_group_ids
836 .insert((source, code.clone(), range.clone()), group_id);
837
838 diagnostics.push(DiagnosticEntry {
839 range,
840 diagnostic: Diagnostic {
841 code: code.clone(),
842 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
843 message: diagnostic.message.clone(),
844 group_id,
845 is_primary: true,
846 is_valid: true,
847 is_disk_based,
848 },
849 });
850 if let Some(infos) = &diagnostic.related_information {
851 for info in infos {
852 if info.location.uri == params.uri {
853 let range = range_from_lsp(info.location.range);
854 diagnostics.push(DiagnosticEntry {
855 range,
856 diagnostic: Diagnostic {
857 code: code.clone(),
858 severity: DiagnosticSeverity::INFORMATION,
859 message: info.message.clone(),
860 group_id,
861 is_primary: false,
862 is_valid: true,
863 is_disk_based,
864 },
865 });
866 }
867 }
868 }
869 }
870 }
871
872 for entry in &mut diagnostics {
873 let diagnostic = &mut entry.diagnostic;
874 if !diagnostic.is_primary {
875 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
876 if let Some(&severity) = supporting_diagnostic_severities.get(&(
877 source,
878 diagnostic.code.clone(),
879 entry.range.clone(),
880 )) {
881 diagnostic.severity = severity;
882 }
883 }
884 }
885
886 self.update_diagnostic_entries(abs_path, params.version, diagnostics, cx)?;
887 Ok(())
888 }
889
890 pub fn update_diagnostic_entries(
891 &mut self,
892 abs_path: PathBuf,
893 version: Option<i32>,
894 diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
895 cx: &mut ModelContext<Project>,
896 ) -> Result<(), anyhow::Error> {
897 let (worktree, relative_path) = self
898 .find_local_worktree(&abs_path, cx)
899 .ok_or_else(|| anyhow!("no worktree found for diagnostics"))?;
900 let project_path = ProjectPath {
901 worktree_id: worktree.read(cx).id(),
902 path: relative_path.into(),
903 };
904
905 for buffer in self.open_buffers.values() {
906 if let Some(buffer) = buffer.upgrade(cx) {
907 if buffer
908 .read(cx)
909 .file()
910 .map_or(false, |file| *file.path() == project_path.path)
911 {
912 buffer.update(cx, |buffer, cx| {
913 buffer.update_diagnostics(version, diagnostics.clone(), cx)
914 })?;
915 break;
916 }
917 }
918 }
919 worktree.update(cx, |worktree, cx| {
920 worktree
921 .as_local_mut()
922 .ok_or_else(|| anyhow!("not a local worktree"))?
923 .update_diagnostics(project_path.path.clone(), diagnostics, cx)
924 })?;
925 cx.emit(Event::DiagnosticsUpdated(project_path));
926 Ok(())
927 }
928
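    /// Resolves the definition of the symbol at the given position by querying the
    /// buffer's language server. Targets outside the project are opened through a
    /// newly created weak worktree. Not yet implemented for remote (guest) projects.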
929 pub fn definition<T: ToOffset>(
930 &self,
931 source_buffer_handle: &ModelHandle<Buffer>,
932 position: T,
933 cx: &mut ModelContext<Self>,
934 ) -> Task<Result<Vec<Definition>>> {
935 let source_buffer_handle = source_buffer_handle.clone();
936 let buffer = source_buffer_handle.read(cx);
937 let worktree;
938 let buffer_abs_path;
939 if let Some(file) = File::from_dyn(buffer.file()) {
940 worktree = file.worktree.clone();
941 buffer_abs_path = file.abs_path(cx);
942 } else {
943 return Task::ready(Err(anyhow!("buffer does not belong to any worktree")));
944 };
945
946 if worktree.read(cx).as_local().is_some() {
947 let point = buffer.offset_to_point_utf16(position.to_offset(buffer));
948 let buffer_abs_path = buffer_abs_path.unwrap();
949 let lang_name;
950 let lang_server;
951 if let Some(lang) = buffer.language() {
952 lang_name = lang.name().to_string();
953 if let Some(server) = self
954 .language_servers
955 .get(&(worktree.read(cx).id(), lang_name.clone()))
956 {
957 lang_server = server.clone();
958 } else {
959 return Task::ready(Err(anyhow!("buffer does not have a language server")));
960 };
961 } else {
962 return Task::ready(Err(anyhow!("buffer does not have a language")));
963 }
964
965 cx.spawn(|this, mut cx| async move {
966 let response = lang_server
967 .request::<lsp::request::GotoDefinition>(lsp::GotoDefinitionParams {
968 text_document_position_params: lsp::TextDocumentPositionParams {
969 text_document: lsp::TextDocumentIdentifier::new(
970 lsp::Url::from_file_path(&buffer_abs_path).unwrap(),
971 ),
972 position: lsp::Position::new(point.row, point.column),
973 },
974 work_done_progress_params: Default::default(),
975 partial_result_params: Default::default(),
976 })
977 .await?;
978
979 let mut definitions = Vec::new();
980 if let Some(response) = response {
981 let mut unresolved_locations = Vec::new();
982 match response {
983 lsp::GotoDefinitionResponse::Scalar(loc) => {
984 unresolved_locations.push((loc.uri, loc.range));
985 }
986 lsp::GotoDefinitionResponse::Array(locs) => {
987 unresolved_locations.extend(locs.into_iter().map(|l| (l.uri, l.range)));
988 }
989 lsp::GotoDefinitionResponse::Link(links) => {
990 unresolved_locations.extend(
991 links
992 .into_iter()
993 .map(|l| (l.target_uri, l.target_selection_range)),
994 );
995 }
996 }
997
998 for (target_uri, target_range) in unresolved_locations {
999 let abs_path = target_uri
1000 .to_file_path()
1001 .map_err(|_| anyhow!("invalid target path"))?;
1002
1003 let (worktree, relative_path) = if let Some(result) =
1004 this.read_with(&cx, |this, cx| this.find_local_worktree(&abs_path, cx))
1005 {
1006 result
1007 } else {
1008 let worktree = this
1009 .update(&mut cx, |this, cx| {
1010 this.create_local_worktree(&abs_path, true, cx)
1011 })
1012 .await?;
1013 this.update(&mut cx, |this, cx| {
1014 this.language_servers.insert(
1015 (worktree.read(cx).id(), lang_name.clone()),
1016 lang_server.clone(),
1017 );
1018 });
1019 (worktree, PathBuf::new())
1020 };
1021
1022 let project_path = ProjectPath {
1023 worktree_id: worktree.read_with(&cx, |worktree, _| worktree.id()),
1024 path: relative_path.into(),
1025 };
1026 let target_buffer_handle = this
1027 .update(&mut cx, |this, cx| this.open_buffer(project_path, cx))
1028 .await?;
1029 cx.read(|cx| {
1030 let target_buffer = target_buffer_handle.read(cx);
1031 let target_start = target_buffer
1032 .clip_point_utf16(target_range.start.to_point_utf16(), Bias::Left);
1033 let target_end = target_buffer
1034 .clip_point_utf16(target_range.end.to_point_utf16(), Bias::Left);
1035 definitions.push(Definition {
1036 target_buffer: target_buffer_handle,
1037 target_range: target_buffer.anchor_after(target_start)
1038 ..target_buffer.anchor_before(target_end),
1039 });
1040 });
1041 }
1042 }
1043
1044 Ok(definitions)
1045 })
1046 } else {
1047 log::info!("go to definition is not yet implemented for guests");
1048 Task::ready(Ok(Default::default()))
1049 }
1050 }
1051
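    /// Returns the local worktree containing `abs_path` along with the path relative
    /// to that worktree, creating a new worktree rooted at `abs_path` if no existing
    /// worktree contains it.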
1052 pub fn find_or_create_local_worktree(
1053 &self,
1054 abs_path: impl AsRef<Path>,
1055 weak: bool,
1056 cx: &mut ModelContext<Self>,
1057 ) -> Task<Result<(ModelHandle<Worktree>, PathBuf)>> {
1058 let abs_path = abs_path.as_ref();
1059 if let Some((tree, relative_path)) = self.find_local_worktree(abs_path, cx) {
1060 Task::ready(Ok((tree.clone(), relative_path.into())))
1061 } else {
1062 let worktree = self.create_local_worktree(abs_path, weak, cx);
1063 cx.foreground()
1064 .spawn(async move { Ok((worktree.await?, PathBuf::new())) })
1065 }
1066 }
1067
1068 fn find_local_worktree(
1069 &self,
1070 abs_path: &Path,
1071 cx: &AppContext,
1072 ) -> Option<(ModelHandle<Worktree>, PathBuf)> {
1073 for tree in self.worktrees(cx) {
1074 if let Some(relative_path) = tree
1075 .read(cx)
1076 .as_local()
1077 .and_then(|t| abs_path.strip_prefix(t.abs_path()).ok())
1078 {
1079 return Some((tree.clone(), relative_path.into()));
1080 }
1081 }
1082 None
1083 }
1084
1085 pub fn is_shared(&self) -> bool {
1086 match &self.client_state {
1087 ProjectClientState::Local { is_shared, .. } => *is_shared,
1088 ProjectClientState::Remote { .. } => false,
1089 }
1090 }
1091
1092 fn create_local_worktree(
1093 &self,
1094 abs_path: impl AsRef<Path>,
1095 weak: bool,
1096 cx: &mut ModelContext<Self>,
1097 ) -> Task<Result<ModelHandle<Worktree>>> {
1098 let fs = self.fs.clone();
1099 let client = self.client.clone();
1100 let path = Arc::from(abs_path.as_ref());
1101 cx.spawn(|project, mut cx| async move {
1102 let worktree = Worktree::local(client.clone(), path, weak, fs, &mut cx).await?;
1103
1104 let (remote_project_id, is_shared) = project.update(&mut cx, |project, cx| {
1105 project.add_worktree(&worktree, cx);
1106 (project.remote_id(), project.is_shared())
1107 });
1108
1109 if let Some(project_id) = remote_project_id {
1110 worktree
1111 .update(&mut cx, |worktree, cx| {
1112 worktree.as_local_mut().unwrap().register(project_id, cx)
1113 })
1114 .await?;
1115 if is_shared {
1116 worktree
1117 .update(&mut cx, |worktree, cx| {
1118 worktree.as_local_mut().unwrap().share(project_id, cx)
1119 })
1120 .await?;
1121 }
1122 }
1123
1124 Ok(worktree)
1125 })
1126 }
1127
1128 pub fn remove_worktree(&mut self, id: WorktreeId, cx: &mut ModelContext<Self>) {
1129 self.worktrees.retain(|worktree| {
1130 worktree
1131 .upgrade(cx)
1132 .map_or(false, |w| w.read(cx).id() != id)
1133 });
1134 cx.notify();
1135 }
1136
1137 fn add_worktree(&mut self, worktree: &ModelHandle<Worktree>, cx: &mut ModelContext<Self>) {
1138 cx.observe(&worktree, |_, _, cx| cx.notify()).detach();
1139 cx.subscribe(&worktree, |this, worktree, _, cx| {
1140 this.update_open_buffers(worktree, cx)
1141 })
1142 .detach();
1143
1144 let push_weak_handle = {
1145 let worktree = worktree.read(cx);
1146 worktree.is_local() && worktree.is_weak()
1147 };
1148 if push_weak_handle {
1149 cx.observe_release(&worktree, |this, cx| {
1150 this.worktrees
1151 .retain(|worktree| worktree.upgrade(cx).is_some());
1152 cx.notify();
1153 })
1154 .detach();
1155 self.worktrees
1156 .push(WorktreeHandle::Weak(worktree.downgrade()));
1157 } else {
1158 self.worktrees
1159 .push(WorktreeHandle::Strong(worktree.clone()));
1160 }
1161 cx.notify();
1162 }
1163
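    /// Refreshes the `File` associated with each buffer open in the given worktree
    /// after its snapshot changes, and discards entries whose buffers have been
    /// dropped.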
1164 fn update_open_buffers(
1165 &mut self,
1166 worktree_handle: ModelHandle<Worktree>,
1167 cx: &mut ModelContext<Self>,
1168 ) {
1169 let local = worktree_handle.read(cx).is_local();
1170 let snapshot = worktree_handle.read(cx).snapshot();
1171 let mut buffers_to_delete = Vec::new();
1172 for (buffer_id, buffer) in &self.open_buffers {
1173 if let OpenBuffer::Loaded(buffer) = buffer {
1174 if let Some(buffer) = buffer.upgrade(cx) {
1175 buffer.update(cx, |buffer, cx| {
1176 if let Some(old_file) = File::from_dyn(buffer.file()) {
1177 if old_file.worktree != worktree_handle {
1178 return;
1179 }
1180
1181 let new_file = if let Some(entry) = old_file
1182 .entry_id
1183 .and_then(|entry_id| snapshot.entry_for_id(entry_id))
1184 {
1185 File {
1186 is_local: local,
1187 entry_id: Some(entry.id),
1188 mtime: entry.mtime,
1189 path: entry.path.clone(),
1190 worktree: worktree_handle.clone(),
1191 }
1192 } else if let Some(entry) =
1193 snapshot.entry_for_path(old_file.path().as_ref())
1194 {
1195 File {
1196 is_local: local,
1197 entry_id: Some(entry.id),
1198 mtime: entry.mtime,
1199 path: entry.path.clone(),
1200 worktree: worktree_handle.clone(),
1201 }
1202 } else {
1203 File {
1204 is_local: local,
1205 entry_id: None,
1206 path: old_file.path().clone(),
1207 mtime: old_file.mtime(),
1208 worktree: worktree_handle.clone(),
1209 }
1210 };
1211
1212 if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
1213 task.detach();
1214 }
1215 }
1216 });
1217 } else {
1218 buffers_to_delete.push(*buffer_id);
1219 }
1220 }
1221 }
1222
1223 for buffer_id in buffers_to_delete {
1224 self.open_buffers.remove(&buffer_id);
1225 }
1226 }
1227
1228 pub fn set_active_path(&mut self, entry: Option<ProjectPath>, cx: &mut ModelContext<Self>) {
1229 let new_active_entry = entry.and_then(|project_path| {
1230 let worktree = self.worktree_for_id(project_path.worktree_id, cx)?;
1231 let entry = worktree.read(cx).entry_for_path(project_path.path)?;
1232 Some(ProjectEntry {
1233 worktree_id: project_path.worktree_id,
1234 entry_id: entry.id,
1235 })
1236 });
1237 if new_active_entry != self.active_entry {
1238 self.active_entry = new_active_entry;
1239 cx.emit(Event::ActiveEntryChanged(new_active_entry));
1240 }
1241 }
1242
1243 pub fn is_running_disk_based_diagnostics(&self) -> bool {
1244 self.language_servers_with_diagnostics_running > 0
1245 }
1246
1247 pub fn diagnostic_summary(&self, cx: &AppContext) -> DiagnosticSummary {
1248 let mut summary = DiagnosticSummary::default();
1249 for (_, path_summary) in self.diagnostic_summaries(cx) {
1250 summary.error_count += path_summary.error_count;
1251 summary.warning_count += path_summary.warning_count;
1252 summary.info_count += path_summary.info_count;
1253 summary.hint_count += path_summary.hint_count;
1254 }
1255 summary
1256 }
1257
1258 pub fn diagnostic_summaries<'a>(
1259 &'a self,
1260 cx: &'a AppContext,
1261 ) -> impl Iterator<Item = (ProjectPath, DiagnosticSummary)> + 'a {
1262 self.worktrees(cx).flat_map(move |worktree| {
1263 let worktree = worktree.read(cx);
1264 let worktree_id = worktree.id();
1265 worktree
1266 .diagnostic_summaries()
1267 .map(move |(path, summary)| (ProjectPath { worktree_id, path }, summary))
1268 })
1269 }
1270
1271 pub fn disk_based_diagnostics_started(&mut self, cx: &mut ModelContext<Self>) {
1272 self.language_servers_with_diagnostics_running += 1;
1273 if self.language_servers_with_diagnostics_running == 1 {
1274 cx.emit(Event::DiskBasedDiagnosticsStarted);
1275 }
1276 }
1277
1278 pub fn disk_based_diagnostics_finished(&mut self, cx: &mut ModelContext<Self>) {
1279 cx.emit(Event::DiskBasedDiagnosticsUpdated);
1280 self.language_servers_with_diagnostics_running -= 1;
1281 if self.language_servers_with_diagnostics_running == 0 {
1282 cx.emit(Event::DiskBasedDiagnosticsFinished);
1283 }
1284 }
1285
1286 pub fn active_entry(&self) -> Option<ProjectEntry> {
1287 self.active_entry
1288 }
1289
1290 // RPC message handlers
1291
1292 fn handle_unshare_project(
1293 &mut self,
1294 _: TypedEnvelope<proto::UnshareProject>,
1295 _: Arc<Client>,
1296 cx: &mut ModelContext<Self>,
1297 ) -> Result<()> {
1298 if let ProjectClientState::Remote {
1299 sharing_has_stopped,
1300 ..
1301 } = &mut self.client_state
1302 {
1303 *sharing_has_stopped = true;
1304 self.collaborators.clear();
1305 cx.notify();
1306 Ok(())
1307 } else {
1308 unreachable!()
1309 }
1310 }
1311
1312 fn handle_add_collaborator(
1313 &mut self,
1314 mut envelope: TypedEnvelope<proto::AddProjectCollaborator>,
1315 _: Arc<Client>,
1316 cx: &mut ModelContext<Self>,
1317 ) -> Result<()> {
1318 let user_store = self.user_store.clone();
1319 let collaborator = envelope
1320 .payload
1321 .collaborator
1322 .take()
1323 .ok_or_else(|| anyhow!("empty collaborator"))?;
1324
1325 cx.spawn(|this, mut cx| {
1326 async move {
1327 let collaborator =
1328 Collaborator::from_proto(collaborator, &user_store, &mut cx).await?;
1329 this.update(&mut cx, |this, cx| {
1330 this.collaborators
1331 .insert(collaborator.peer_id, collaborator);
1332 cx.notify();
1333 });
1334 Ok(())
1335 }
1336 .log_err()
1337 })
1338 .detach();
1339
1340 Ok(())
1341 }
1342
1343 fn handle_remove_collaborator(
1344 &mut self,
1345 envelope: TypedEnvelope<proto::RemoveProjectCollaborator>,
1346 _: Arc<Client>,
1347 cx: &mut ModelContext<Self>,
1348 ) -> Result<()> {
1349 let peer_id = PeerId(envelope.payload.peer_id);
1350 let replica_id = self
1351 .collaborators
1352 .remove(&peer_id)
1353 .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?
1354 .replica_id;
1355 self.shared_buffers.remove(&peer_id);
1356 for (_, buffer) in &self.open_buffers {
1357 if let OpenBuffer::Loaded(buffer) = buffer {
1358 if let Some(buffer) = buffer.upgrade(cx) {
1359 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1360 }
1361 }
1362 }
1363 cx.notify();
1364 Ok(())
1365 }
1366
1367 fn handle_share_worktree(
1368 &mut self,
1369 envelope: TypedEnvelope<proto::ShareWorktree>,
1370 client: Arc<Client>,
1371 cx: &mut ModelContext<Self>,
1372 ) -> Result<()> {
1373 let remote_id = self.remote_id().ok_or_else(|| anyhow!("invalid project"))?;
1374 let replica_id = self.replica_id();
1375 let worktree = envelope
1376 .payload
1377 .worktree
1378 .ok_or_else(|| anyhow!("invalid worktree"))?;
1379 cx.spawn(|this, mut cx| {
1380 async move {
1381 let worktree =
1382 Worktree::remote(remote_id, replica_id, worktree, client, &mut cx).await?;
1383 this.update(&mut cx, |this, cx| this.add_worktree(&worktree, cx));
1384 Ok(())
1385 }
1386 .log_err()
1387 })
1388 .detach();
1389 Ok(())
1390 }
1391
1392 fn handle_unregister_worktree(
1393 &mut self,
1394 envelope: TypedEnvelope<proto::UnregisterWorktree>,
1395 _: Arc<Client>,
1396 cx: &mut ModelContext<Self>,
1397 ) -> Result<()> {
1398 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1399 self.remove_worktree(worktree_id, cx);
1400 Ok(())
1401 }
1402
1403 fn handle_update_worktree(
1404 &mut self,
1405 envelope: TypedEnvelope<proto::UpdateWorktree>,
1406 _: Arc<Client>,
1407 cx: &mut ModelContext<Self>,
1408 ) -> Result<()> {
1409 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1410 if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
1411 worktree.update(cx, |worktree, cx| {
1412 let worktree = worktree.as_remote_mut().unwrap();
1413 worktree.update_from_remote(envelope, cx)
1414 })?;
1415 }
1416 Ok(())
1417 }
1418
1419 fn handle_update_diagnostic_summary(
1420 &mut self,
1421 envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
1422 _: Arc<Client>,
1423 cx: &mut ModelContext<Self>,
1424 ) -> Result<()> {
1425 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1426 if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
1427 if let Some(summary) = envelope.payload.summary {
1428 let project_path = ProjectPath {
1429 worktree_id,
1430 path: Path::new(&summary.path).into(),
1431 };
1432 worktree.update(cx, |worktree, _| {
1433 worktree
1434 .as_remote_mut()
1435 .unwrap()
1436 .update_diagnostic_summary(project_path.path.clone(), &summary);
1437 });
1438 cx.emit(Event::DiagnosticsUpdated(project_path));
1439 }
1440 }
1441 Ok(())
1442 }
1443
1444 fn handle_disk_based_diagnostics_updating(
1445 &mut self,
1446 _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdating>,
1447 _: Arc<Client>,
1448 cx: &mut ModelContext<Self>,
1449 ) -> Result<()> {
1450 self.disk_based_diagnostics_started(cx);
1451 Ok(())
1452 }
1453
1454 fn handle_disk_based_diagnostics_updated(
1455 &mut self,
1456 _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdated>,
1457 _: Arc<Client>,
1458 cx: &mut ModelContext<Self>,
1459 ) -> Result<()> {
1460 self.disk_based_diagnostics_finished(cx);
1461 Ok(())
1462 }
1463
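    /// Applies buffer operations received from a remote peer, or queues them if the
    /// corresponding buffer hasn't been loaded on this replica yet.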
1464 pub fn handle_update_buffer(
1465 &mut self,
1466 envelope: TypedEnvelope<proto::UpdateBuffer>,
1467 _: Arc<Client>,
1468 cx: &mut ModelContext<Self>,
1469 ) -> Result<()> {
1470 let payload = envelope.payload.clone();
1471 let buffer_id = payload.buffer_id as usize;
1472 let ops = payload
1473 .operations
1474 .into_iter()
1475 .map(|op| language::proto::deserialize_operation(op))
1476 .collect::<Result<Vec<_>, _>>()?;
1477 match self.open_buffers.get_mut(&buffer_id) {
1478 Some(OpenBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
1479 Some(OpenBuffer::Loaded(buffer)) => {
1480 if let Some(buffer) = buffer.upgrade(cx) {
1481 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
1482 } else {
1483 self.open_buffers
1484 .insert(buffer_id, OpenBuffer::Operations(ops));
1485 }
1486 }
1487 None => {
1488 self.open_buffers
1489 .insert(buffer_id, OpenBuffer::Operations(ops));
1490 }
1491 }
1492 Ok(())
1493 }
1494
1495 pub fn handle_save_buffer(
1496 &mut self,
1497 envelope: TypedEnvelope<proto::SaveBuffer>,
1498 rpc: Arc<Client>,
1499 cx: &mut ModelContext<Self>,
1500 ) -> Result<()> {
1501 let sender_id = envelope.original_sender_id()?;
1502 let project_id = self.remote_id().ok_or_else(|| anyhow!("not connected"))?;
1503 let buffer = self
1504 .shared_buffers
1505 .get(&sender_id)
1506 .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
1507 .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
1508 let receipt = envelope.receipt();
1509 let buffer_id = envelope.payload.buffer_id;
1510 let save = cx.spawn(|_, mut cx| async move {
1511 buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
1512 });
1513
1514 cx.background()
1515 .spawn(
1516 async move {
1517 let (version, mtime) = save.await?;
1518
1519 rpc.respond(
1520 receipt,
1521 proto::BufferSaved {
1522 project_id,
1523 buffer_id,
1524 version: (&version).into(),
1525 mtime: Some(mtime.into()),
1526 },
1527 )
1528 .await?;
1529
1530 Ok(())
1531 }
1532 .log_err(),
1533 )
1534 .detach();
1535 Ok(())
1536 }
1537
1538 pub fn handle_format_buffer(
1539 &mut self,
1540 envelope: TypedEnvelope<proto::FormatBuffer>,
1541 rpc: Arc<Client>,
1542 cx: &mut ModelContext<Self>,
1543 ) -> Result<()> {
1544 let receipt = envelope.receipt();
1545 let sender_id = envelope.original_sender_id()?;
1546 let buffer = self
1547 .shared_buffers
1548 .get(&sender_id)
1549 .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
1550 .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
1551 cx.spawn(|_, mut cx| async move {
1552 let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
1553 // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
1554 // associated with formatting.
1555 cx.spawn(|_| async move {
1556 match format {
1557 Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
1558 Err(error) => {
1559 rpc.respond_with_error(
1560 receipt,
1561 proto::Error {
1562 message: error.to_string(),
1563 },
1564 )
1565 .await?
1566 }
1567 }
1568 Ok::<_, anyhow::Error>(())
1569 })
1570 .await
1571 .log_err();
1572 })
1573 .detach();
1574 Ok(())
1575 }
1576
1577 pub fn handle_open_buffer(
1578 &mut self,
1579 envelope: TypedEnvelope<proto::OpenBuffer>,
1580 rpc: Arc<Client>,
1581 cx: &mut ModelContext<Self>,
1582 ) -> anyhow::Result<()> {
1583 let receipt = envelope.receipt();
1584 let peer_id = envelope.original_sender_id()?;
1585 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1586 let open_buffer = self.open_buffer(
1587 ProjectPath {
1588 worktree_id,
1589 path: PathBuf::from(envelope.payload.path).into(),
1590 },
1591 cx,
1592 );
1593 cx.spawn(|this, mut cx| {
1594 async move {
1595 let buffer = open_buffer.await?;
1596 this.update(&mut cx, |this, _| {
1597 this.shared_buffers
1598 .entry(peer_id)
1599 .or_default()
1600 .insert(buffer.id() as u64, buffer.clone());
1601 });
1602 let message = buffer.read_with(&cx, |buffer, _| buffer.to_proto());
1603 rpc.respond(
1604 receipt,
1605 proto::OpenBufferResponse {
1606 buffer: Some(message),
1607 },
1608 )
1609 .await
1610 }
1611 .log_err()
1612 })
1613 .detach();
1614 Ok(())
1615 }
1616
1617 pub fn handle_close_buffer(
1618 &mut self,
1619 envelope: TypedEnvelope<proto::CloseBuffer>,
1620 _: Arc<Client>,
1621 cx: &mut ModelContext<Self>,
1622 ) -> anyhow::Result<()> {
1623 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1624 shared_buffers.remove(&envelope.payload.buffer_id);
1625 cx.notify();
1626 }
1627 Ok(())
1628 }
1629
1630 pub fn handle_buffer_saved(
1631 &mut self,
1632 envelope: TypedEnvelope<proto::BufferSaved>,
1633 _: Arc<Client>,
1634 cx: &mut ModelContext<Self>,
1635 ) -> Result<()> {
1636 let payload = envelope.payload.clone();
1637 let buffer = self
1638 .open_buffers
1639 .get(&(payload.buffer_id as usize))
1640 .and_then(|buf| {
1641 if let OpenBuffer::Loaded(buffer) = buf {
1642 buffer.upgrade(cx)
1643 } else {
1644 None
1645 }
1646 });
1647 if let Some(buffer) = buffer {
1648 buffer.update(cx, |buffer, cx| {
1649 let version = payload.version.try_into()?;
1650 let mtime = payload
1651 .mtime
1652 .ok_or_else(|| anyhow!("missing mtime"))?
1653 .into();
1654 buffer.did_save(version, mtime, None, cx);
1655 Result::<_, anyhow::Error>::Ok(())
1656 })?;
1657 }
1658 Ok(())
1659 }
1660
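    /// Fuzzy-matches `query` against the paths of all non-weak worktrees, running
    /// the search on the background executor.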
1661 pub fn match_paths<'a>(
1662 &self,
1663 query: &'a str,
1664 include_ignored: bool,
1665 smart_case: bool,
1666 max_results: usize,
1667 cancel_flag: &'a AtomicBool,
1668 cx: &AppContext,
1669 ) -> impl 'a + Future<Output = Vec<PathMatch>> {
1670 let worktrees = self
1671 .worktrees(cx)
1672 .filter(|worktree| !worktree.read(cx).is_weak())
1673 .collect::<Vec<_>>();
1674 let include_root_name = worktrees.len() > 1;
1675 let candidate_sets = worktrees
1676 .into_iter()
1677 .map(|worktree| CandidateSet {
1678 snapshot: worktree.read(cx).snapshot(),
1679 include_ignored,
1680 include_root_name,
1681 })
1682 .collect::<Vec<_>>();
1683
1684 let background = cx.background().clone();
1685 async move {
1686 fuzzy::match_paths(
1687 candidate_sets.as_slice(),
1688 query,
1689 smart_case,
1690 max_results,
1691 cancel_flag,
1692 background,
1693 )
1694 .await
1695 }
1696 }
1697}
1698
1699impl WorktreeHandle {
1700 pub fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Worktree>> {
1701 match self {
1702 WorktreeHandle::Strong(handle) => Some(handle.clone()),
1703 WorktreeHandle::Weak(handle) => handle.upgrade(cx),
1704 }
1705 }
1706}
1707
1708struct CandidateSet {
1709 snapshot: Snapshot,
1710 include_ignored: bool,
1711 include_root_name: bool,
1712}
1713
1714impl<'a> PathMatchCandidateSet<'a> for CandidateSet {
1715 type Candidates = CandidateSetIter<'a>;
1716
1717 fn id(&self) -> usize {
1718 self.snapshot.id().to_usize()
1719 }
1720
1721 fn len(&self) -> usize {
1722 if self.include_ignored {
1723 self.snapshot.file_count()
1724 } else {
1725 self.snapshot.visible_file_count()
1726 }
1727 }
1728
1729 fn prefix(&self) -> Arc<str> {
1730 if self.snapshot.root_entry().map_or(false, |e| e.is_file()) {
1731 self.snapshot.root_name().into()
1732 } else if self.include_root_name {
1733 format!("{}/", self.snapshot.root_name()).into()
1734 } else {
1735 "".into()
1736 }
1737 }
1738
1739 fn candidates(&'a self, start: usize) -> Self::Candidates {
1740 CandidateSetIter {
1741 traversal: self.snapshot.files(self.include_ignored, start),
1742 }
1743 }
1744}
1745
1746struct CandidateSetIter<'a> {
1747 traversal: Traversal<'a>,
1748}
1749
1750impl<'a> Iterator for CandidateSetIter<'a> {
1751 type Item = PathMatchCandidate<'a>;
1752
1753 fn next(&mut self) -> Option<Self::Item> {
1754 self.traversal.next().map(|entry| {
1755 if let EntryKind::File(char_bag) = entry.kind {
1756 PathMatchCandidate {
1757 path: &entry.path,
1758 char_bag,
1759 }
1760 } else {
1761 unreachable!()
1762 }
1763 })
1764 }
1765}
1766
1767impl Entity for Project {
1768 type Event = Event;
1769
1770 fn release(&mut self, cx: &mut gpui::MutableAppContext) {
1771 match &self.client_state {
1772 ProjectClientState::Local { remote_id_rx, .. } => {
1773 if let Some(project_id) = *remote_id_rx.borrow() {
1774 let rpc = self.client.clone();
1775 cx.spawn(|_| async move {
1776 if let Err(err) = rpc.send(proto::UnregisterProject { project_id }).await {
1777 log::error!("error unregistering project: {}", err);
1778 }
1779 })
1780 .detach();
1781 }
1782 }
1783 ProjectClientState::Remote { remote_id, .. } => {
1784 let rpc = self.client.clone();
1785 let project_id = *remote_id;
1786 cx.spawn(|_| async move {
1787 if let Err(err) = rpc.send(proto::LeaveProject { project_id }).await {
1788 log::error!("error leaving project: {}", err);
1789 }
1790 })
1791 .detach();
1792 }
1793 }
1794 }
1795
1796 fn app_will_quit(
1797 &mut self,
1798 _: &mut MutableAppContext,
1799 ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
1800 use futures::FutureExt;
1801
1802 let shutdown_futures = self
1803 .language_servers
1804 .drain()
1805 .filter_map(|(_, server)| server.shutdown())
1806 .collect::<Vec<_>>();
1807 Some(
1808 async move {
1809 futures::future::join_all(shutdown_futures).await;
1810 }
1811 .boxed(),
1812 )
1813 }
1814}
1815
1816impl Collaborator {
1817 fn from_proto(
1818 message: proto::Collaborator,
1819 user_store: &ModelHandle<UserStore>,
1820 cx: &mut AsyncAppContext,
1821 ) -> impl Future<Output = Result<Self>> {
1822 let user = user_store.update(cx, |user_store, cx| {
1823 user_store.fetch_user(message.user_id, cx)
1824 });
1825
1826 async move {
1827 Ok(Self {
1828 peer_id: PeerId(message.peer_id),
1829 user: user.await?,
1830 replica_id: message.replica_id as ReplicaId,
1831 })
1832 }
1833 }
1834}
1835
1836impl<P: AsRef<Path>> From<(WorktreeId, P)> for ProjectPath {
1837 fn from((worktree_id, path): (WorktreeId, P)) -> Self {
1838 Self {
1839 worktree_id,
1840 path: path.as_ref().into(),
1841 }
1842 }
1843}
1844
1845impl OpenBuffer {
1846 fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Buffer>> {
1847 match self {
1848 OpenBuffer::Loaded(buffer) => buffer.upgrade(cx),
1849 OpenBuffer::Operations(_) => None,
1850 }
1851 }
1852}
1853
1854#[cfg(test)]
1855mod tests {
1856 use super::{Event, *};
1857 use client::test::FakeHttpClient;
1858 use fs::RealFs;
1859 use futures::StreamExt;
1860 use gpui::{test::subscribe, TestAppContext};
1861 use language::{
1862 tree_sitter_rust, AnchorRangeExt, Diagnostic, LanguageConfig, LanguageRegistry,
1863 LanguageServerConfig, Point,
1864 };
1865 use lsp::Url;
1866 use serde_json::json;
1867 use std::{cell::RefCell, os::unix, path::PathBuf, rc::Rc};
1868 use unindent::Unindent as _;
1869 use util::test::temp_tree;
1870 use worktree::WorktreeHandle as _;
1871
1872 #[gpui::test]
1873 async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
1874 let dir = temp_tree(json!({
1875 "root": {
1876 "apple": "",
1877 "banana": {
1878 "carrot": {
1879 "date": "",
1880 "endive": "",
1881 }
1882 },
1883 "fennel": {
1884 "grape": "",
1885 }
1886 }
1887 }));
1888
1889 let root_link_path = dir.path().join("root_link");
1890 unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
1891 unix::fs::symlink(
1892 &dir.path().join("root/fennel"),
1893 &dir.path().join("root/finnochio"),
1894 )
1895 .unwrap();
1896
1897 let project = build_project(Arc::new(RealFs), &mut cx);
1898
1899 let (tree, _) = project
1900 .update(&mut cx, |project, cx| {
1901 project.find_or_create_local_worktree(&root_link_path, false, cx)
1902 })
1903 .await
1904 .unwrap();
1905
1906 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
1907 .await;
1908 cx.read(|cx| {
1909 let tree = tree.read(cx);
1910 assert_eq!(tree.file_count(), 5);
1911 assert_eq!(
1912 tree.inode_for_path("fennel/grape"),
1913 tree.inode_for_path("finnochio/grape")
1914 );
1915 });
1916
1917 let cancel_flag = Default::default();
1918 let results = project
1919 .read_with(&cx, |project, cx| {
1920 project.match_paths("bna", false, false, 10, &cancel_flag, cx)
1921 })
1922 .await;
1923 assert_eq!(
1924 results
1925 .into_iter()
1926 .map(|result| result.path)
1927 .collect::<Vec<Arc<Path>>>(),
1928 vec![
1929 PathBuf::from("banana/carrot/date").into(),
1930 PathBuf::from("banana/carrot/endive").into(),
1931 ]
1932 );
1933 }
1934
1935 #[gpui::test]
1936 async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
1937 let (language_server_config, mut fake_server) =
1938 LanguageServerConfig::fake(cx.background()).await;
1939 let progress_token = language_server_config
1940 .disk_based_diagnostics_progress_token
1941 .clone()
1942 .unwrap();
1943
1944 let mut languages = LanguageRegistry::new();
1945 languages.add(Arc::new(Language::new(
1946 LanguageConfig {
1947 name: "Rust".to_string(),
1948 path_suffixes: vec!["rs".to_string()],
1949 language_server: Some(language_server_config),
1950 ..Default::default()
1951 },
1952 Some(tree_sitter_rust::language()),
1953 )));
1954
1955 let dir = temp_tree(json!({
1956 "a.rs": "fn a() { A }",
1957 "b.rs": "const y: i32 = 1",
1958 }));
1959
1960 let http_client = FakeHttpClient::with_404_response();
1961 let client = Client::new(http_client.clone());
1962 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
1963
1964 let project = cx.update(|cx| {
1965 Project::local(
1966 client,
1967 user_store,
1968 Arc::new(languages),
1969 Arc::new(RealFs),
1970 cx,
1971 )
1972 });
1973
1974 let (tree, _) = project
1975 .update(&mut cx, |project, cx| {
1976 project.find_or_create_local_worktree(dir.path(), false, cx)
1977 })
1978 .await
1979 .unwrap();
1980 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
1981
1982 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
1983 .await;
1984
1985 // Cause worktree to start the fake language server
1986 let _buffer = project
1987 .update(&mut cx, |project, cx| {
1988 project.open_buffer(
1989 ProjectPath {
1990 worktree_id,
1991 path: Path::new("b.rs").into(),
1992 },
1993 cx,
1994 )
1995 })
1996 .await
1997 .unwrap();
1998
1999 let mut events = subscribe(&project, &mut cx);
2000
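// The first progress notification for the disk-based diagnostics token
// emits a DiskBasedDiagnosticsStarted event.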
2001 fake_server.start_progress(&progress_token).await;
2002 assert_eq!(
2003 events.next().await.unwrap(),
2004 Event::DiskBasedDiagnosticsStarted
2005 );
2006
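// Overlapping progress for the same token does not emit additional Started events.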
2007 fake_server.start_progress(&progress_token).await;
2008 fake_server.end_progress(&progress_token).await;
2009 fake_server.start_progress(&progress_token).await;
2010
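// Publish a diagnostic for a.rs while the disk-based diagnostics are still running.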
2011 fake_server
2012 .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
2013 uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
2014 version: None,
2015 diagnostics: vec![lsp::Diagnostic {
2016 range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
2017 severity: Some(lsp::DiagnosticSeverity::ERROR),
2018 message: "undefined variable 'A'".to_string(),
2019 ..Default::default()
2020 }],
2021 })
2022 .await;
2023 assert_eq!(
2024 events.next().await.unwrap(),
2025 Event::DiagnosticsUpdated(ProjectPath {
2026 worktree_id,
2027 path: Arc::from(Path::new("a.rs"))
2028 })
2029 );
2030
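// Ending the remaining outstanding progress emits the Updated and Finished events.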
2031 fake_server.end_progress(&progress_token).await;
2032 fake_server.end_progress(&progress_token).await;
2033 assert_eq!(
2034 events.next().await.unwrap(),
2035 Event::DiskBasedDiagnosticsUpdated
2036 );
2037 assert_eq!(
2038 events.next().await.unwrap(),
2039 Event::DiskBasedDiagnosticsFinished
2040 );
2041
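// Open a.rs and verify that the published diagnostic was recorded on the buffer
// in buffer coordinates.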
2042 let buffer = project
2043 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
2044 .await
2045 .unwrap();
2046
2047 buffer.read_with(&cx, |buffer, _| {
2048 let snapshot = buffer.snapshot();
2049 let diagnostics = snapshot
2050 .diagnostics_in_range::<_, Point>(0..buffer.len())
2051 .collect::<Vec<_>>();
2052 assert_eq!(
2053 diagnostics,
2054 &[DiagnosticEntry {
2055 range: Point::new(0, 9)..Point::new(0, 10),
2056 diagnostic: Diagnostic {
2057 severity: lsp::DiagnosticSeverity::ERROR,
2058 message: "undefined variable 'A'".to_string(),
2059 group_id: 0,
2060 is_primary: true,
2061 ..Default::default()
2062 }
2063 }]
2064 )
2065 });
2066 }
2067
2068 #[gpui::test]
2069 async fn test_search_worktree_without_files(mut cx: gpui::TestAppContext) {
2070 let dir = temp_tree(json!({
2071 "root": {
2072 "dir1": {},
2073 "dir2": {
2074 "dir3": {}
2075 }
2076 }
2077 }));
2078
2079 let project = build_project(Arc::new(RealFs), &mut cx);
2080 let (tree, _) = project
2081 .update(&mut cx, |project, cx| {
2082 project.find_or_create_local_worktree(dir.path(), false, cx)
2083 })
2084 .await
2085 .unwrap();
2086
2087 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2088 .await;
2089
2090 let cancel_flag = Default::default();
2091 let results = project
2092 .read_with(&cx, |project, cx| {
2093 project.match_paths("dir", false, false, 10, &cancel_flag, cx)
2094 })
2095 .await;
2096
2097 assert!(results.is_empty());
2098 }
2099
2100 #[gpui::test]
2101 async fn test_definition(mut cx: gpui::TestAppContext) {
2102 let (language_server_config, mut fake_server) =
2103 LanguageServerConfig::fake(cx.background()).await;
2104
2105 let mut languages = LanguageRegistry::new();
2106 languages.add(Arc::new(Language::new(
2107 LanguageConfig {
2108 name: "Rust".to_string(),
2109 path_suffixes: vec!["rs".to_string()],
2110 language_server: Some(language_server_config),
2111 ..Default::default()
2112 },
2113 Some(tree_sitter_rust::language()),
2114 )));
2115
2116 let dir = temp_tree(json!({
2117 "a.rs": "const fn a() { A }",
2118 "b.rs": "const y: i32 = crate::a()",
2119 }));
2120
2121 let http_client = FakeHttpClient::with_404_response();
2122 let client = Client::new(http_client.clone());
2123 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
2124 let project = cx.update(|cx| {
2125 Project::local(
2126 client,
2127 user_store,
2128 Arc::new(languages),
2129 Arc::new(RealFs),
2130 cx,
2131 )
2132 });
2133
2134 let (tree, _) = project
2135 .update(&mut cx, |project, cx| {
2136 project.find_or_create_local_worktree(dir.path().join("b.rs"), false, cx)
2137 })
2138 .await
2139 .unwrap();
2140 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
2141 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2142 .await;
2143
2144 // Cause worktree to start the fake language server
2145 let buffer = project
2146 .update(&mut cx, |project, cx| {
2147 project.open_buffer(
2148 ProjectPath {
2149 worktree_id,
2150 path: Path::new("").into(),
2151 },
2152 cx,
2153 )
2154 })
2155 .await
2156 .unwrap();
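// Request the definition at offset 22, which falls on the `a` in `crate::a()`.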
2157 let definitions =
2158 project.update(&mut cx, |project, cx| project.definition(&buffer, 22, cx));
2159 let (request_id, request) = fake_server
2160 .receive_request::<lsp::request::GotoDefinition>()
2161 .await;
2162 let request_params = request.text_document_position_params;
2163 assert_eq!(
2164 request_params.text_document.uri.to_file_path().unwrap(),
2165 dir.path().join("b.rs")
2166 );
2167 assert_eq!(request_params.position, lsp::Position::new(0, 22));
2168
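// Respond with a location in a.rs. Since that file lies outside the opened
// worktree, the project should add it as a weak worktree.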
2169 fake_server
2170 .respond(
2171 request_id,
2172 Some(lsp::GotoDefinitionResponse::Scalar(lsp::Location::new(
2173 lsp::Url::from_file_path(dir.path().join("a.rs")).unwrap(),
2174 lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
2175 ))),
2176 )
2177 .await;
2178 let mut definitions = definitions.await.unwrap();
2179 assert_eq!(definitions.len(), 1);
2180 let definition = definitions.pop().unwrap();
2181 cx.update(|cx| {
2182 let target_buffer = definition.target_buffer.read(cx);
2183 assert_eq!(
2184 target_buffer.file().unwrap().abs_path(cx),
2185 Some(dir.path().join("a.rs"))
2186 );
2187 assert_eq!(definition.target_range.to_offset(target_buffer), 9..10);
2188 assert_eq!(
2189 list_worktrees(&project, cx),
2190 [
2191 (dir.path().join("b.rs"), false),
2192 (dir.path().join("a.rs"), true)
2193 ]
2194 );
2195
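// Dropping the only handle to the target buffer should release the weak
// worktree that was created for a.rs.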
2196 drop(definition);
2197 });
2198 cx.read(|cx| {
2199 assert_eq!(
2200 list_worktrees(&project, cx),
2201 [(dir.path().join("b.rs"), false)]
2202 );
2203 });
2204
2205 fn list_worktrees(project: &ModelHandle<Project>, cx: &AppContext) -> Vec<(PathBuf, bool)> {
2206 project
2207 .read(cx)
2208 .worktrees(cx)
2209 .map(|worktree| {
2210 let worktree = worktree.read(cx);
2211 (
2212 worktree.as_local().unwrap().abs_path().to_path_buf(),
2213 worktree.is_weak(),
2214 )
2215 })
2216 .collect::<Vec<_>>()
2217 }
2218 }
2219
2220 #[gpui::test]
2221 async fn test_save_file(mut cx: gpui::TestAppContext) {
2222 let fs = Arc::new(FakeFs::new());
2223 fs.insert_tree(
2224 "/dir",
2225 json!({
2226 "file1": "the old contents",
2227 }),
2228 )
2229 .await;
2230
2231 let project = build_project(fs.clone(), &mut cx);
2232 let worktree_id = project
2233 .update(&mut cx, |p, cx| {
2234 p.find_or_create_local_worktree("/dir", false, cx)
2235 })
2236 .await
2237 .unwrap()
2238 .0
2239 .read_with(&cx, |tree, _| tree.id());
2240
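// Open the file, prepend a large amount of text, and save it back through the buffer.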
2241 let buffer = project
2242 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
2243 .await
2244 .unwrap();
2245 buffer
2246 .update(&mut cx, |buffer, cx| {
2247 assert_eq!(buffer.text(), "the old contents");
2248 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
2249 buffer.save(cx)
2250 })
2251 .await
2252 .unwrap();
2253
2254 let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
2255 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
2256 }
2257
2258 #[gpui::test]
2259 async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
2260 let fs = Arc::new(FakeFs::new());
2261 fs.insert_tree(
2262 "/dir",
2263 json!({
2264 "file1": "the old contents",
2265 }),
2266 )
2267 .await;
2268
2269 let project = build_project(fs.clone(), &mut cx);
2270 let worktree_id = project
2271 .update(&mut cx, |p, cx| {
2272 p.find_or_create_local_worktree("/dir/file1", false, cx)
2273 })
2274 .await
2275 .unwrap()
2276 .0
2277 .read_with(&cx, |tree, _| tree.id());
2278
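// The worktree points at a single file, so the buffer is opened with an empty
// relative path.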
2279 let buffer = project
2280 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, ""), cx))
2281 .await
2282 .unwrap();
2283 buffer
2284 .update(&mut cx, |buffer, cx| {
2285 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
2286 buffer.save(cx)
2287 })
2288 .await
2289 .unwrap();
2290
2291 let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
2292 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
2293 }
2294
2295 #[gpui::test]
2296 async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
2297 let dir = temp_tree(json!({
2298 "a": {
2299 "file1": "",
2300 "file2": "",
2301 "file3": "",
2302 },
2303 "b": {
2304 "c": {
2305 "file4": "",
2306 "file5": "",
2307 }
2308 }
2309 }));
2310
2311 let project = build_project(Arc::new(RealFs), &mut cx);
2312 let rpc = project.read_with(&cx, |p, _| p.client.clone());
2313
2314 let (tree, _) = project
2315 .update(&mut cx, |p, cx| {
2316 p.find_or_create_local_worktree(dir.path(), false, cx)
2317 })
2318 .await
2319 .unwrap();
2320 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
2321
2322 let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
2323 let buffer = project.update(cx, |p, cx| p.open_buffer((worktree_id, path), cx));
2324 async move { buffer.await.unwrap() }
2325 };
2326 let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
2327 tree.read_with(cx, |tree, _| {
2328 tree.entry_for_path(path)
2329 .unwrap_or_else(|| panic!("no entry for path {}", path))
2330 .id
2331 })
2332 };
2333
2334 let buffer2 = buffer_for_path("a/file2", &mut cx).await;
2335 let buffer3 = buffer_for_path("a/file3", &mut cx).await;
2336 let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
2337 let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
2338
2339 let file2_id = id_for_path("a/file2", &cx);
2340 let file3_id = id_for_path("a/file3", &cx);
2341 let file4_id = id_for_path("b/c/file4", &cx);
2342
2343 // Wait for the initial scan.
2344 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2345 .await;
2346
2347 // Create a remote copy of this worktree.
2348 let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
2349 let remote = Worktree::remote(
2350 1,
2351 1,
2352 initial_snapshot.to_proto(&Default::default(), Default::default()),
2353 rpc.clone(),
2354 &mut cx.to_async(),
2355 )
2356 .await
2357 .unwrap();
2358
2359 cx.read(|cx| {
2360 assert!(!buffer2.read(cx).is_dirty());
2361 assert!(!buffer3.read(cx).is_dirty());
2362 assert!(!buffer4.read(cx).is_dirty());
2363 assert!(!buffer5.read(cx).is_dirty());
2364 });
2365
2366 // Rename and delete files and directories.
2367 tree.flush_fs_events(&cx).await;
2368 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
2369 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
2370 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
2371 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
2372 tree.flush_fs_events(&cx).await;
2373
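// The rescanned worktree should reflect the renames and deletions, and the
// surviving entries should keep their original ids.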
2374 let expected_paths = vec![
2375 "a",
2376 "a/file1",
2377 "a/file2.new",
2378 "b",
2379 "d",
2380 "d/file3",
2381 "d/file4",
2382 ];
2383
2384 cx.read(|app| {
2385 assert_eq!(
2386 tree.read(app)
2387 .paths()
2388 .map(|p| p.to_str().unwrap())
2389 .collect::<Vec<_>>(),
2390 expected_paths
2391 );
2392
2393 assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
2394 assert_eq!(id_for_path("d/file3", &cx), file3_id);
2395 assert_eq!(id_for_path("d/file4", &cx), file4_id);
2396
2397 assert_eq!(
2398 buffer2.read(app).file().unwrap().path().as_ref(),
2399 Path::new("a/file2.new")
2400 );
2401 assert_eq!(
2402 buffer3.read(app).file().unwrap().path().as_ref(),
2403 Path::new("d/file3")
2404 );
2405 assert_eq!(
2406 buffer4.read(app).file().unwrap().path().as_ref(),
2407 Path::new("d/file4")
2408 );
2409 assert_eq!(
2410 buffer5.read(app).file().unwrap().path().as_ref(),
2411 Path::new("b/c/file5")
2412 );
2413
2414 assert!(!buffer2.read(app).file().unwrap().is_deleted());
2415 assert!(!buffer3.read(app).file().unwrap().is_deleted());
2416 assert!(!buffer4.read(app).file().unwrap().is_deleted());
2417 assert!(buffer5.read(app).file().unwrap().is_deleted());
2418 });
2419
2420 // Update the remote worktree. Check that it becomes consistent with the
2421 // local worktree.
2422 remote.update(&mut cx, |remote, cx| {
2423 let update_message =
2424 tree.read(cx)
2425 .snapshot()
2426 .build_update(&initial_snapshot, 1, 1, true);
2427 remote
2428 .as_remote_mut()
2429 .unwrap()
2430 .snapshot
2431 .apply_update(update_message)
2432 .unwrap();
2433
2434 assert_eq!(
2435 remote
2436 .paths()
2437 .map(|p| p.to_str().unwrap())
2438 .collect::<Vec<_>>(),
2439 expected_paths
2440 );
2441 });
2442 }
2443
2444 #[gpui::test]
2445 async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
2446 let fs = Arc::new(FakeFs::new());
2447 fs.insert_tree(
2448 "/the-dir",
2449 json!({
2450 "a.txt": "a-contents",
2451 "b.txt": "b-contents",
2452 }),
2453 )
2454 .await;
2455
2456 let project = build_project(fs.clone(), &mut cx);
2457 let worktree_id = project
2458 .update(&mut cx, |p, cx| {
2459 p.find_or_create_local_worktree("/the-dir", false, cx)
2460 })
2461 .await
2462 .unwrap()
2463 .0
2464 .read_with(&cx, |tree, _| tree.id());
2465
2466 // Spawn multiple tasks to open paths, repeating some paths.
2467 let (buffer_a_1, buffer_b, buffer_a_2) = project.update(&mut cx, |p, cx| {
2468 (
2469 p.open_buffer((worktree_id, "a.txt"), cx),
2470 p.open_buffer((worktree_id, "b.txt"), cx),
2471 p.open_buffer((worktree_id, "a.txt"), cx),
2472 )
2473 });
2474
2475 let buffer_a_1 = buffer_a_1.await.unwrap();
2476 let buffer_a_2 = buffer_a_2.await.unwrap();
2477 let buffer_b = buffer_b.await.unwrap();
2478 assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
2479 assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
2480
2481 // There is only one buffer per path.
2482 let buffer_a_id = buffer_a_1.id();
2483 assert_eq!(buffer_a_2.id(), buffer_a_id);
2484
2485 // Open the same path again while it is still open.
2486 drop(buffer_a_1);
2487 let buffer_a_3 = project
2488 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))
2489 .await
2490 .unwrap();
2491
2492 // There's still only one buffer per path.
2493 assert_eq!(buffer_a_3.id(), buffer_a_id);
2494 }
2495
2496 #[gpui::test]
2497 async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
2498 use std::fs;
2499
2500 let dir = temp_tree(json!({
2501 "file1": "abc",
2502 "file2": "def",
2503 "file3": "ghi",
2504 }));
2505
2506 let project = build_project(Arc::new(RealFs), &mut cx);
2507 let (worktree, _) = project
2508 .update(&mut cx, |p, cx| {
2509 p.find_or_create_local_worktree(dir.path(), false, cx)
2510 })
2511 .await
2512 .unwrap();
2513 let worktree_id = worktree.read_with(&cx, |worktree, _| worktree.id());
2514
2515 worktree.flush_fs_events(&cx).await;
2516 worktree
2517 .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
2518 .await;
2519
2520 let buffer1 = project
2521 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
2522 .await
2523 .unwrap();
2524 let events = Rc::new(RefCell::new(Vec::new()));
2525
2526 // initially, the buffer isn't dirty.
2527 buffer1.update(&mut cx, |buffer, cx| {
2528 cx.subscribe(&buffer1, {
2529 let events = events.clone();
2530 move |_, _, event, _| events.borrow_mut().push(event.clone())
2531 })
2532 .detach();
2533
2534 assert!(!buffer.is_dirty());
2535 assert!(events.borrow().is_empty());
2536
2537 buffer.edit(vec![1..2], "", cx);
2538 });
2539
2540 // after the first edit, the buffer is dirty, and emits a dirtied event.
2541 buffer1.update(&mut cx, |buffer, cx| {
2542 assert_eq!(buffer.text(), "ac");
2543 assert!(buffer.is_dirty());
2544 assert_eq!(
2545 *events.borrow(),
2546 &[language::Event::Edited, language::Event::Dirtied]
2547 );
2548 events.borrow_mut().clear();
2549 buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
2550 });
2551
2552 // after saving, the buffer is not dirty, and emits a saved event.
2553 buffer1.update(&mut cx, |buffer, cx| {
2554 assert!(!buffer.is_dirty());
2555 assert_eq!(*events.borrow(), &[language::Event::Saved]);
2556 events.borrow_mut().clear();
2557
2558 buffer.edit(vec![1..1], "B", cx);
2559 buffer.edit(vec![2..2], "D", cx);
2560 });
2561
2562 // after editing again, the buffer is dirty, and emits another dirty event.
2563 buffer1.update(&mut cx, |buffer, cx| {
2564 assert_eq!(buffer.text(), "aBDc");
2565 assert!(buffer.is_dirty());
2566 assert_eq!(
2567 *events.borrow(),
2568 &[
2569 language::Event::Edited,
2570 language::Event::Dirtied,
2571 language::Event::Edited,
2572 ],
2573 );
2574 events.borrow_mut().clear();
2575
2576 // TODO - currently, after restoring the buffer to its
2577 // previously-saved state, the buffer is still considered dirty.
2578 buffer.edit([1..3], "", cx);
2579 assert_eq!(buffer.text(), "ac");
2580 assert!(buffer.is_dirty());
2581 });
2582
2583 assert_eq!(*events.borrow(), &[language::Event::Edited]);
2584
2585 // When a file is deleted, the buffer is considered dirty.
2586 let events = Rc::new(RefCell::new(Vec::new()));
2587 let buffer2 = project
2588 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file2"), cx))
2589 .await
2590 .unwrap();
2591 buffer2.update(&mut cx, |_, cx| {
2592 cx.subscribe(&buffer2, {
2593 let events = events.clone();
2594 move |_, _, event, _| events.borrow_mut().push(event.clone())
2595 })
2596 .detach();
2597 });
2598
2599 fs::remove_file(dir.path().join("file2")).unwrap();
2600 buffer2.condition(&cx, |b, _| b.is_dirty()).await;
2601 assert_eq!(
2602 *events.borrow(),
2603 &[language::Event::Dirtied, language::Event::FileHandleChanged]
2604 );
2605
2606 // When a file is already dirty when deleted, we don't emit a Dirtied event.
2607 let events = Rc::new(RefCell::new(Vec::new()));
2608 let buffer3 = project
2609 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file3"), cx))
2610 .await
2611 .unwrap();
2612 buffer3.update(&mut cx, |_, cx| {
2613 cx.subscribe(&buffer3, {
2614 let events = events.clone();
2615 move |_, _, event, _| events.borrow_mut().push(event.clone())
2616 })
2617 .detach();
2618 });
2619
2620 worktree.flush_fs_events(&cx).await;
2621 buffer3.update(&mut cx, |buffer, cx| {
2622 buffer.edit(Some(0..0), "x", cx);
2623 });
2624 events.borrow_mut().clear();
2625 fs::remove_file(dir.path().join("file3")).unwrap();
2626 buffer3
2627 .condition(&cx, |_, _| !events.borrow().is_empty())
2628 .await;
2629 assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
2630 cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
2631 }
2632
2633 #[gpui::test]
2634 async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
2635 use std::fs;
2636
2637 let initial_contents = "aaa\nbbbbb\nc\n";
2638 let dir = temp_tree(json!({ "the-file": initial_contents }));
2639
2640 let project = build_project(Arc::new(RealFs), &mut cx);
2641 let (worktree, _) = project
2642 .update(&mut cx, |p, cx| {
2643 p.find_or_create_local_worktree(dir.path(), false, cx)
2644 })
2645 .await
2646 .unwrap();
2647 let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());
2648
2649 worktree
2650 .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
2651 .await;
2652
2653 let abs_path = dir.path().join("the-file");
2654 let buffer = project
2655 .update(&mut cx, |p, cx| {
2656 p.open_buffer((worktree_id, "the-file"), cx)
2657 })
2658 .await
2659 .unwrap();
2660
2661 // TODO
2662 // Add a cursor on each row.
2663 // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
2664 // assert!(!buffer.is_dirty());
2665 // buffer.add_selection_set(
2666 // &(0..3)
2667 // .map(|row| Selection {
2668 // id: row as usize,
2669 // start: Point::new(row, 1),
2670 // end: Point::new(row, 1),
2671 // reversed: false,
2672 // goal: SelectionGoal::None,
2673 // })
2674 // .collect::<Vec<_>>(),
2675 // cx,
2676 // )
2677 // });
2678
2679 // Change the file on disk, adding two new lines of text, and removing
2680 // one line.
2681 buffer.read_with(&cx, |buffer, _| {
2682 assert!(!buffer.is_dirty());
2683 assert!(!buffer.has_conflict());
2684 });
2685 let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
2686 fs::write(&abs_path, new_contents).unwrap();
2687
2688 // Because the buffer was not modified, it is reloaded from disk. Its
2689 // contents are edited according to the diff between the old and new
2690 // file contents.
2691 buffer
2692 .condition(&cx, |buffer, _| buffer.text() == new_contents)
2693 .await;
2694
2695 buffer.update(&mut cx, |buffer, _| {
2696 assert_eq!(buffer.text(), new_contents);
2697 assert!(!buffer.is_dirty());
2698 assert!(!buffer.has_conflict());
2699
2700 // TODO
2701 // let cursor_positions = buffer
2702 // .selection_set(selection_set_id)
2703 // .unwrap()
2704 // .selections::<Point>(&*buffer)
2705 // .map(|selection| {
2706 // assert_eq!(selection.start, selection.end);
2707 // selection.start
2708 // })
2709 // .collect::<Vec<_>>();
2710 // assert_eq!(
2711 // cursor_positions,
2712 // [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
2713 // );
2714 });
2715
2716 // Modify the buffer
2717 buffer.update(&mut cx, |buffer, cx| {
2718 buffer.edit(vec![0..0], " ", cx);
2719 assert!(buffer.is_dirty());
2720 assert!(!buffer.has_conflict());
2721 });
2722
2723 // Change the file on disk again, adding blank lines to the beginning.
2724 fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();
2725
2726 // Because the buffer is modified, it doesn't reload from disk, but is
2727 // marked as having a conflict.
2728 buffer
2729 .condition(&cx, |buffer, _| buffer.has_conflict())
2730 .await;
2731 }
2732
2733 #[gpui::test]
2734 async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
2735 let fs = Arc::new(FakeFs::new());
2736 fs.insert_tree(
2737 "/the-dir",
2738 json!({
2739 "a.rs": "
2740 fn foo(mut v: Vec<usize>) {
2741 for x in &v {
2742 v.push(1);
2743 }
2744 }
2745 "
2746 .unindent(),
2747 }),
2748 )
2749 .await;
2750
2751 let project = build_project(fs.clone(), &mut cx);
2752 let (worktree, _) = project
2753 .update(&mut cx, |p, cx| {
2754 p.find_or_create_local_worktree("/the-dir", false, cx)
2755 })
2756 .await
2757 .unwrap();
2758 let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());
2759
2760 let buffer = project
2761 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
2762 .await
2763 .unwrap();
2764
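// Build a PublishDiagnostics message in which supplementary hints point back
// to their primary diagnostics via related_information.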
2765 let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
2766 let message = lsp::PublishDiagnosticsParams {
2767 uri: buffer_uri.clone(),
2768 diagnostics: vec![
2769 lsp::Diagnostic {
2770 range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
2771 severity: Some(DiagnosticSeverity::WARNING),
2772 message: "error 1".to_string(),
2773 related_information: Some(vec![lsp::DiagnosticRelatedInformation {
2774 location: lsp::Location {
2775 uri: buffer_uri.clone(),
2776 range: lsp::Range::new(
2777 lsp::Position::new(1, 8),
2778 lsp::Position::new(1, 9),
2779 ),
2780 },
2781 message: "error 1 hint 1".to_string(),
2782 }]),
2783 ..Default::default()
2784 },
2785 lsp::Diagnostic {
2786 range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
2787 severity: Some(DiagnosticSeverity::HINT),
2788 message: "error 1 hint 1".to_string(),
2789 related_information: Some(vec![lsp::DiagnosticRelatedInformation {
2790 location: lsp::Location {
2791 uri: buffer_uri.clone(),
2792 range: lsp::Range::new(
2793 lsp::Position::new(1, 8),
2794 lsp::Position::new(1, 9),
2795 ),
2796 },
2797 message: "original diagnostic".to_string(),
2798 }]),
2799 ..Default::default()
2800 },
2801 lsp::Diagnostic {
2802 range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
2803 severity: Some(DiagnosticSeverity::ERROR),
2804 message: "error 2".to_string(),
2805 related_information: Some(vec![
2806 lsp::DiagnosticRelatedInformation {
2807 location: lsp::Location {
2808 uri: buffer_uri.clone(),
2809 range: lsp::Range::new(
2810 lsp::Position::new(1, 13),
2811 lsp::Position::new(1, 15),
2812 ),
2813 },
2814 message: "error 2 hint 1".to_string(),
2815 },
2816 lsp::DiagnosticRelatedInformation {
2817 location: lsp::Location {
2818 uri: buffer_uri.clone(),
2819 range: lsp::Range::new(
2820 lsp::Position::new(1, 13),
2821 lsp::Position::new(1, 15),
2822 ),
2823 },
2824 message: "error 2 hint 2".to_string(),
2825 },
2826 ]),
2827 ..Default::default()
2828 },
2829 lsp::Diagnostic {
2830 range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
2831 severity: Some(DiagnosticSeverity::HINT),
2832 message: "error 2 hint 1".to_string(),
2833 related_information: Some(vec![lsp::DiagnosticRelatedInformation {
2834 location: lsp::Location {
2835 uri: buffer_uri.clone(),
2836 range: lsp::Range::new(
2837 lsp::Position::new(2, 8),
2838 lsp::Position::new(2, 17),
2839 ),
2840 },
2841 message: "original diagnostic".to_string(),
2842 }]),
2843 ..Default::default()
2844 },
2845 lsp::Diagnostic {
2846 range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
2847 severity: Some(DiagnosticSeverity::HINT),
2848 message: "error 2 hint 2".to_string(),
2849 related_information: Some(vec![lsp::DiagnosticRelatedInformation {
2850 location: lsp::Location {
2851 uri: buffer_uri.clone(),
2852 range: lsp::Range::new(
2853 lsp::Position::new(2, 8),
2854 lsp::Position::new(2, 17),
2855 ),
2856 },
2857 message: "original diagnostic".to_string(),
2858 }]),
2859 ..Default::default()
2860 },
2861 ],
2862 version: None,
2863 };
2864
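// Processing the message should group each hint with its primary diagnostic:
// group 0 for the warning, group 1 for the error.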
2865 project
2866 .update(&mut cx, |p, cx| {
2867 p.update_diagnostics(message, &Default::default(), cx)
2868 })
2869 .unwrap();
2870 let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());
2871
2872 assert_eq!(
2873 buffer
2874 .diagnostics_in_range::<_, Point>(0..buffer.len())
2875 .collect::<Vec<_>>(),
2876 &[
2877 DiagnosticEntry {
2878 range: Point::new(1, 8)..Point::new(1, 9),
2879 diagnostic: Diagnostic {
2880 severity: DiagnosticSeverity::WARNING,
2881 message: "error 1".to_string(),
2882 group_id: 0,
2883 is_primary: true,
2884 ..Default::default()
2885 }
2886 },
2887 DiagnosticEntry {
2888 range: Point::new(1, 8)..Point::new(1, 9),
2889 diagnostic: Diagnostic {
2890 severity: DiagnosticSeverity::HINT,
2891 message: "error 1 hint 1".to_string(),
2892 group_id: 0,
2893 is_primary: false,
2894 ..Default::default()
2895 }
2896 },
2897 DiagnosticEntry {
2898 range: Point::new(1, 13)..Point::new(1, 15),
2899 diagnostic: Diagnostic {
2900 severity: DiagnosticSeverity::HINT,
2901 message: "error 2 hint 1".to_string(),
2902 group_id: 1,
2903 is_primary: false,
2904 ..Default::default()
2905 }
2906 },
2907 DiagnosticEntry {
2908 range: Point::new(1, 13)..Point::new(1, 15),
2909 diagnostic: Diagnostic {
2910 severity: DiagnosticSeverity::HINT,
2911 message: "error 2 hint 2".to_string(),
2912 group_id: 1,
2913 is_primary: false,
2914 ..Default::default()
2915 }
2916 },
2917 DiagnosticEntry {
2918 range: Point::new(2, 8)..Point::new(2, 17),
2919 diagnostic: Diagnostic {
2920 severity: DiagnosticSeverity::ERROR,
2921 message: "error 2".to_string(),
2922 group_id: 1,
2923 is_primary: true,
2924 ..Default::default()
2925 }
2926 }
2927 ]
2928 );
2929
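// Each group can also be retrieved by id, with its entries ordered by position.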
2930 assert_eq!(
2931 buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
2932 &[
2933 DiagnosticEntry {
2934 range: Point::new(1, 8)..Point::new(1, 9),
2935 diagnostic: Diagnostic {
2936 severity: DiagnosticSeverity::WARNING,
2937 message: "error 1".to_string(),
2938 group_id: 0,
2939 is_primary: true,
2940 ..Default::default()
2941 }
2942 },
2943 DiagnosticEntry {
2944 range: Point::new(1, 8)..Point::new(1, 9),
2945 diagnostic: Diagnostic {
2946 severity: DiagnosticSeverity::HINT,
2947 message: "error 1 hint 1".to_string(),
2948 group_id: 0,
2949 is_primary: false,
2950 ..Default::default()
2951 }
2952 },
2953 ]
2954 );
2955 assert_eq!(
2956 buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
2957 &[
2958 DiagnosticEntry {
2959 range: Point::new(1, 13)..Point::new(1, 15),
2960 diagnostic: Diagnostic {
2961 severity: DiagnosticSeverity::HINT,
2962 message: "error 2 hint 1".to_string(),
2963 group_id: 1,
2964 is_primary: false,
2965 ..Default::default()
2966 }
2967 },
2968 DiagnosticEntry {
2969 range: Point::new(1, 13)..Point::new(1, 15),
2970 diagnostic: Diagnostic {
2971 severity: DiagnosticSeverity::HINT,
2972 message: "error 2 hint 2".to_string(),
2973 group_id: 1,
2974 is_primary: false,
2975 ..Default::default()
2976 }
2977 },
2978 DiagnosticEntry {
2979 range: Point::new(2, 8)..Point::new(2, 17),
2980 diagnostic: Diagnostic {
2981 severity: DiagnosticSeverity::ERROR,
2982 message: "error 2".to_string(),
2983 group_id: 1,
2984 is_primary: true,
2985 ..Default::default()
2986 }
2987 }
2988 ]
2989 );
2990 }
2991
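// Constructs a local project over the given filesystem, with an empty language
// registry and a client whose fake HTTP transport returns 404 for every request.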
2992 fn build_project(fs: Arc<dyn Fs>, cx: &mut TestAppContext) -> ModelHandle<Project> {
2993 let languages = Arc::new(LanguageRegistry::new());
2994 let http_client = FakeHttpClient::with_404_response();
2995 let client = client::Client::new(http_client.clone());
2996 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
2997 cx.update(|cx| Project::local(client, user_store, languages, fs, cx))
2998 }
2999}