1pub mod fs;
2mod ignore;
3pub mod worktree;
4
5use anyhow::{anyhow, Result};
6use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore};
7use clock::ReplicaId;
8use collections::{hash_map, HashMap, HashSet};
9use futures::Future;
10use fuzzy::{PathMatch, PathMatchCandidate, PathMatchCandidateSet};
11use gpui::{
12 AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task,
13 WeakModelHandle,
14};
15use language::{
16 range_from_lsp, Bias, Buffer, Diagnostic, DiagnosticEntry, File as _, Language,
17 LanguageRegistry, Operation, PointUtf16, ToOffset, ToPointUtf16,
18};
19use lsp::{DiagnosticSeverity, LanguageServer};
20use postage::{prelude::Stream, watch};
21use smol::block_on;
22use std::{
23 convert::TryInto,
24 ops::Range,
25 path::{Path, PathBuf},
26 sync::{atomic::AtomicBool, Arc},
27};
28use util::{post_inc, ResultExt, TryFutureExt as _};
29
30pub use fs::*;
31pub use worktree::*;
32
/// A collection of worktrees plus the state needed to collaborate on them
/// over RPC and to talk to language servers.
pub struct Project {
    // Worktree handles; weak entries may dangle and are filtered by `worktrees()`.
    worktrees: Vec<WorktreeHandle>,
    // The entry most recently reported active, if any.
    active_entry: Option<ProjectEntry>,
    // Registry used to pick a language (and server) for opened buffers.
    languages: Arc<LanguageRegistry>,
    // Running language servers, keyed by worktree and language name.
    language_servers: HashMap<(WorktreeId, String), Arc<LanguageServer>>,
    client: Arc<client::Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Whether this replica is the host (`Local`) or a guest (`Remote`).
    client_state: ProjectClientState,
    collaborators: HashMap<PeerId, Collaborator>,
    // RPC message subscriptions; cleared and rebuilt when the remote id changes.
    subscriptions: Vec<client::Subscription>,
    // Count of language servers currently running disk-based diagnostics;
    // drives the DiskBasedDiagnostics{Started,Finished} events.
    language_servers_with_diagnostics_running: isize,
    // Buffers known to this project, keyed by the buffer's remote id.
    open_buffers: HashMap<usize, OpenBuffer>,
    // In-flight buffer loads, so concurrent `open_buffer` calls for the same
    // path share one load; the watch resolves to the loaded buffer or error.
    loading_buffers: HashMap<
        ProjectPath,
        postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
    >,
    // Buffers shared with each remote peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
}
52
/// State of a buffer tracked in `Project::open_buffers`.
enum OpenBuffer {
    // Operations received for a buffer that isn't loaded yet; presumably
    // applied once the buffer arrives — the handling code is elsewhere in
    // this file (not visible in this chunk).
    Operations(Vec<Operation>),
    // A loaded buffer, held weakly so closing it elsewhere lets it drop.
    Loaded(WeakModelHandle<Buffer>),
}
57
/// A worktree reference that is either owning (`Strong`) or non-owning
/// (`Weak`); weak handles allow the worktree model to be released while the
/// project still lists it.
enum WorktreeHandle {
    Strong(ModelHandle<Worktree>),
    Weak(WeakModelHandle<Worktree>),
}
62
/// Distinguishes the hosting replica of a project from a remote guest.
enum ProjectClientState {
    Local {
        // True between a successful `share` and a subsequent `unshare`.
        is_shared: bool,
        // Publishes the server-assigned project id; `None` while disconnected.
        remote_id_tx: watch::Sender<Option<u64>>,
        remote_id_rx: watch::Receiver<Option<u64>>,
        // Keeps alive the connection-status watcher spawned in `Project::local`.
        _maintain_remote_id_task: Task<Option<()>>,
    },
    Remote {
        // When true, `is_read_only` reports this project as read-only.
        sharing_has_stopped: bool,
        remote_id: u64,
        replica_id: ReplicaId,
    },
}
76
/// A remote participant in a shared project.
#[derive(Clone, Debug)]
pub struct Collaborator {
    pub user: Arc<User>,
    pub peer_id: PeerId,
    pub replica_id: ReplicaId,
}
83
/// Events emitted by `Project` for observers (via `cx.emit`).
#[derive(Clone, Debug, PartialEq)]
pub enum Event {
    ActiveEntryChanged(Option<ProjectEntry>),
    WorktreeRemoved(WorktreeId),
    // Lifecycle of disk-based diagnostics (e.g. a compiler run driven by a
    // language server); see `start_language_server`.
    DiskBasedDiagnosticsStarted,
    DiskBasedDiagnosticsUpdated,
    DiskBasedDiagnosticsFinished,
    // Diagnostics changed for the given path; see `update_diagnostic_entries`.
    DiagnosticsUpdated(ProjectPath),
}
93
/// A path to a file, addressed relative to one of the project's worktrees.
#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct ProjectPath {
    pub worktree_id: WorktreeId,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
}
99
/// Per-severity counts of primary diagnostics; built by
/// `DiagnosticSummary::new`.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct DiagnosticSummary {
    pub error_count: usize,
    pub warning_count: usize,
    pub info_count: usize,
    pub hint_count: usize,
}
107
/// A single go-to-definition result: the buffer containing the target and
/// the anchored range within it. Produced by `Project::definition`.
#[derive(Debug)]
pub struct Definition {
    pub target_buffer: ModelHandle<Buffer>,
    pub target_range: Range<language::Anchor>,
}
113
114impl DiagnosticSummary {
115 fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
116 let mut this = Self {
117 error_count: 0,
118 warning_count: 0,
119 info_count: 0,
120 hint_count: 0,
121 };
122
123 for entry in diagnostics {
124 if entry.diagnostic.is_primary {
125 match entry.diagnostic.severity {
126 DiagnosticSeverity::ERROR => this.error_count += 1,
127 DiagnosticSeverity::WARNING => this.warning_count += 1,
128 DiagnosticSeverity::INFORMATION => this.info_count += 1,
129 DiagnosticSeverity::HINT => this.hint_count += 1,
130 _ => {}
131 }
132 }
133 }
134
135 this
136 }
137
138 pub fn to_proto(&self, path: Arc<Path>) -> proto::DiagnosticSummary {
139 proto::DiagnosticSummary {
140 path: path.to_string_lossy().to_string(),
141 error_count: self.error_count as u32,
142 warning_count: self.warning_count as u32,
143 info_count: self.info_count as u32,
144 hint_count: self.hint_count as u32,
145 }
146 }
147}
148
/// Identifies a single worktree entry (file or directory) within the project.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ProjectEntry {
    pub worktree_id: WorktreeId,
    pub entry_id: usize,
}
154
155impl Project {
    /// Creates a local (host-side) project.
    ///
    /// Spawns a background task that watches the RPC connection status:
    /// whenever the client is connected it registers the project with the
    /// server, re-registers every worktree under the fresh project id, and
    /// records the id via `set_remote_id`; while disconnected the id is
    /// cleared to `None`.
    pub fn local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut MutableAppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let (remote_id_tx, remote_id_rx) = watch::channel();
            let _maintain_remote_id_task = cx.spawn_weak({
                let rpc = client.clone();
                move |this, mut cx| {
                    async move {
                        let mut status = rpc.status();
                        while let Some(status) = status.recv().await {
                            // If the project was dropped, just keep draining
                            // status updates until the stream ends.
                            if let Some(this) = this.upgrade(&cx) {
                                let remote_id = if let client::Status::Connected { .. } = status {
                                    let response = rpc.request(proto::RegisterProject {}).await?;
                                    Some(response.project_id)
                                } else {
                                    None
                                };

                                if let Some(project_id) = remote_id {
                                    // Kick off registration for all worktrees inside the
                                    // update closure, then await the results outside it.
                                    let mut registrations = Vec::new();
                                    this.update(&mut cx, |this, cx| {
                                        for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                                            registrations.push(worktree.update(
                                                cx,
                                                |worktree, cx| {
                                                    let worktree = worktree.as_local_mut().unwrap();
                                                    worktree.register(project_id, cx)
                                                },
                                            ));
                                        }
                                    });
                                    for registration in registrations {
                                        registration.await?;
                                    }
                                }
                                this.update(&mut cx, |this, cx| this.set_remote_id(remote_id, cx));
                            }
                        }
                        Ok(())
                    }
                    .log_err()
                }
            });

            Self {
                worktrees: Default::default(),
                collaborators: Default::default(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                client_state: ProjectClientState::Local {
                    is_shared: false,
                    remote_id_tx,
                    remote_id_rx,
                    _maintain_remote_id_task,
                },
                subscriptions: Vec::new(),
                active_entry: None,
                languages,
                client,
                user_store,
                fs,
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            }
        })
    }
228
    /// Joins an existing remote project as a guest.
    ///
    /// Connects and authenticates, asks the server to join `remote_id`, then
    /// reconstructs the project state from the response: remote worktrees,
    /// collaborator records (after loading their users), and the RPC
    /// subscriptions a guest needs.
    pub async fn remote(
        remote_id: u64,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        client.authenticate_and_connect(&cx).await?;

        let response = client
            .request(proto::JoinProject {
                project_id: remote_id,
            })
            .await?;

        let replica_id = response.replica_id as ReplicaId;

        let mut worktrees = Vec::new();
        for worktree in response.worktrees {
            worktrees
                .push(Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx).await?);
        }

        // Load all collaborating users up front so `Collaborator::from_proto`
        // can resolve them below.
        let user_ids = response
            .collaborators
            .iter()
            .map(|peer| peer.user_id)
            .collect();
        user_store
            .update(cx, |user_store, cx| user_store.load_users(user_ids, cx))
            .await?;
        let mut collaborators = HashMap::default();
        for message in response.collaborators {
            let collaborator = Collaborator::from_proto(message, &user_store, cx).await?;
            collaborators.insert(collaborator.peer_id, collaborator);
        }

        Ok(cx.add_model(|cx| {
            let mut this = Self {
                worktrees: Vec::new(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                active_entry: None,
                collaborators,
                languages,
                user_store,
                fs,
                subscriptions: vec![
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unshare_project),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_share_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unregister_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_update_diagnostic_summary,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updating,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updated,
                    ),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer_file),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
                ],
                client,
                client_state: ProjectClientState::Remote {
                    sharing_has_stopped: false,
                    remote_id,
                    replica_id,
                },
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            };
            // Worktrees are added via `add_worktree` so their observers and
            // subscriptions are wired up.
            for worktree in worktrees {
                this.add_worktree(&worktree, cx);
            }
            this
        }))
    }
319
320 fn set_remote_id(&mut self, remote_id: Option<u64>, cx: &mut ModelContext<Self>) {
321 if let ProjectClientState::Local { remote_id_tx, .. } = &mut self.client_state {
322 *remote_id_tx.borrow_mut() = remote_id;
323 }
324
325 self.subscriptions.clear();
326 if let Some(remote_id) = remote_id {
327 let client = &self.client;
328 self.subscriptions.extend([
329 client.subscribe_to_entity(remote_id, cx, Self::handle_open_buffer),
330 client.subscribe_to_entity(remote_id, cx, Self::handle_close_buffer),
331 client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
332 client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
333 client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
334 client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
335 client.subscribe_to_entity(remote_id, cx, Self::handle_save_buffer),
336 client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
337 client.subscribe_to_entity(remote_id, cx, Self::handle_format_buffer),
338 ]);
339 }
340 }
341
342 pub fn remote_id(&self) -> Option<u64> {
343 match &self.client_state {
344 ProjectClientState::Local { remote_id_rx, .. } => *remote_id_rx.borrow(),
345 ProjectClientState::Remote { remote_id, .. } => Some(*remote_id),
346 }
347 }
348
349 pub fn next_remote_id(&self) -> impl Future<Output = u64> {
350 let mut id = None;
351 let mut watch = None;
352 match &self.client_state {
353 ProjectClientState::Local { remote_id_rx, .. } => watch = Some(remote_id_rx.clone()),
354 ProjectClientState::Remote { remote_id, .. } => id = Some(*remote_id),
355 }
356
357 async move {
358 if let Some(id) = id {
359 return id;
360 }
361 let mut watch = watch.unwrap();
362 loop {
363 let id = *watch.borrow();
364 if let Some(id) = id {
365 return id;
366 }
367 watch.recv().await;
368 }
369 }
370 }
371
372 pub fn replica_id(&self) -> ReplicaId {
373 match &self.client_state {
374 ProjectClientState::Local { .. } => 0,
375 ProjectClientState::Remote { replica_id, .. } => *replica_id,
376 }
377 }
378
    /// The remote collaborators currently in this project, keyed by peer id.
    pub fn collaborators(&self) -> &HashMap<PeerId, Collaborator> {
        &self.collaborators
    }
382
383 pub fn worktrees<'a>(
384 &'a self,
385 cx: &'a AppContext,
386 ) -> impl 'a + Iterator<Item = ModelHandle<Worktree>> {
387 self.worktrees
388 .iter()
389 .filter_map(move |worktree| worktree.upgrade(cx))
390 }
391
392 pub fn worktree_for_id(
393 &self,
394 id: WorktreeId,
395 cx: &AppContext,
396 ) -> Option<ModelHandle<Worktree>> {
397 self.worktrees(cx)
398 .find(|worktree| worktree.read(cx).id() == id)
399 }
400
    /// Starts sharing this (local) project with collaborators.
    ///
    /// Marks the project shared, asks the server to share it, then shares
    /// every worktree under the project id. Fails if the project has no
    /// remote id yet or is itself a remote project.
    pub fn share(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
        let rpc = self.client.clone();
        cx.spawn(|this, mut cx| async move {
            let project_id = this.update(&mut cx, |this, _| {
                if let ProjectClientState::Local {
                    is_shared,
                    remote_id_rx,
                    ..
                } = &mut this.client_state
                {
                    // NOTE: the flag is set before the RPC below succeeds; if
                    // `ShareProject` fails, `is_shared` remains true.
                    *is_shared = true;
                    remote_id_rx
                        .borrow()
                        .ok_or_else(|| anyhow!("no project id"))
                } else {
                    Err(anyhow!("can't share a remote project"))
                }
            })?;

            rpc.request(proto::ShareProject { project_id }).await?;
            // Collect the per-worktree share tasks inside the update closure,
            // then await them outside it.
            let mut tasks = Vec::new();
            this.update(&mut cx, |this, cx| {
                for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                    worktree.update(cx, |worktree, cx| {
                        let worktree = worktree.as_local_mut().unwrap();
                        tasks.push(worktree.share(project_id, cx));
                    });
                }
            });
            for task in tasks {
                task.await?;
            }
            this.update(&mut cx, |_, cx| cx.notify());
            Ok(())
        })
    }
437
438 pub fn unshare(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
439 let rpc = self.client.clone();
440 cx.spawn(|this, mut cx| async move {
441 let project_id = this.update(&mut cx, |this, _| {
442 if let ProjectClientState::Local {
443 is_shared,
444 remote_id_rx,
445 ..
446 } = &mut this.client_state
447 {
448 *is_shared = false;
449 remote_id_rx
450 .borrow()
451 .ok_or_else(|| anyhow!("no project id"))
452 } else {
453 Err(anyhow!("can't share a remote project"))
454 }
455 })?;
456
457 rpc.send(proto::UnshareProject { project_id }).await?;
458 this.update(&mut cx, |this, cx| {
459 this.collaborators.clear();
460 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
461 worktree.update(cx, |worktree, _| {
462 worktree.as_local_mut().unwrap().unshare();
463 });
464 }
465 cx.notify()
466 });
467 Ok(())
468 })
469 }
470
471 pub fn is_read_only(&self) -> bool {
472 match &self.client_state {
473 ProjectClientState::Local { .. } => false,
474 ProjectClientState::Remote {
475 sharing_has_stopped,
476 ..
477 } => *sharing_has_stopped,
478 }
479 }
480
481 pub fn is_local(&self) -> bool {
482 match &self.client_state {
483 ProjectClientState::Local { .. } => true,
484 ProjectClientState::Remote { .. } => false,
485 }
486 }
487
    /// Opens (or returns the already-open) buffer for a project path.
    ///
    /// Concurrent calls for the same path are deduplicated: the first call
    /// starts the load and registers a watch channel in `loading_buffers`;
    /// later calls await the same channel, so all callers get the same
    /// buffer handle (or the same error).
    pub fn open_buffer(
        &mut self,
        path: impl Into<ProjectPath>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let project_path = path.into();
        let worktree = if let Some(worktree) = self.worktree_for_id(project_path.worktree_id, cx) {
            worktree
        } else {
            return Task::ready(Err(anyhow!("no such worktree")));
        };

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = self.get_open_buffer(&project_path, cx);
        if let Some(existing_buffer) = existing_buffer {
            return Task::ready(Ok(existing_buffer));
        }

        let mut loading_watch = match self.loading_buffers.entry(project_path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = worktree.update(cx, |worktree, cx| {
                    worktree.load_buffer(&project_path.path, cx)
                });

                cx.spawn(move |this, mut cx| async move {
                    let load_result = load_buffer.await;
                    // Publishing to the watch wakes up every waiter below.
                    *tx.borrow_mut() = Some(this.update(&mut cx, |this, cx| {
                        // Record the fact that the buffer is no longer loading.
                        this.loading_buffers.remove(&project_path);
                        let buffer = load_result.map_err(Arc::new)?;
                        // Track the buffer weakly under its remote id.
                        this.open_buffers.insert(
                            buffer.read(cx).remote_id() as usize,
                            OpenBuffer::Loaded(buffer.downgrade()),
                        );
                        this.assign_language_to_buffer(&worktree, &buffer, cx);
                        Ok(buffer)
                    }));
                })
                .detach();
                rx
            }
        };

        // Wait (on the foreground executor) for the load result to appear in
        // the watch channel.
        cx.foreground().spawn(async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    match result {
                        Ok(buffer) => return Ok(buffer.clone()),
                        Err(error) => return Err(anyhow!("{}", error)),
                    }
                }
                loading_watch.recv().await;
            }
        })
    }
551
    /// Saves a buffer to a new absolute path ("save as").
    ///
    /// Finds (or creates, as a weak worktree) the local worktree containing
    /// `abs_path`, delegates the save to it, then registers the buffer in
    /// `open_buffers` and re-assigns its language based on the new path.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        abs_path: PathBuf,
        cx: &mut ModelContext<Project>,
    ) -> Task<Result<()>> {
        let worktree_task = self.find_or_create_local_worktree(&abs_path, false, cx);
        cx.spawn(|this, mut cx| async move {
            let (worktree, path) = worktree_task.await?;
            worktree
                .update(&mut cx, |worktree, cx| {
                    worktree
                        .as_local_mut()
                        .unwrap()
                        .save_buffer_as(buffer.clone(), path, cx)
                })
                .await?;
            this.update(&mut cx, |this, cx| {
                this.open_buffers
                    .insert(buffer.id(), OpenBuffer::Loaded(buffer.downgrade()));
                // The buffer's file (and possibly extension) changed, so the
                // language may need to change too.
                this.assign_language_to_buffer(&worktree, &buffer, cx);
            });
            Ok(())
        })
    }
577
578 #[cfg(any(test, feature = "test-support"))]
579 pub fn has_open_buffer(&self, path: impl Into<ProjectPath>, cx: &AppContext) -> bool {
580 let path = path.into();
581 if let Some(worktree) = self.worktree_for_id(path.worktree_id, cx) {
582 self.open_buffers.iter().any(|(_, buffer)| {
583 if let Some(buffer) = buffer.upgrade(cx) {
584 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
585 if file.worktree == worktree && file.path() == &path.path {
586 return true;
587 }
588 }
589 }
590 false
591 })
592 } else {
593 false
594 }
595 }
596
    /// Returns the open buffer for `path`, if one is loaded and alive.
    ///
    /// As a side effect, garbage-collects `open_buffers`: entries whose weak
    /// handle can no longer be upgraded — and all `Operations` entries — are
    /// removed during the scan.
    fn get_open_buffer(
        &mut self,
        path: &ProjectPath,
        cx: &mut ModelContext<Self>,
    ) -> Option<ModelHandle<Buffer>> {
        let mut result = None;
        let worktree = self.worktree_for_id(path.worktree_id, cx)?;
        self.open_buffers.retain(|_, buffer| {
            if let OpenBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                        if file.worktree == worktree && file.path() == &path.path {
                            result = Some(buffer);
                        }
                    }
                    // Keep live buffers, matching or not.
                    return true;
                }
            }
            // Dropped buffers and pending-operation entries are discarded.
            false
        });
        result
    }
619
    /// Chooses a language for the buffer based on its full path and wires up
    /// the corresponding language server (local worktrees only). Also applies
    /// any diagnostics the worktree has already collected for the path.
    ///
    /// The `Option<()>` return is used only for early exit via `?` when the
    /// buffer has no file; the value itself is never inspected by callers.
    fn assign_language_to_buffer(
        &mut self,
        worktree: &ModelHandle<Worktree>,
        buffer: &ModelHandle<Buffer>,
        cx: &mut ModelContext<Self>,
    ) -> Option<()> {
        let (path, full_path) = {
            let file = buffer.read(cx).file()?;
            (file.path().clone(), file.full_path(cx))
        };

        // If the buffer has a language, set it and start/assign the language server
        if let Some(language) = self.languages.select_language(&full_path) {
            buffer.update(cx, |buffer, cx| {
                buffer.set_language(Some(language.clone()), cx);
            });

            // For local worktrees, start a language server if needed.
            // Also assign the language server and any previously stored diagnostics to the buffer.
            if let Some(local_worktree) = worktree.read(cx).as_local() {
                let worktree_id = local_worktree.id();
                let worktree_abs_path = local_worktree.abs_path().clone();

                // One language server per (worktree, language) pair; reuse a
                // running one or start it on first use.
                let language_server = match self
                    .language_servers
                    .entry((worktree_id, language.name().to_string()))
                {
                    hash_map::Entry::Occupied(e) => Some(e.get().clone()),
                    hash_map::Entry::Vacant(e) => Self::start_language_server(
                        self.client.clone(),
                        language.clone(),
                        &worktree_abs_path,
                        cx,
                    )
                    .map(|server| e.insert(server).clone()),
                };

                buffer.update(cx, |buffer, cx| {
                    buffer.set_language_server(language_server, cx);
                });
            }
        }

        if let Some(local_worktree) = worktree.read(cx).as_local() {
            if let Some(diagnostics) = local_worktree.diagnostics_for_path(&path) {
                buffer.update(cx, |buffer, cx| {
                    buffer.update_diagnostics(None, diagnostics, cx).log_err();
                });
            }
        }

        None
    }
673
    /// Starts a language server for `language` rooted at `worktree_path` and
    /// wires its notifications into the project.
    ///
    /// Two LSP notification streams are funneled into one channel of
    /// `LspEvent`s, which a spawned task then applies to the project:
    /// - `PublishDiagnostics` updates diagnostics; for servers without a
    ///   disk-based progress token, each publish is bracketed with synthetic
    ///   start/finish events.
    /// - `Progress` tracks the server's disk-based-diagnostics job count and
    ///   emits start/finish events on the 0 -> 1 and 1 -> 0 transitions.
    ///
    /// Returns `None` if the server fails to start (the error is logged).
    fn start_language_server(
        rpc: Arc<Client>,
        language: Arc<Language>,
        worktree_path: &Path,
        cx: &mut ModelContext<Self>,
    ) -> Option<Arc<LanguageServer>> {
        // Internal events produced by the notification handlers below.
        enum LspEvent {
            DiagnosticsStart,
            DiagnosticsUpdate(lsp::PublishDiagnosticsParams),
            DiagnosticsFinish,
        }

        let language_server = language
            .start_server(worktree_path, cx)
            .log_err()
            .flatten()?;
        let disk_based_sources = language
            .disk_based_diagnostic_sources()
            .cloned()
            .unwrap_or_default();
        let disk_based_diagnostics_progress_token =
            language.disk_based_diagnostics_progress_token().cloned();
        let has_disk_based_diagnostic_progress_token =
            disk_based_diagnostics_progress_token.is_some();
        let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();

        // Listen for `PublishDiagnostics` notifications.
        language_server
            .on_notification::<lsp::notification::PublishDiagnostics, _>({
                let diagnostics_tx = diagnostics_tx.clone();
                move |params| {
                    // Without a progress token there is no other signal for
                    // when diagnostics run, so bracket each publish.
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                    }
                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsUpdate(params))).ok();
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                    }
                }
            })
            .detach();

        // Listen for `Progress` notifications. Send an event when the language server
        // transitions between running jobs and not running any jobs.
        let mut running_jobs_for_this_server: i32 = 0;
        language_server
            .on_notification::<lsp::notification::Progress, _>(move |params| {
                let token = match params.token {
                    lsp::NumberOrString::Number(_) => None,
                    lsp::NumberOrString::String(token) => Some(token),
                };

                // Only the language's designated disk-based-diagnostics token
                // is of interest here.
                if token == disk_based_diagnostics_progress_token {
                    match params.value {
                        lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                            lsp::WorkDoneProgress::Begin(_) => {
                                running_jobs_for_this_server += 1;
                                if running_jobs_for_this_server == 1 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                                }
                            }
                            lsp::WorkDoneProgress::End(_) => {
                                running_jobs_for_this_server -= 1;
                                if running_jobs_for_this_server == 0 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                                }
                            }
                            _ => {}
                        },
                    }
                }
            })
            .detach();

        // Process all the LSP events.
        cx.spawn_weak(|this, mut cx| async move {
            while let Ok(message) = diagnostics_rx.recv().await {
                // Stop processing once the project has been dropped.
                let this = cx.read(|cx| this.upgrade(cx))?;
                match message {
                    LspEvent::DiagnosticsStart => {
                        let send = this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_started(cx);
                            // Forward the transition to the server when shared.
                            this.remote_id().map(|project_id| {
                                rpc.send(proto::DiskBasedDiagnosticsUpdating { project_id })
                            })
                        });
                        if let Some(send) = send {
                            send.await.log_err();
                        }
                    }
                    LspEvent::DiagnosticsUpdate(params) => {
                        this.update(&mut cx, |this, cx| {
                            this.update_diagnostics(params, &disk_based_sources, cx)
                                .log_err();
                        });
                    }
                    LspEvent::DiagnosticsFinish => {
                        let send = this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_finished(cx);
                            this.remote_id().map(|project_id| {
                                rpc.send(proto::DiskBasedDiagnosticsUpdated { project_id })
                            })
                        });
                        if let Some(send) = send {
                            send.await.log_err();
                        }
                    }
                }
            }
            Some(())
        })
        .detach();

        Some(language_server)
    }
789
790 pub fn update_diagnostics(
791 &mut self,
792 params: lsp::PublishDiagnosticsParams,
793 disk_based_sources: &HashSet<String>,
794 cx: &mut ModelContext<Self>,
795 ) -> Result<()> {
796 let abs_path = params
797 .uri
798 .to_file_path()
799 .map_err(|_| anyhow!("URI is not a file"))?;
800 let mut next_group_id = 0;
801 let mut diagnostics = Vec::default();
802 let mut primary_diagnostic_group_ids = HashMap::default();
803 let mut sources_by_group_id = HashMap::default();
804 let mut supporting_diagnostic_severities = HashMap::default();
805 for diagnostic in ¶ms.diagnostics {
806 let source = diagnostic.source.as_ref();
807 let code = diagnostic.code.as_ref().map(|code| match code {
808 lsp::NumberOrString::Number(code) => code.to_string(),
809 lsp::NumberOrString::String(code) => code.clone(),
810 });
811 let range = range_from_lsp(diagnostic.range);
812 let is_supporting = diagnostic
813 .related_information
814 .as_ref()
815 .map_or(false, |infos| {
816 infos.iter().any(|info| {
817 primary_diagnostic_group_ids.contains_key(&(
818 source,
819 code.clone(),
820 range_from_lsp(info.location.range),
821 ))
822 })
823 });
824
825 if is_supporting {
826 if let Some(severity) = diagnostic.severity {
827 supporting_diagnostic_severities
828 .insert((source, code.clone(), range), severity);
829 }
830 } else {
831 let group_id = post_inc(&mut next_group_id);
832 let is_disk_based =
833 source.map_or(false, |source| disk_based_sources.contains(source));
834
835 sources_by_group_id.insert(group_id, source);
836 primary_diagnostic_group_ids
837 .insert((source, code.clone(), range.clone()), group_id);
838
839 diagnostics.push(DiagnosticEntry {
840 range,
841 diagnostic: Diagnostic {
842 code: code.clone(),
843 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
844 message: diagnostic.message.clone(),
845 group_id,
846 is_primary: true,
847 is_valid: true,
848 is_disk_based,
849 },
850 });
851 if let Some(infos) = &diagnostic.related_information {
852 for info in infos {
853 if info.location.uri == params.uri {
854 let range = range_from_lsp(info.location.range);
855 diagnostics.push(DiagnosticEntry {
856 range,
857 diagnostic: Diagnostic {
858 code: code.clone(),
859 severity: DiagnosticSeverity::INFORMATION,
860 message: info.message.clone(),
861 group_id,
862 is_primary: false,
863 is_valid: true,
864 is_disk_based,
865 },
866 });
867 }
868 }
869 }
870 }
871 }
872
873 for entry in &mut diagnostics {
874 let diagnostic = &mut entry.diagnostic;
875 if !diagnostic.is_primary {
876 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
877 if let Some(&severity) = supporting_diagnostic_severities.get(&(
878 source,
879 diagnostic.code.clone(),
880 entry.range.clone(),
881 )) {
882 diagnostic.severity = severity;
883 }
884 }
885 }
886
887 self.update_diagnostic_entries(abs_path, params.version, diagnostics, cx)?;
888 Ok(())
889 }
890
    /// Stores diagnostics for a file: updates the first matching open buffer,
    /// records the entries on the owning local worktree, and emits
    /// `Event::DiagnosticsUpdated`.
    ///
    /// # Errors
    /// Fails if no worktree contains `abs_path`, if the worktree is not
    /// local, or if the buffer/worktree rejects the update.
    pub fn update_diagnostic_entries(
        &mut self,
        abs_path: PathBuf,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Project>,
    ) -> Result<(), anyhow::Error> {
        let (worktree, relative_path) = self
            .find_local_worktree(&abs_path, cx)
            .ok_or_else(|| anyhow!("no worktree found for diagnostics"))?;
        let project_path = ProjectPath {
            worktree_id: worktree.read(cx).id(),
            path: relative_path.into(),
        };

        // NOTE(review): this matches on the relative path only, not on the
        // owning worktree, and stops at the first match — a buffer with the
        // same relative path in another worktree could receive these
        // diagnostics instead. Verify whether that situation can occur here.
        for buffer in self.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == project_path.path)
                {
                    buffer.update(cx, |buffer, cx| {
                        buffer.update_diagnostics(version, diagnostics.clone(), cx)
                    })?;
                    break;
                }
            }
        }
        worktree.update(cx, |worktree, cx| {
            worktree
                .as_local_mut()
                .ok_or_else(|| anyhow!("not a local worktree"))?
                .update_diagnostics(project_path.path.clone(), diagnostics, cx)
        })?;
        cx.emit(Event::DiagnosticsUpdated(project_path));
        Ok(())
    }
929
    /// Resolves go-to-definition for `position` in the given buffer via the
    /// buffer's language server.
    ///
    /// Only implemented for local worktrees; for guests it currently logs and
    /// returns an empty list. Each returned location is resolved to a buffer:
    /// if the target file lies outside all worktrees, a new weak worktree is
    /// created for it and the server for this language is associated with it.
    pub fn definition<T: ToOffset>(
        &self,
        source_buffer_handle: &ModelHandle<Buffer>,
        position: T,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<Vec<Definition>>> {
        let source_buffer_handle = source_buffer_handle.clone();
        let buffer = source_buffer_handle.read(cx);
        let worktree;
        let buffer_abs_path;
        if let Some(file) = File::from_dyn(buffer.file()) {
            worktree = file.worktree.clone();
            buffer_abs_path = file.as_local().map(|f| f.abs_path(cx));
        } else {
            return Task::ready(Err(anyhow!("buffer does not belong to any worktree")));
        };

        if worktree.read(cx).as_local().is_some() {
            let point = buffer.offset_to_point_utf16(position.to_offset(buffer));
            let buffer_abs_path = buffer_abs_path.unwrap();
            let lang_name;
            let lang_server;
            if let Some(lang) = buffer.language() {
                lang_name = lang.name().to_string();
                if let Some(server) = self
                    .language_servers
                    .get(&(worktree.read(cx).id(), lang_name.clone()))
                {
                    lang_server = server.clone();
                } else {
                    return Task::ready(Err(anyhow!("buffer does not have a language server")));
                };
            } else {
                return Task::ready(Err(anyhow!("buffer does not have a language")));
            }

            cx.spawn(|this, mut cx| async move {
                let response = lang_server
                    .request::<lsp::request::GotoDefinition>(lsp::GotoDefinitionParams {
                        text_document_position_params: lsp::TextDocumentPositionParams {
                            text_document: lsp::TextDocumentIdentifier::new(
                                lsp::Url::from_file_path(&buffer_abs_path).unwrap(),
                            ),
                            position: lsp::Position::new(point.row, point.column),
                        },
                        work_done_progress_params: Default::default(),
                        partial_result_params: Default::default(),
                    })
                    .await?;

                let mut definitions = Vec::new();
                if let Some(response) = response {
                    // Normalize the three LSP response shapes into a flat
                    // list of (uri, range) pairs.
                    let mut unresolved_locations = Vec::new();
                    match response {
                        lsp::GotoDefinitionResponse::Scalar(loc) => {
                            unresolved_locations.push((loc.uri, loc.range));
                        }
                        lsp::GotoDefinitionResponse::Array(locs) => {
                            unresolved_locations.extend(locs.into_iter().map(|l| (l.uri, l.range)));
                        }
                        lsp::GotoDefinitionResponse::Link(links) => {
                            unresolved_locations.extend(
                                links
                                    .into_iter()
                                    .map(|l| (l.target_uri, l.target_selection_range)),
                            );
                        }
                    }

                    for (target_uri, target_range) in unresolved_locations {
                        let abs_path = target_uri
                            .to_file_path()
                            .map_err(|_| anyhow!("invalid target path"))?;

                        // Find the worktree containing the target; create a
                        // weak one rooted at the file when none exists.
                        let (worktree, relative_path) = if let Some(result) =
                            this.read_with(&cx, |this, cx| this.find_local_worktree(&abs_path, cx))
                        {
                            result
                        } else {
                            let worktree = this
                                .update(&mut cx, |this, cx| {
                                    this.create_local_worktree(&abs_path, true, cx)
                                })
                                .await?;
                            // Reuse this language server for the new worktree.
                            this.update(&mut cx, |this, cx| {
                                this.language_servers.insert(
                                    (worktree.read(cx).id(), lang_name.clone()),
                                    lang_server.clone(),
                                );
                            });
                            (worktree, PathBuf::new())
                        };

                        let project_path = ProjectPath {
                            worktree_id: worktree.read_with(&cx, |worktree, _| worktree.id()),
                            path: relative_path.into(),
                        };
                        let target_buffer_handle = this
                            .update(&mut cx, |this, cx| this.open_buffer(project_path, cx))
                            .await?;
                        // Convert the LSP range into anchors in the target
                        // buffer, clipping to valid positions.
                        cx.read(|cx| {
                            let target_buffer = target_buffer_handle.read(cx);
                            let target_start = target_buffer
                                .clip_point_utf16(target_range.start.to_point_utf16(), Bias::Left);
                            let target_end = target_buffer
                                .clip_point_utf16(target_range.end.to_point_utf16(), Bias::Left);
                            definitions.push(Definition {
                                target_buffer: target_buffer_handle,
                                target_range: target_buffer.anchor_after(target_start)
                                    ..target_buffer.anchor_before(target_end),
                            });
                        });
                    }
                }

                Ok(definitions)
            })
        } else {
            log::info!("go to definition is not yet implemented for guests");
            Task::ready(Ok(Default::default()))
        }
    }
1052
1053 pub fn find_or_create_local_worktree(
1054 &self,
1055 abs_path: impl AsRef<Path>,
1056 weak: bool,
1057 cx: &mut ModelContext<Self>,
1058 ) -> Task<Result<(ModelHandle<Worktree>, PathBuf)>> {
1059 let abs_path = abs_path.as_ref();
1060 if let Some((tree, relative_path)) = self.find_local_worktree(abs_path, cx) {
1061 Task::ready(Ok((tree.clone(), relative_path.into())))
1062 } else {
1063 let worktree = self.create_local_worktree(abs_path, weak, cx);
1064 cx.foreground()
1065 .spawn(async move { Ok((worktree.await?, PathBuf::new())) })
1066 }
1067 }
1068
1069 fn find_local_worktree(
1070 &self,
1071 abs_path: &Path,
1072 cx: &AppContext,
1073 ) -> Option<(ModelHandle<Worktree>, PathBuf)> {
1074 for tree in self.worktrees(cx) {
1075 if let Some(relative_path) = tree
1076 .read(cx)
1077 .as_local()
1078 .and_then(|t| abs_path.strip_prefix(t.abs_path()).ok())
1079 {
1080 return Some((tree.clone(), relative_path.into()));
1081 }
1082 }
1083 None
1084 }
1085
1086 pub fn is_shared(&self) -> bool {
1087 match &self.client_state {
1088 ProjectClientState::Local { is_shared, .. } => *is_shared,
1089 ProjectClientState::Remote { .. } => false,
1090 }
1091 }
1092
    /// Creates a new local worktree rooted at `abs_path`, adds it to this
    /// project, and — if the project is registered with the server —
    /// registers (and, when shared, shares) the worktree remotely.
    fn create_local_worktree(
        &self,
        abs_path: impl AsRef<Path>,
        weak: bool,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Worktree>>> {
        let fs = self.fs.clone();
        let client = self.client.clone();
        let path = Arc::from(abs_path.as_ref());
        cx.spawn(|project, mut cx| async move {
            let worktree = Worktree::local(client.clone(), path, weak, fs, &mut cx).await?;

            // Add the worktree and read the remote/shared state in a single
            // project update so the two observations are consistent.
            let (remote_project_id, is_shared) = project.update(&mut cx, |project, cx| {
                project.add_worktree(&worktree, cx);
                (project.remote_id(), project.is_shared())
            });

            // Register before sharing: sharing presupposes the worktree is
            // known to the server.
            if let Some(project_id) = remote_project_id {
                worktree
                    .update(&mut cx, |worktree, cx| {
                        worktree.as_local_mut().unwrap().register(project_id, cx)
                    })
                    .await?;
                if is_shared {
                    worktree
                        .update(&mut cx, |worktree, cx| {
                            worktree.as_local_mut().unwrap().share(project_id, cx)
                        })
                        .await?;
                }
            }

            Ok(worktree)
        })
    }
1128
1129 pub fn remove_worktree(&mut self, id: WorktreeId, cx: &mut ModelContext<Self>) {
1130 self.worktrees.retain(|worktree| {
1131 worktree
1132 .upgrade(cx)
1133 .map_or(false, |w| w.read(cx).id() != id)
1134 });
1135 cx.notify();
1136 }
1137
    /// Wires a newly created worktree into the project: observes it for
    /// re-rendering, subscribes to local worktree events to keep open buffers'
    /// files up to date, and stores either a strong or weak handle.
    fn add_worktree(&mut self, worktree: &ModelHandle<Worktree>, cx: &mut ModelContext<Self>) {
        // Re-notify project observers whenever the worktree changes.
        cx.observe(&worktree, |_, _, cx| cx.notify()).detach();
        if worktree.read(cx).is_local() {
            // Local worktree events signal filesystem changes that can move,
            // recreate, or delete files backing open buffers.
            cx.subscribe(&worktree, |this, worktree, _, cx| {
                this.update_local_worktree_buffers(worktree, cx);
            })
            .detach();
        }

        // Weak local worktrees must not be kept alive by the project itself;
        // they live only as long as some other holder (e.g. an open buffer).
        let push_weak_handle = {
            let worktree = worktree.read(cx);
            worktree.is_local() && worktree.is_weak()
        };
        if push_weak_handle {
            // Prune dead weak handles once the worktree is released.
            cx.observe_release(&worktree, |this, cx| {
                this.worktrees
                    .retain(|worktree| worktree.upgrade(cx).is_some());
                cx.notify();
            })
            .detach();
            self.worktrees
                .push(WorktreeHandle::Weak(worktree.downgrade()));
        } else {
            self.worktrees
                .push(WorktreeHandle::Strong(worktree.clone()));
        }
        cx.notify();
    }
1166
1167 fn update_local_worktree_buffers(
1168 &mut self,
1169 worktree_handle: ModelHandle<Worktree>,
1170 cx: &mut ModelContext<Self>,
1171 ) {
1172 let snapshot = worktree_handle.read(cx).snapshot();
1173 let mut buffers_to_delete = Vec::new();
1174 for (buffer_id, buffer) in &self.open_buffers {
1175 if let OpenBuffer::Loaded(buffer) = buffer {
1176 if let Some(buffer) = buffer.upgrade(cx) {
1177 buffer.update(cx, |buffer, cx| {
1178 if let Some(old_file) = File::from_dyn(buffer.file()) {
1179 if old_file.worktree != worktree_handle {
1180 return;
1181 }
1182
1183 let new_file = if let Some(entry) = old_file
1184 .entry_id
1185 .and_then(|entry_id| snapshot.entry_for_id(entry_id))
1186 {
1187 File {
1188 is_local: true,
1189 entry_id: Some(entry.id),
1190 mtime: entry.mtime,
1191 path: entry.path.clone(),
1192 worktree: worktree_handle.clone(),
1193 }
1194 } else if let Some(entry) =
1195 snapshot.entry_for_path(old_file.path().as_ref())
1196 {
1197 File {
1198 is_local: true,
1199 entry_id: Some(entry.id),
1200 mtime: entry.mtime,
1201 path: entry.path.clone(),
1202 worktree: worktree_handle.clone(),
1203 }
1204 } else {
1205 File {
1206 is_local: true,
1207 entry_id: None,
1208 path: old_file.path().clone(),
1209 mtime: old_file.mtime(),
1210 worktree: worktree_handle.clone(),
1211 }
1212 };
1213
1214 if let Some(project_id) = self.remote_id() {
1215 let client = self.client.clone();
1216 let message = proto::UpdateBufferFile {
1217 project_id,
1218 buffer_id: *buffer_id as u64,
1219 file: Some(new_file.to_proto()),
1220 };
1221 cx.foreground()
1222 .spawn(async move { client.send(message).await })
1223 .detach_and_log_err(cx);
1224 }
1225 buffer.file_updated(Box::new(new_file), cx).detach();
1226 }
1227 });
1228 } else {
1229 buffers_to_delete.push(*buffer_id);
1230 }
1231 }
1232 }
1233
1234 for buffer_id in buffers_to_delete {
1235 self.open_buffers.remove(&buffer_id);
1236 }
1237 }
1238
1239 pub fn set_active_path(&mut self, entry: Option<ProjectPath>, cx: &mut ModelContext<Self>) {
1240 let new_active_entry = entry.and_then(|project_path| {
1241 let worktree = self.worktree_for_id(project_path.worktree_id, cx)?;
1242 let entry = worktree.read(cx).entry_for_path(project_path.path)?;
1243 Some(ProjectEntry {
1244 worktree_id: project_path.worktree_id,
1245 entry_id: entry.id,
1246 })
1247 });
1248 if new_active_entry != self.active_entry {
1249 self.active_entry = new_active_entry;
1250 cx.emit(Event::ActiveEntryChanged(new_active_entry));
1251 }
1252 }
1253
    /// Whether at least one language server is currently running a disk-based
    /// diagnostics pass (see `disk_based_diagnostics_started`/`_finished`).
    pub fn is_running_disk_based_diagnostics(&self) -> bool {
        self.language_servers_with_diagnostics_running > 0
    }
1257
1258 pub fn diagnostic_summary(&self, cx: &AppContext) -> DiagnosticSummary {
1259 let mut summary = DiagnosticSummary::default();
1260 for (_, path_summary) in self.diagnostic_summaries(cx) {
1261 summary.error_count += path_summary.error_count;
1262 summary.warning_count += path_summary.warning_count;
1263 summary.info_count += path_summary.info_count;
1264 summary.hint_count += path_summary.hint_count;
1265 }
1266 summary
1267 }
1268
1269 pub fn diagnostic_summaries<'a>(
1270 &'a self,
1271 cx: &'a AppContext,
1272 ) -> impl Iterator<Item = (ProjectPath, DiagnosticSummary)> + 'a {
1273 self.worktrees(cx).flat_map(move |worktree| {
1274 let worktree = worktree.read(cx);
1275 let worktree_id = worktree.id();
1276 worktree
1277 .diagnostic_summaries()
1278 .map(move |(path, summary)| (ProjectPath { worktree_id, path }, summary))
1279 })
1280 }
1281
    /// Records that a language server began a disk-based diagnostics pass,
    /// emitting `DiskBasedDiagnosticsStarted` only on the transition from zero
    /// to one running pass.
    pub fn disk_based_diagnostics_started(&mut self, cx: &mut ModelContext<Self>) {
        self.language_servers_with_diagnostics_running += 1;
        if self.language_servers_with_diagnostics_running == 1 {
            cx.emit(Event::DiskBasedDiagnosticsStarted);
        }
    }
1288
    /// Records that a language server finished a disk-based diagnostics pass.
    /// `DiskBasedDiagnosticsUpdated` is emitted for every completed pass,
    /// while `DiskBasedDiagnosticsFinished` fires only once the count of
    /// running passes returns to zero.
    pub fn disk_based_diagnostics_finished(&mut self, cx: &mut ModelContext<Self>) {
        cx.emit(Event::DiskBasedDiagnosticsUpdated);
        self.language_servers_with_diagnostics_running -= 1;
        if self.language_servers_with_diagnostics_running == 0 {
            cx.emit(Event::DiskBasedDiagnosticsFinished);
        }
    }
1296
    /// The currently active project entry, if any (see `set_active_path`).
    pub fn active_entry(&self) -> Option<ProjectEntry> {
        self.active_entry
    }
1300
1301 // RPC message handlers
1302
1303 fn handle_unshare_project(
1304 &mut self,
1305 _: TypedEnvelope<proto::UnshareProject>,
1306 _: Arc<Client>,
1307 cx: &mut ModelContext<Self>,
1308 ) -> Result<()> {
1309 if let ProjectClientState::Remote {
1310 sharing_has_stopped,
1311 ..
1312 } = &mut self.client_state
1313 {
1314 *sharing_has_stopped = true;
1315 self.collaborators.clear();
1316 cx.notify();
1317 Ok(())
1318 } else {
1319 unreachable!()
1320 }
1321 }
1322
    /// Handles a new collaborator joining the project: resolves their user
    /// record asynchronously and inserts them into the collaborator map.
    fn handle_add_collaborator(
        &mut self,
        mut envelope: TypedEnvelope<proto::AddProjectCollaborator>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let user_store = self.user_store.clone();
        let collaborator = envelope
            .payload
            .collaborator
            .take()
            .ok_or_else(|| anyhow!("empty collaborator"))?;

        // Resolving the collaborator may require fetching the user from the
        // user store, so do it asynchronously; failures are logged, not
        // propagated.
        cx.spawn(|this, mut cx| {
            async move {
                let collaborator =
                    Collaborator::from_proto(collaborator, &user_store, &mut cx).await?;
                this.update(&mut cx, |this, cx| {
                    this.collaborators
                        .insert(collaborator.peer_id, collaborator);
                    cx.notify();
                });
                Ok(())
            }
            .log_err()
        })
        .detach();

        Ok(())
    }
1353
1354 fn handle_remove_collaborator(
1355 &mut self,
1356 envelope: TypedEnvelope<proto::RemoveProjectCollaborator>,
1357 _: Arc<Client>,
1358 cx: &mut ModelContext<Self>,
1359 ) -> Result<()> {
1360 let peer_id = PeerId(envelope.payload.peer_id);
1361 let replica_id = self
1362 .collaborators
1363 .remove(&peer_id)
1364 .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?
1365 .replica_id;
1366 self.shared_buffers.remove(&peer_id);
1367 for (_, buffer) in &self.open_buffers {
1368 if let OpenBuffer::Loaded(buffer) = buffer {
1369 if let Some(buffer) = buffer.upgrade(cx) {
1370 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1371 }
1372 }
1373 }
1374 cx.notify();
1375 Ok(())
1376 }
1377
    /// Handles the host sharing a worktree with this remote project: builds a
    /// remote worktree replica and adds it to the project.
    fn handle_share_worktree(
        &mut self,
        envelope: TypedEnvelope<proto::ShareWorktree>,
        client: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let remote_id = self.remote_id().ok_or_else(|| anyhow!("invalid project"))?;
        let replica_id = self.replica_id();
        let worktree = envelope
            .payload
            .worktree
            .ok_or_else(|| anyhow!("invalid worktree"))?;
        // Constructing the replica is asynchronous; failures are logged
        // rather than propagated to the message handler.
        cx.spawn(|this, mut cx| {
            async move {
                let worktree =
                    Worktree::remote(remote_id, replica_id, worktree, client, &mut cx).await?;
                this.update(&mut cx, |this, cx| this.add_worktree(&worktree, cx));
                Ok(())
            }
            .log_err()
        })
        .detach();
        Ok(())
    }
1402
    /// Handles the host unregistering a worktree: removes the corresponding
    /// worktree from this project, if it is still present.
    fn handle_unregister_worktree(
        &mut self,
        envelope: TypedEnvelope<proto::UnregisterWorktree>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        self.remove_worktree(worktree_id, cx);
        Ok(())
    }
1413
1414 fn handle_update_worktree(
1415 &mut self,
1416 envelope: TypedEnvelope<proto::UpdateWorktree>,
1417 _: Arc<Client>,
1418 cx: &mut ModelContext<Self>,
1419 ) -> Result<()> {
1420 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1421 if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
1422 worktree.update(cx, |worktree, cx| {
1423 let worktree = worktree.as_remote_mut().unwrap();
1424 worktree.update_from_remote(envelope, cx)
1425 })?;
1426 }
1427 Ok(())
1428 }
1429
    /// Handles a host-sent per-path diagnostic summary: stores it on the
    /// matching remote worktree and emits `DiagnosticsUpdated` for that path.
    fn handle_update_diagnostic_summary(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        // Summaries for unknown worktrees are ignored; the worktree may have
        // been removed locally already.
        if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
            if let Some(summary) = envelope.payload.summary {
                let project_path = ProjectPath {
                    worktree_id,
                    path: Path::new(&summary.path).into(),
                };
                worktree.update(cx, |worktree, _| {
                    worktree
                        .as_remote_mut()
                        .unwrap()
                        .update_diagnostic_summary(project_path.path.clone(), &summary);
                });
                cx.emit(Event::DiagnosticsUpdated(project_path));
            }
        }
        Ok(())
    }
1454
    /// Forwards a host notification that a disk-based diagnostics pass has
    /// started to the local counter/event bookkeeping.
    fn handle_disk_based_diagnostics_updating(
        &mut self,
        _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdating>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        self.disk_based_diagnostics_started(cx);
        Ok(())
    }
1464
    /// Forwards a host notification that a disk-based diagnostics pass has
    /// finished to the local counter/event bookkeeping.
    fn handle_disk_based_diagnostics_updated(
        &mut self,
        _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdated>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        self.disk_based_diagnostics_finished(cx);
        Ok(())
    }
1474
1475 pub fn handle_update_buffer(
1476 &mut self,
1477 envelope: TypedEnvelope<proto::UpdateBuffer>,
1478 _: Arc<Client>,
1479 cx: &mut ModelContext<Self>,
1480 ) -> Result<()> {
1481 let payload = envelope.payload.clone();
1482 let buffer_id = payload.buffer_id as usize;
1483 let ops = payload
1484 .operations
1485 .into_iter()
1486 .map(|op| language::proto::deserialize_operation(op))
1487 .collect::<Result<Vec<_>, _>>()?;
1488 match self.open_buffers.get_mut(&buffer_id) {
1489 Some(OpenBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
1490 Some(OpenBuffer::Loaded(buffer)) => {
1491 if let Some(buffer) = buffer.upgrade(cx) {
1492 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
1493 } else {
1494 self.open_buffers
1495 .insert(buffer_id, OpenBuffer::Operations(ops));
1496 }
1497 }
1498 None => {
1499 self.open_buffers
1500 .insert(buffer_id, OpenBuffer::Operations(ops));
1501 }
1502 }
1503 Ok(())
1504 }
1505
1506 pub fn handle_update_buffer_file(
1507 &mut self,
1508 envelope: TypedEnvelope<proto::UpdateBufferFile>,
1509 _: Arc<Client>,
1510 cx: &mut ModelContext<Self>,
1511 ) -> Result<()> {
1512 let payload = envelope.payload.clone();
1513 let buffer_id = payload.buffer_id as usize;
1514 let file = payload.file.ok_or_else(|| anyhow!("invalid file"))?;
1515 let worktree = self
1516 .worktree_for_id(WorktreeId::from_proto(file.worktree_id), cx)
1517 .ok_or_else(|| anyhow!("no such worktree"))?;
1518 let file = File::from_proto(file, worktree.clone(), cx)?;
1519 let buffer = self
1520 .open_buffers
1521 .get_mut(&buffer_id)
1522 .and_then(|b| b.upgrade(cx))
1523 .ok_or_else(|| anyhow!("no such buffer"))?;
1524 buffer.update(cx, |buffer, cx| {
1525 buffer.file_updated(Box::new(file), cx).detach();
1526 });
1527
1528 Ok(())
1529 }
1530
    /// Handles a guest's request to save a buffer: saves it locally and
    /// responds with the saved version and mtime.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let project_id = self.remote_id().ok_or_else(|| anyhow!("not connected"))?;
        // Guests may only save buffers that were previously shared with them.
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let receipt = envelope.receipt();
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
        });

        // Respond once the save completes; failures are logged rather than
        // propagated out of the handler.
        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();
        Ok(())
    }
1573
    /// Handles a guest's request to format a buffer: formats it locally, then
    /// acknowledges success or reports the error back to the requester.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        // Guests may only format buffers that were previously shared with them.
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    // Surface the formatting failure to the requesting guest.
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();
        Ok(())
    }
1612
    /// Handles a guest's request to open a buffer: opens (or reuses) the
    /// buffer locally, retains it on the guest's behalf, and responds with
    /// the full buffer state.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();
        let peer_id = envelope.original_sender_id()?;
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        let open_buffer = self.open_buffer(
            ProjectPath {
                worktree_id,
                path: PathBuf::from(envelope.payload.path).into(),
            },
            cx,
        );
        cx.spawn(|this, mut cx| {
            async move {
                let buffer = open_buffer.await?;
                // Keep the buffer alive for this guest until they close it
                // (see `handle_close_buffer`).
                this.update(&mut cx, |this, _| {
                    this.shared_buffers
                        .entry(peer_id)
                        .or_default()
                        .insert(buffer.id() as u64, buffer.clone());
                });
                let message = buffer.read_with(&cx, |buffer, _| buffer.to_proto());
                rpc.respond(
                    receipt,
                    proto::OpenBufferResponse {
                        buffer: Some(message),
                    },
                )
                .await
            }
            .log_err()
        })
        .detach();
        Ok(())
    }
1652
1653 pub fn handle_close_buffer(
1654 &mut self,
1655 envelope: TypedEnvelope<proto::CloseBuffer>,
1656 _: Arc<Client>,
1657 cx: &mut ModelContext<Self>,
1658 ) -> anyhow::Result<()> {
1659 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1660 shared_buffers.remove(&envelope.payload.buffer_id);
1661 cx.notify();
1662 }
1663 Ok(())
1664 }
1665
1666 pub fn handle_buffer_saved(
1667 &mut self,
1668 envelope: TypedEnvelope<proto::BufferSaved>,
1669 _: Arc<Client>,
1670 cx: &mut ModelContext<Self>,
1671 ) -> Result<()> {
1672 let payload = envelope.payload.clone();
1673 let buffer = self
1674 .open_buffers
1675 .get(&(payload.buffer_id as usize))
1676 .and_then(|buf| {
1677 if let OpenBuffer::Loaded(buffer) = buf {
1678 buffer.upgrade(cx)
1679 } else {
1680 None
1681 }
1682 });
1683 if let Some(buffer) = buffer {
1684 buffer.update(cx, |buffer, cx| {
1685 let version = payload.version.try_into()?;
1686 let mtime = payload
1687 .mtime
1688 .ok_or_else(|| anyhow!("missing mtime"))?
1689 .into();
1690 buffer.did_save(version, mtime, None, cx);
1691 Result::<_, anyhow::Error>::Ok(())
1692 })?;
1693 }
1694 Ok(())
1695 }
1696
    /// Fuzzy-matches `query` against the paths of all non-weak worktrees,
    /// returning up to `max_results` matches. The search can be aborted early
    /// via `cancel_flag`.
    pub fn match_paths<'a>(
        &self,
        query: &'a str,
        include_ignored: bool,
        smart_case: bool,
        max_results: usize,
        cancel_flag: &'a AtomicBool,
        cx: &AppContext,
    ) -> impl 'a + Future<Output = Vec<PathMatch>> {
        // Weak worktrees are transient and excluded from path search.
        let worktrees = self
            .worktrees(cx)
            .filter(|worktree| !worktree.read(cx).is_weak())
            .collect::<Vec<_>>();
        // Prefix candidates with their root name only when more than one
        // worktree participates, to disambiguate results.
        let include_root_name = worktrees.len() > 1;
        // Snapshot each worktree up front so matching can run off the app
        // context, on the background executor.
        let candidate_sets = worktrees
            .into_iter()
            .map(|worktree| CandidateSet {
                snapshot: worktree.read(cx).snapshot(),
                include_ignored,
                include_root_name,
            })
            .collect::<Vec<_>>();

        let background = cx.background().clone();
        async move {
            fuzzy::match_paths(
                candidate_sets.as_slice(),
                query,
                smart_case,
                max_results,
                cancel_flag,
                background,
            )
            .await
        }
    }
1733}
1734
1735impl WorktreeHandle {
1736 pub fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Worktree>> {
1737 match self {
1738 WorktreeHandle::Strong(handle) => Some(handle.clone()),
1739 WorktreeHandle::Weak(handle) => handle.upgrade(cx),
1740 }
1741 }
1742}
1743
/// Adapts a worktree snapshot to the fuzzy matcher's candidate-set interface.
struct CandidateSet {
    snapshot: Snapshot,          // immutable view of the worktree's entries
    include_ignored: bool,       // whether ignored files participate in matching
    include_root_name: bool,     // prefix paths with "<root>/" for multi-worktree search
}
1749
impl<'a> PathMatchCandidateSet<'a> for CandidateSet {
    type Candidates = CandidateSetIter<'a>;

    // Identify the set by the id of the snapshot's worktree.
    fn id(&self) -> usize {
        self.snapshot.id().to_usize()
    }

    // Candidate count, including ignored files only when requested.
    fn len(&self) -> usize {
        if self.include_ignored {
            self.snapshot.file_count()
        } else {
            self.snapshot.visible_file_count()
        }
    }

    // Prefix prepended to each candidate path: the root name itself when the
    // worktree root is a single file, "<root>/" when results from multiple
    // worktrees must be disambiguated, and nothing otherwise.
    fn prefix(&self) -> Arc<str> {
        if self.snapshot.root_entry().map_or(false, |e| e.is_file()) {
            self.snapshot.root_name().into()
        } else if self.include_root_name {
            format!("{}/", self.snapshot.root_name()).into()
        } else {
            "".into()
        }
    }

    // Iterate over candidate files starting at the given index.
    fn candidates(&'a self, start: usize) -> Self::Candidates {
        CandidateSetIter {
            traversal: self.snapshot.files(self.include_ignored, start),
        }
    }
}
1781
/// Iterator adapter turning a worktree file traversal into fuzzy-match
/// candidates.
struct CandidateSetIter<'a> {
    traversal: Traversal<'a>,
}
1785
1786impl<'a> Iterator for CandidateSetIter<'a> {
1787 type Item = PathMatchCandidate<'a>;
1788
1789 fn next(&mut self) -> Option<Self::Item> {
1790 self.traversal.next().map(|entry| {
1791 if let EntryKind::File(char_bag) = entry.kind {
1792 PathMatchCandidate {
1793 path: &entry.path,
1794 char_bag,
1795 }
1796 } else {
1797 unreachable!()
1798 }
1799 })
1800 }
1801}
1802
impl Entity for Project {
    type Event = Event;

    // When the project model is dropped, inform the server: a local project
    // unregisters itself (if it had a remote id), a remote project leaves the
    // host's project. Send failures are logged, not propagated.
    fn release(&mut self, cx: &mut gpui::MutableAppContext) {
        match &self.client_state {
            ProjectClientState::Local { remote_id_rx, .. } => {
                if let Some(project_id) = *remote_id_rx.borrow() {
                    let rpc = self.client.clone();
                    cx.spawn(|_| async move {
                        if let Err(err) = rpc.send(proto::UnregisterProject { project_id }).await {
                            log::error!("error unregistering project: {}", err);
                        }
                    })
                    .detach();
                }
            }
            ProjectClientState::Remote { remote_id, .. } => {
                let rpc = self.client.clone();
                let project_id = *remote_id;
                cx.spawn(|_| async move {
                    if let Err(err) = rpc.send(proto::LeaveProject { project_id }).await {
                        log::error!("error leaving project: {}", err);
                    }
                })
                .detach();
            }
        }
    }

    // Before the app exits, shut down every language server and wait for all
    // shutdowns to complete.
    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        let shutdown_futures = self
            .language_servers
            .drain()
            .filter_map(|(_, server)| server.shutdown())
            .collect::<Vec<_>>();
        Some(
            async move {
                futures::future::join_all(shutdown_futures).await;
            }
            .boxed(),
        )
    }
}
1851
1852impl Collaborator {
1853 fn from_proto(
1854 message: proto::Collaborator,
1855 user_store: &ModelHandle<UserStore>,
1856 cx: &mut AsyncAppContext,
1857 ) -> impl Future<Output = Result<Self>> {
1858 let user = user_store.update(cx, |user_store, cx| {
1859 user_store.fetch_user(message.user_id, cx)
1860 });
1861
1862 async move {
1863 Ok(Self {
1864 peer_id: PeerId(message.peer_id),
1865 user: user.await?,
1866 replica_id: message.replica_id as ReplicaId,
1867 })
1868 }
1869 }
1870}
1871
1872impl<P: AsRef<Path>> From<(WorktreeId, P)> for ProjectPath {
1873 fn from((worktree_id, path): (WorktreeId, P)) -> Self {
1874 Self {
1875 worktree_id,
1876 path: path.as_ref().into(),
1877 }
1878 }
1879}
1880
1881impl OpenBuffer {
1882 fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Buffer>> {
1883 match self {
1884 OpenBuffer::Loaded(buffer) => buffer.upgrade(cx),
1885 OpenBuffer::Operations(_) => None,
1886 }
1887 }
1888}
1889
1890#[cfg(test)]
1891mod tests {
1892 use super::{Event, *};
1893 use client::test::FakeHttpClient;
1894 use fs::RealFs;
1895 use futures::StreamExt;
1896 use gpui::{test::subscribe, TestAppContext};
1897 use language::{
1898 tree_sitter_rust, AnchorRangeExt, Diagnostic, LanguageConfig, LanguageRegistry,
1899 LanguageServerConfig, Point,
1900 };
1901 use lsp::Url;
1902 use serde_json::json;
1903 use std::{cell::RefCell, os::unix, path::PathBuf, rc::Rc};
1904 use unindent::Unindent as _;
1905 use util::test::temp_tree;
1906 use worktree::WorktreeHandle as _;
1907
    // Verifies end to end that scanning a worktree (opened through a symlink,
    // and containing a symlinked directory) and fuzzy-matching paths against
    // it both work.
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        // Open the worktree via a symlink to the root, and create a symlinked
        // directory ("finnochio" -> "fennel") inside the tree.
        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let project = build_project(Arc::new(RealFs), &mut cx);

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(&root_link_path, false, cx)
            })
            .await
            .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert_eq!(tree.file_count(), 5);
            // Files reached through the symlinked directory resolve to the
            // same inodes as the originals.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );
        });

        // Fuzzy-search "bna" and expect only the files under "banana".
        let cancel_flag = Default::default();
        let results = project
            .read_with(&cx, |project, cx| {
                project.match_paths("bna", false, false, 10, &cancel_flag, cx)
            })
            .await;
        assert_eq!(
            results
                .into_iter()
                .map(|result| result.path)
                .collect::<Vec<Arc<Path>>>(),
            vec![
                PathBuf::from("banana/carrot/date").into(),
                PathBuf::from("banana/carrot/endive").into(),
            ]
        );
    }
1970
    // Exercises the disk-based-diagnostics event flow against a fake language
    // server: Started fires on the first progress, Updated/Finished once all
    // overlapping progress tokens end, and published diagnostics land on the
    // right buffer.
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();

        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        path: Path::new("b.rs").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();

        let mut events = subscribe(&project, &mut cx);

        // First progress report triggers the Started event.
        fake_server.start_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsStarted
        );

        // Overlapping progress: start/end/start keeps the pass "running".
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        // Publishing diagnostics for a.rs emits DiagnosticsUpdated for it.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiagnosticsUpdated(ProjectPath {
                worktree_id,
                path: Arc::from(Path::new("a.rs"))
            })
        );

        // Ending the remaining progress tokens completes the pass.
        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdated
        );
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsFinished
        );

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        // The published diagnostic is visible on the newly opened buffer.
        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
2103
    // Path search only considers files, so a worktree containing nothing but
    // directories must yield no matches.
    #[gpui::test]
    async fn test_search_worktree_without_files(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "dir1": {},
                "dir2": {
                    "dir3": {}
                }
            }
        }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(&dir.path(), false, cx)
            })
            .await
            .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // "dir" matches the directory names, but directories are not candidates.
        let cancel_flag = Default::default();
        let results = project
            .read_with(&cx, |project, cx| {
                project.match_paths("dir", false, false, 10, &cancel_flag, cx)
            })
            .await;

        assert!(results.is_empty());
    }
2135
    // Go-to-definition across files: the target file lives outside the opened
    // single-file worktree, so the project must create a weak worktree for it
    // that is dropped again once the definition is no longer referenced.
    #[gpui::test]
    async fn test_definition(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;

        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "const fn a() { A }",
            "b.rs": "const y: i32 = crate::a()",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        // Open only b.rs as a single-file worktree; a.rs is outside it.
        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path().join("b.rs"), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        path: Path::new("").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();
        let definitions =
            project.update(&mut cx, |project, cx| project.definition(&buffer, 22, cx));
        // The project should forward the request to the language server with
        // the correct document URI and position.
        let (request_id, request) = fake_server
            .receive_request::<lsp::request::GotoDefinition>()
            .await;
        let request_params = request.text_document_position_params;
        assert_eq!(
            request_params.text_document.uri.to_file_path().unwrap(),
            dir.path().join("b.rs")
        );
        assert_eq!(request_params.position, lsp::Position::new(0, 22));

        // Respond with a location inside a.rs, which is outside the worktree.
        fake_server
            .respond(
                request_id,
                Some(lsp::GotoDefinitionResponse::Scalar(lsp::Location::new(
                    lsp::Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                    lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                ))),
            )
            .await;
        let mut definitions = definitions.await.unwrap();
        assert_eq!(definitions.len(), 1);
        let definition = definitions.pop().unwrap();
        cx.update(|cx| {
            let target_buffer = definition.target_buffer.read(cx);
            assert_eq!(
                target_buffer
                    .file()
                    .unwrap()
                    .as_local()
                    .unwrap()
                    .abs_path(cx),
                dir.path().join("a.rs")
            );
            assert_eq!(definition.target_range.to_offset(target_buffer), 9..10);
            // a.rs was opened through a weak worktree created on demand.
            assert_eq!(
                list_worktrees(&project, cx),
                [
                    (dir.path().join("b.rs"), false),
                    (dir.path().join("a.rs"), true)
                ]
            );

            drop(definition);
        });
        // Dropping the definition releases the weak worktree for a.rs.
        cx.read(|cx| {
            assert_eq!(
                list_worktrees(&project, cx),
                [(dir.path().join("b.rs"), false)]
            );
        });

        // Helper: list each worktree's absolute path and weakness flag.
        fn list_worktrees(project: &ModelHandle<Project>, cx: &AppContext) -> Vec<(PathBuf, bool)> {
            project
                .read(cx)
                .worktrees(cx)
                .map(|worktree| {
                    let worktree = worktree.read(cx);
                    (
                        worktree.as_local().unwrap().abs_path().to_path_buf(),
                        worktree.is_weak(),
                    )
                })
                .collect::<Vec<_>>()
        }
    }
2260
2261 #[gpui::test]
2262 async fn test_save_file(mut cx: gpui::TestAppContext) {
2263 let fs = Arc::new(FakeFs::new());
2264 fs.insert_tree(
2265 "/dir",
2266 json!({
2267 "file1": "the old contents",
2268 }),
2269 )
2270 .await;
2271
2272 let project = build_project(fs.clone(), &mut cx);
2273 let worktree_id = project
2274 .update(&mut cx, |p, cx| {
2275 p.find_or_create_local_worktree("/dir", false, cx)
2276 })
2277 .await
2278 .unwrap()
2279 .0
2280 .read_with(&cx, |tree, _| tree.id());
2281
2282 let buffer = project
2283 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
2284 .await
2285 .unwrap();
2286 buffer
2287 .update(&mut cx, |buffer, cx| {
2288 assert_eq!(buffer.text(), "the old contents");
2289 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
2290 buffer.save(cx)
2291 })
2292 .await
2293 .unwrap();
2294
2295 let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
2296 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
2297 }
2298
2299 #[gpui::test]
2300 async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
2301 let fs = Arc::new(FakeFs::new());
2302 fs.insert_tree(
2303 "/dir",
2304 json!({
2305 "file1": "the old contents",
2306 }),
2307 )
2308 .await;
2309
2310 let project = build_project(fs.clone(), &mut cx);
2311 let worktree_id = project
2312 .update(&mut cx, |p, cx| {
2313 p.find_or_create_local_worktree("/dir/file1", false, cx)
2314 })
2315 .await
2316 .unwrap()
2317 .0
2318 .read_with(&cx, |tree, _| tree.id());
2319
2320 let buffer = project
2321 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, ""), cx))
2322 .await
2323 .unwrap();
2324 buffer
2325 .update(&mut cx, |buffer, cx| {
2326 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
2327 buffer.save(cx)
2328 })
2329 .await
2330 .unwrap();
2331
2332 let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
2333 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
2334 }
2335
2336 #[gpui::test]
2337 async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
2338 let dir = temp_tree(json!({
2339 "a": {
2340 "file1": "",
2341 "file2": "",
2342 "file3": "",
2343 },
2344 "b": {
2345 "c": {
2346 "file4": "",
2347 "file5": "",
2348 }
2349 }
2350 }));
2351
2352 let project = build_project(Arc::new(RealFs), &mut cx);
2353 let rpc = project.read_with(&cx, |p, _| p.client.clone());
2354
2355 let (tree, _) = project
2356 .update(&mut cx, |p, cx| {
2357 p.find_or_create_local_worktree(dir.path(), false, cx)
2358 })
2359 .await
2360 .unwrap();
2361 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
2362
2363 let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
2364 let buffer = project.update(cx, |p, cx| p.open_buffer((worktree_id, path), cx));
2365 async move { buffer.await.unwrap() }
2366 };
2367 let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
2368 tree.read_with(cx, |tree, _| {
2369 tree.entry_for_path(path)
2370 .expect(&format!("no entry for path {}", path))
2371 .id
2372 })
2373 };
2374
2375 let buffer2 = buffer_for_path("a/file2", &mut cx).await;
2376 let buffer3 = buffer_for_path("a/file3", &mut cx).await;
2377 let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
2378 let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
2379
2380 let file2_id = id_for_path("a/file2", &cx);
2381 let file3_id = id_for_path("a/file3", &cx);
2382 let file4_id = id_for_path("b/c/file4", &cx);
2383
2384 // Wait for the initial scan.
2385 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2386 .await;
2387
2388 // Create a remote copy of this worktree.
2389 let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
2390 let remote = Worktree::remote(
2391 1,
2392 1,
2393 initial_snapshot.to_proto(&Default::default(), Default::default()),
2394 rpc.clone(),
2395 &mut cx.to_async(),
2396 )
2397 .await
2398 .unwrap();
2399
2400 cx.read(|cx| {
2401 assert!(!buffer2.read(cx).is_dirty());
2402 assert!(!buffer3.read(cx).is_dirty());
2403 assert!(!buffer4.read(cx).is_dirty());
2404 assert!(!buffer5.read(cx).is_dirty());
2405 });
2406
2407 // Rename and delete files and directories.
2408 tree.flush_fs_events(&cx).await;
2409 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
2410 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
2411 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
2412 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
2413 tree.flush_fs_events(&cx).await;
2414
2415 let expected_paths = vec![
2416 "a",
2417 "a/file1",
2418 "a/file2.new",
2419 "b",
2420 "d",
2421 "d/file3",
2422 "d/file4",
2423 ];
2424
2425 cx.read(|app| {
2426 assert_eq!(
2427 tree.read(app)
2428 .paths()
2429 .map(|p| p.to_str().unwrap())
2430 .collect::<Vec<_>>(),
2431 expected_paths
2432 );
2433
2434 assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
2435 assert_eq!(id_for_path("d/file3", &cx), file3_id);
2436 assert_eq!(id_for_path("d/file4", &cx), file4_id);
2437
2438 assert_eq!(
2439 buffer2.read(app).file().unwrap().path().as_ref(),
2440 Path::new("a/file2.new")
2441 );
2442 assert_eq!(
2443 buffer3.read(app).file().unwrap().path().as_ref(),
2444 Path::new("d/file3")
2445 );
2446 assert_eq!(
2447 buffer4.read(app).file().unwrap().path().as_ref(),
2448 Path::new("d/file4")
2449 );
2450 assert_eq!(
2451 buffer5.read(app).file().unwrap().path().as_ref(),
2452 Path::new("b/c/file5")
2453 );
2454
2455 assert!(!buffer2.read(app).file().unwrap().is_deleted());
2456 assert!(!buffer3.read(app).file().unwrap().is_deleted());
2457 assert!(!buffer4.read(app).file().unwrap().is_deleted());
2458 assert!(buffer5.read(app).file().unwrap().is_deleted());
2459 });
2460
2461 // Update the remote worktree. Check that it becomes consistent with the
2462 // local worktree.
2463 remote.update(&mut cx, |remote, cx| {
2464 let update_message =
2465 tree.read(cx)
2466 .snapshot()
2467 .build_update(&initial_snapshot, 1, 1, true);
2468 remote
2469 .as_remote_mut()
2470 .unwrap()
2471 .snapshot
2472 .apply_remote_update(update_message)
2473 .unwrap();
2474
2475 assert_eq!(
2476 remote
2477 .paths()
2478 .map(|p| p.to_str().unwrap())
2479 .collect::<Vec<_>>(),
2480 expected_paths
2481 );
2482 });
2483 }
2484
2485 #[gpui::test]
2486 async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
2487 let fs = Arc::new(FakeFs::new());
2488 fs.insert_tree(
2489 "/the-dir",
2490 json!({
2491 "a.txt": "a-contents",
2492 "b.txt": "b-contents",
2493 }),
2494 )
2495 .await;
2496
2497 let project = build_project(fs.clone(), &mut cx);
2498 let worktree_id = project
2499 .update(&mut cx, |p, cx| {
2500 p.find_or_create_local_worktree("/the-dir", false, cx)
2501 })
2502 .await
2503 .unwrap()
2504 .0
2505 .read_with(&cx, |tree, _| tree.id());
2506
2507 // Spawn multiple tasks to open paths, repeating some paths.
2508 let (buffer_a_1, buffer_b, buffer_a_2) = project.update(&mut cx, |p, cx| {
2509 (
2510 p.open_buffer((worktree_id, "a.txt"), cx),
2511 p.open_buffer((worktree_id, "b.txt"), cx),
2512 p.open_buffer((worktree_id, "a.txt"), cx),
2513 )
2514 });
2515
2516 let buffer_a_1 = buffer_a_1.await.unwrap();
2517 let buffer_a_2 = buffer_a_2.await.unwrap();
2518 let buffer_b = buffer_b.await.unwrap();
2519 assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
2520 assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
2521
2522 // There is only one buffer per path.
2523 let buffer_a_id = buffer_a_1.id();
2524 assert_eq!(buffer_a_2.id(), buffer_a_id);
2525
2526 // Open the same path again while it is still open.
2527 drop(buffer_a_1);
2528 let buffer_a_3 = project
2529 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))
2530 .await
2531 .unwrap();
2532
2533 // There's still only one buffer per path.
2534 assert_eq!(buffer_a_3.id(), buffer_a_id);
2535 }
2536
    // Verifies the dirty-tracking lifecycle of buffers: editing dirties a
    // buffer (emitting Dirtied), saving cleans it (emitting Saved), and
    // deleting the underlying file dirties a clean buffer but not an
    // already-dirty one.
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |worktree, _| worktree.id());

        // Let any pending fs events and the initial scan settle before
        // making assertions about buffer state.
        worktree.flush_fs_events(&cx).await;
        worktree
            .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
            .await;

        let buffer1 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
            .await
            .unwrap();
        // Accumulates every event the buffer emits, for ordering assertions.
        let events = Rc::new(RefCell::new(Vec::new()));

        // Initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // After the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            // Simulate a completed save without touching disk.
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // After saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // After editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file2"), cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file3"), cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        // Dirty the buffer *before* deleting the file on disk.
        worktree.flush_fs_events(&cx).await;
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
2673
    // Verifies how a buffer reacts to its file changing on disk: a clean
    // buffer silently reloads the new contents, while a dirty buffer keeps
    // its edits and is flagged as conflicted instead.
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        // Wait for the initial scan so the file entry exists before opening it.
        worktree
            .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = project
            .update(&mut cx, |p, cx| {
                p.open_buffer((worktree_id, "the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        // The reload must not leave the buffer dirty or conflicted.
        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
2773
    // Verifies that diagnostics published by a language server are grouped via
    // their `related_information` links: the primary diagnostic and its hints
    // share a `group_id`, and `diagnostic_group` returns each group in full.
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/the-dir", false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        // Build an LSP publish-diagnostics message containing two logical
        // groups: "error 1" with one hint, and "error 2" with two hints. Each
        // primary links to its hints (and vice versa) via related_information.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                // Group 0 primary: "error 1".
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                // Group 0 hint, linking back to its primary.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                // Group 1 primary: "error 2", linking to both of its hints.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                // Group 1 hints, each linking back to the primary.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        // Feed the message through the project's diagnostics pipeline and take
        // a snapshot of the resulting buffer state.
        project
            .update(&mut cx, |p, cx| {
                p.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All diagnostics, ordered by position, with the expected group ids
        // and primary flags.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Fetching group 0 yields exactly the "error 1" primary and its hint.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        // Fetching group 1 yields the "error 2" primary and both hints.
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
3032
3033 fn build_project(fs: Arc<dyn Fs>, cx: &mut TestAppContext) -> ModelHandle<Project> {
3034 let languages = Arc::new(LanguageRegistry::new());
3035 let http_client = FakeHttpClient::with_404_response();
3036 let client = client::Client::new(http_client.clone());
3037 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3038 cx.update(|cx| Project::local(client, user_store, languages, fs, cx))
3039 }
3040}