1pub mod fs;
2mod ignore;
3pub mod worktree;
4
5use anyhow::{anyhow, Result};
6use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore};
7use clock::ReplicaId;
8use collections::{hash_map, HashMap, HashSet};
9use futures::Future;
10use fuzzy::{PathMatch, PathMatchCandidate, PathMatchCandidateSet};
11use gpui::{
12 AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task,
13 WeakModelHandle,
14};
15use language::{
16 range_from_lsp, Bias, Buffer, Diagnostic, DiagnosticEntry, File as _, Language,
17 LanguageRegistry, Operation, PointUtf16, ToOffset, ToPointUtf16,
18};
19use lsp::{DiagnosticSeverity, LanguageServer};
20use postage::{prelude::Stream, watch};
21use smol::block_on;
22use std::{
23 convert::TryInto,
24 ops::Range,
25 path::{Path, PathBuf},
26 sync::{atomic::AtomicBool, Arc},
27};
28use util::{post_inc, ResultExt, TryFutureExt as _};
29
30pub use fs::*;
31pub use worktree::*;
32
/// Root model of a project: a set of worktrees plus the buffers, language
/// servers, collaborators, and RPC state associated with them. A project is
/// either local (host side) or remote (guest side) — see `ProjectClientState`.
pub struct Project {
    // Worktree handles may be weak so unreferenced (weak) worktrees can drop.
    worktrees: Vec<WorktreeHandle>,
    active_entry: Option<ProjectEntry>,
    languages: Arc<LanguageRegistry>,
    // One language server per (worktree, language-name) pair.
    language_servers: HashMap<(WorktreeId, String), Arc<LanguageServer>>,
    client: Arc<client::Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    client_state: ProjectClientState,
    collaborators: HashMap<PeerId, Collaborator>,
    subscriptions: Vec<client::Subscription>,
    // Counts servers currently producing disk-based diagnostics; used to emit
    // started/finished events (see disk_based_diagnostics_* methods).
    language_servers_with_diagnostics_running: isize,
    // Keyed by the buffer's remote id (cast to usize).
    open_buffers: HashMap<usize, OpenBuffer>,
    // In-flight buffer loads, so concurrent opens of the same path share one
    // load; watchers receive the final Ok(buffer) or shared error.
    loading_buffers: HashMap<
        ProjectPath,
        postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
    >,
    // Buffers this (host) project has handed out to each guest peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
}
52
/// State of a buffer slot in `Project::open_buffers`.
enum OpenBuffer {
    /// Operations received before the buffer itself finished loading; they are
    /// buffered here to be applied once the buffer exists.
    Operations(Vec<Operation>),
    /// A loaded buffer, held weakly so dropping all strong handles releases it.
    Loaded(WeakModelHandle<Buffer>),
}
57
/// A worktree reference that is either kept alive by the project (strong) or
/// allowed to drop when no one else references it (weak).
enum WorktreeHandle {
    Strong(ModelHandle<Worktree>),
    Weak(WeakModelHandle<Worktree>),
}
62
/// Whether this project instance is the host (Local) or a guest (Remote).
enum ProjectClientState {
    Local {
        /// True while the project is actively shared with guests.
        is_shared: bool,
        /// Publishes the server-assigned project id (None when disconnected).
        remote_id_tx: watch::Sender<Option<u64>>,
        remote_id_rx: watch::Receiver<Option<u64>>,
        /// Background task that (re)registers the project on reconnect; kept
        /// here so it is dropped together with the project.
        _maintain_remote_id_task: Task<Option<()>>,
    },
    Remote {
        /// Set when the host stops sharing; the guest becomes read-only.
        sharing_has_stopped: bool,
        remote_id: u64,
        replica_id: ReplicaId,
    },
}
76
/// A remote participant in this project, identified by peer id and assigned a
/// CRDT replica id for their edits.
#[derive(Clone, Debug)]
pub struct Collaborator {
    pub user: Arc<User>,
    pub peer_id: PeerId,
    pub replica_id: ReplicaId,
}
83
/// Events emitted by `Project` for observers (e.g. UI panels).
#[derive(Clone, Debug, PartialEq)]
pub enum Event {
    ActiveEntryChanged(Option<ProjectEntry>),
    WorktreeRemoved(WorktreeId),
    // Lifecycle of disk-based diagnostics (e.g. a compiler run): started,
    // a batch of updates arrived, finished.
    DiskBasedDiagnosticsStarted,
    DiskBasedDiagnosticsUpdated,
    DiskBasedDiagnosticsFinished,
    /// Diagnostics changed for the given project-relative path.
    DiagnosticsUpdated(ProjectPath),
}
93
/// A path to a file, addressed relative to a specific worktree.
#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct ProjectPath {
    pub worktree_id: WorktreeId,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
}
99
/// Per-file counts of primary diagnostics, bucketed by severity.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct DiagnosticSummary {
    pub error_count: usize,
    pub warning_count: usize,
    pub info_count: usize,
    pub hint_count: usize,
}
107
/// A resolved go-to-definition target: a buffer plus the anchored range of the
/// definition within it.
#[derive(Debug)]
pub struct Definition {
    pub target_buffer: ModelHandle<Buffer>,
    pub target_range: Range<language::Anchor>,
}
113
114impl DiagnosticSummary {
115 fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
116 let mut this = Self {
117 error_count: 0,
118 warning_count: 0,
119 info_count: 0,
120 hint_count: 0,
121 };
122
123 for entry in diagnostics {
124 if entry.diagnostic.is_primary {
125 match entry.diagnostic.severity {
126 DiagnosticSeverity::ERROR => this.error_count += 1,
127 DiagnosticSeverity::WARNING => this.warning_count += 1,
128 DiagnosticSeverity::INFORMATION => this.info_count += 1,
129 DiagnosticSeverity::HINT => this.hint_count += 1,
130 _ => {}
131 }
132 }
133 }
134
135 this
136 }
137
138 pub fn to_proto(&self, path: Arc<Path>) -> proto::DiagnosticSummary {
139 proto::DiagnosticSummary {
140 path: path.to_string_lossy().to_string(),
141 error_count: self.error_count as u32,
142 warning_count: self.warning_count as u32,
143 info_count: self.info_count as u32,
144 hint_count: self.hint_count as u32,
145 }
146 }
147}
148
/// Identifies a single filesystem entry within a specific worktree.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ProjectEntry {
    pub worktree_id: WorktreeId,
    pub entry_id: usize,
}
154
155impl Project {
    /// Creates a new local (host-side) project.
    ///
    /// Spawns a background task that watches the client's connection status:
    /// on each (re)connect it registers the project with the server, re-registers
    /// every current worktree under the new project id, and publishes the id via
    /// `set_remote_id`; on disconnect the id is cleared.
    pub fn local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut MutableAppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let (remote_id_tx, remote_id_rx) = watch::channel();
            // Weak spawn: the task must not keep the project alive.
            let _maintain_remote_id_task = cx.spawn_weak({
                let rpc = client.clone();
                move |this, mut cx| {
                    async move {
                        let mut status = rpc.status();
                        while let Some(status) = status.recv().await {
                            // If the project was dropped, just keep draining
                            // status updates until the task itself is dropped.
                            if let Some(this) = this.upgrade(&cx) {
                                let remote_id = if let client::Status::Connected { .. } = status {
                                    let response = rpc.request(proto::RegisterProject {}).await?;
                                    Some(response.project_id)
                                } else {
                                    None
                                };

                                if let Some(project_id) = remote_id {
                                    // Kick off registration of all worktrees first,
                                    // then await them outside the update closure.
                                    let mut registrations = Vec::new();
                                    this.update(&mut cx, |this, cx| {
                                        for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                                            registrations.push(worktree.update(
                                                cx,
                                                |worktree, cx| {
                                                    let worktree = worktree.as_local_mut().unwrap();
                                                    worktree.register(project_id, cx)
                                                },
                                            ));
                                        }
                                    });
                                    for registration in registrations {
                                        registration.await?;
                                    }
                                }
                                this.update(&mut cx, |this, cx| this.set_remote_id(remote_id, cx));
                            }
                        }
                        Ok(())
                    }
                    .log_err()
                }
            });

            Self {
                worktrees: Default::default(),
                collaborators: Default::default(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                client_state: ProjectClientState::Local {
                    is_shared: false,
                    remote_id_tx,
                    remote_id_rx,
                    _maintain_remote_id_task,
                },
                subscriptions: Vec::new(),
                active_entry: None,
                languages,
                client,
                user_store,
                fs,
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            }
        })
    }
228
    /// Joins an existing shared project as a guest.
    ///
    /// Connects and authenticates, joins the project on the server, mirrors the
    /// host's worktrees and collaborator list, and subscribes to all project
    /// messages the guest needs to stay in sync.
    pub async fn remote(
        remote_id: u64,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        client.authenticate_and_connect(&cx).await?;

        let response = client
            .request(proto::JoinProject {
                project_id: remote_id,
            })
            .await?;

        // The server assigns this guest's CRDT replica id.
        let replica_id = response.replica_id as ReplicaId;

        let mut worktrees = Vec::new();
        for worktree in response.worktrees {
            worktrees
                .push(Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx).await?);
        }

        // Resolve collaborator user ids to full user records before building
        // the collaborator map.
        let user_ids = response
            .collaborators
            .iter()
            .map(|peer| peer.user_id)
            .collect();
        user_store
            .update(cx, |user_store, cx| user_store.load_users(user_ids, cx))
            .await?;
        let mut collaborators = HashMap::default();
        for message in response.collaborators {
            let collaborator = Collaborator::from_proto(message, &user_store, cx).await?;
            collaborators.insert(collaborator.peer_id, collaborator);
        }

        Ok(cx.add_model(|cx| {
            let mut this = Self {
                worktrees: Vec::new(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                active_entry: None,
                collaborators,
                languages,
                user_store,
                fs,
                // Route all project-scoped server messages to their handlers.
                subscriptions: vec![
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unshare_project),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_share_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unregister_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_update_diagnostic_summary,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updating,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updated,
                    ),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer_file),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_reloaded),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
                ],
                client,
                client_state: ProjectClientState::Remote {
                    sharing_has_stopped: false,
                    remote_id,
                    replica_id,
                },
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            };
            // Adding via add_worktree (rather than assigning the vec) wires up
            // the necessary observations/subscriptions per worktree.
            for worktree in worktrees {
                this.add_worktree(&worktree, cx);
            }
            this
        }))
    }
320
321 fn set_remote_id(&mut self, remote_id: Option<u64>, cx: &mut ModelContext<Self>) {
322 if let ProjectClientState::Local { remote_id_tx, .. } = &mut self.client_state {
323 *remote_id_tx.borrow_mut() = remote_id;
324 }
325
326 self.subscriptions.clear();
327 if let Some(remote_id) = remote_id {
328 let client = &self.client;
329 self.subscriptions.extend([
330 client.subscribe_to_entity(remote_id, cx, Self::handle_open_buffer),
331 client.subscribe_to_entity(remote_id, cx, Self::handle_close_buffer),
332 client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
333 client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
334 client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
335 client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
336 client.subscribe_to_entity(remote_id, cx, Self::handle_save_buffer),
337 client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
338 client.subscribe_to_entity(remote_id, cx, Self::handle_format_buffer),
339 ]);
340 }
341 }
342
343 pub fn remote_id(&self) -> Option<u64> {
344 match &self.client_state {
345 ProjectClientState::Local { remote_id_rx, .. } => *remote_id_rx.borrow(),
346 ProjectClientState::Remote { remote_id, .. } => Some(*remote_id),
347 }
348 }
349
350 pub fn next_remote_id(&self) -> impl Future<Output = u64> {
351 let mut id = None;
352 let mut watch = None;
353 match &self.client_state {
354 ProjectClientState::Local { remote_id_rx, .. } => watch = Some(remote_id_rx.clone()),
355 ProjectClientState::Remote { remote_id, .. } => id = Some(*remote_id),
356 }
357
358 async move {
359 if let Some(id) = id {
360 return id;
361 }
362 let mut watch = watch.unwrap();
363 loop {
364 let id = *watch.borrow();
365 if let Some(id) = id {
366 return id;
367 }
368 watch.recv().await;
369 }
370 }
371 }
372
373 pub fn replica_id(&self) -> ReplicaId {
374 match &self.client_state {
375 ProjectClientState::Local { .. } => 0,
376 ProjectClientState::Remote { replica_id, .. } => *replica_id,
377 }
378 }
379
    /// The current collaborators in this project, keyed by peer id.
    pub fn collaborators(&self) -> &HashMap<PeerId, Collaborator> {
        &self.collaborators
    }
383
    /// Iterates over the project's live worktrees, silently skipping any weak
    /// handles whose worktree has already been released.
    pub fn worktrees<'a>(
        &'a self,
        cx: &'a AppContext,
    ) -> impl 'a + Iterator<Item = ModelHandle<Worktree>> {
        self.worktrees
            .iter()
            .filter_map(move |worktree| worktree.upgrade(cx))
    }
392
393 pub fn worktree_for_id(
394 &self,
395 id: WorktreeId,
396 cx: &AppContext,
397 ) -> Option<ModelHandle<Worktree>> {
398 self.worktrees(cx)
399 .find(|worktree| worktree.read(cx).id() == id)
400 }
401
    /// Starts sharing this (local) project with guests.
    ///
    /// Marks the project shared, tells the server, then shares every worktree
    /// under the project's remote id. Fails if the project has no remote id
    /// yet or is itself a remote project.
    pub fn share(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
        let rpc = self.client.clone();
        cx.spawn(|this, mut cx| async move {
            // Flip the share flag and fetch the project id in one update.
            let project_id = this.update(&mut cx, |this, _| {
                if let ProjectClientState::Local {
                    is_shared,
                    remote_id_rx,
                    ..
                } = &mut this.client_state
                {
                    *is_shared = true;
                    remote_id_rx
                        .borrow()
                        .ok_or_else(|| anyhow!("no project id"))
                } else {
                    Err(anyhow!("can't share a remote project"))
                }
            })?;

            rpc.request(proto::ShareProject { project_id }).await?;
            // Kick off per-worktree shares inside the update, then await them
            // outside so the model isn't locked across await points.
            let mut tasks = Vec::new();
            this.update(&mut cx, |this, cx| {
                for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                    worktree.update(cx, |worktree, cx| {
                        let worktree = worktree.as_local_mut().unwrap();
                        tasks.push(worktree.share(project_id, cx));
                    });
                }
            });
            for task in tasks {
                task.await?;
            }
            this.update(&mut cx, |_, cx| cx.notify());
            Ok(())
        })
    }
438
439 pub fn unshare(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
440 let rpc = self.client.clone();
441 cx.spawn(|this, mut cx| async move {
442 let project_id = this.update(&mut cx, |this, _| {
443 if let ProjectClientState::Local {
444 is_shared,
445 remote_id_rx,
446 ..
447 } = &mut this.client_state
448 {
449 *is_shared = false;
450 remote_id_rx
451 .borrow()
452 .ok_or_else(|| anyhow!("no project id"))
453 } else {
454 Err(anyhow!("can't share a remote project"))
455 }
456 })?;
457
458 rpc.send(proto::UnshareProject { project_id }).await?;
459 this.update(&mut cx, |this, cx| {
460 this.collaborators.clear();
461 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
462 worktree.update(cx, |worktree, _| {
463 worktree.as_local_mut().unwrap().unshare();
464 });
465 }
466 cx.notify()
467 });
468 Ok(())
469 })
470 }
471
472 pub fn is_read_only(&self) -> bool {
473 match &self.client_state {
474 ProjectClientState::Local { .. } => false,
475 ProjectClientState::Remote {
476 sharing_has_stopped,
477 ..
478 } => *sharing_has_stopped,
479 }
480 }
481
482 pub fn is_local(&self) -> bool {
483 match &self.client_state {
484 ProjectClientState::Local { .. } => true,
485 ProjectClientState::Remote { .. } => false,
486 }
487 }
488
    /// Opens (or returns an already-open) buffer for the given project path.
    ///
    /// Deduplicates concurrent opens: if the same path is already loading, the
    /// returned task waits on that load instead of starting another one.
    pub fn open_buffer(
        &mut self,
        path: impl Into<ProjectPath>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let project_path = path.into();
        let worktree = if let Some(worktree) = self.worktree_for_id(project_path.worktree_id, cx) {
            worktree
        } else {
            return Task::ready(Err(anyhow!("no such worktree")));
        };

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = self.get_open_buffer(&project_path, cx);
        if let Some(existing_buffer) = existing_buffer {
            return Task::ready(Ok(existing_buffer));
        }

        let mut loading_watch = match self.loading_buffers.entry(project_path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = worktree.update(cx, |worktree, cx| {
                    worktree.load_buffer(&project_path.path, cx)
                });

                // Detached task: its result is delivered to all waiters via the
                // watch channel, not the task handle.
                cx.spawn(move |this, mut cx| async move {
                    let load_result = load_buffer.await;
                    *tx.borrow_mut() = Some(this.update(&mut cx, |this, cx| {
                        // Record the fact that the buffer is no longer loading.
                        this.loading_buffers.remove(&project_path);
                        let buffer = load_result.map_err(Arc::new)?;
                        this.open_buffers.insert(
                            buffer.read(cx).remote_id() as usize,
                            OpenBuffer::Loaded(buffer.downgrade()),
                        );
                        this.assign_language_to_buffer(&worktree, &buffer, cx);
                        Ok(buffer)
                    }));
                })
                .detach();
                rx
            }
        };

        // Wait on the foreground executor until the load result is published.
        cx.foreground().spawn(async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    match result {
                        Ok(buffer) => return Ok(buffer.clone()),
                        // The error is shared among waiters via Arc; re-wrap it
                        // as a fresh anyhow error for this caller.
                        Err(error) => return Err(anyhow!("{}", error)),
                    }
                }
                loading_watch.recv().await;
            }
        })
    }
552
    /// Saves `buffer` to a new absolute path ("save as").
    ///
    /// Finds or creates a (non-weak) local worktree containing `abs_path`,
    /// saves through it, then registers the buffer as open and re-resolves its
    /// language/language server for the new location.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        abs_path: PathBuf,
        cx: &mut ModelContext<Project>,
    ) -> Task<Result<()>> {
        let worktree_task = self.find_or_create_local_worktree(&abs_path, false, cx);
        cx.spawn(|this, mut cx| async move {
            let (worktree, path) = worktree_task.await?;
            worktree
                .update(&mut cx, |worktree, cx| {
                    worktree
                        .as_local_mut()
                        .unwrap()
                        .save_buffer_as(buffer.clone(), path, cx)
                })
                .await?;
            this.update(&mut cx, |this, cx| {
                // NOTE(review): this keys by `buffer.id()` while open_buffer
                // keys by the buffer's remote_id — presumably identical for
                // local buffers; confirm.
                this.open_buffers
                    .insert(buffer.id(), OpenBuffer::Loaded(buffer.downgrade()));
                this.assign_language_to_buffer(&worktree, &buffer, cx);
            });
            Ok(())
        })
    }
578
579 #[cfg(any(test, feature = "test-support"))]
580 pub fn has_open_buffer(&self, path: impl Into<ProjectPath>, cx: &AppContext) -> bool {
581 let path = path.into();
582 if let Some(worktree) = self.worktree_for_id(path.worktree_id, cx) {
583 self.open_buffers.iter().any(|(_, buffer)| {
584 if let Some(buffer) = buffer.upgrade(cx) {
585 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
586 if file.worktree == worktree && file.path() == &path.path {
587 return true;
588 }
589 }
590 }
591 false
592 })
593 } else {
594 false
595 }
596 }
597
    /// Returns the open buffer for `path`, if any.
    ///
    /// As a side effect, garbage-collects `open_buffers`: entries whose weak
    /// handle no longer upgrades (and all `Operations` placeholders) are
    /// removed during the scan via `retain`.
    fn get_open_buffer(
        &mut self,
        path: &ProjectPath,
        cx: &mut ModelContext<Self>,
    ) -> Option<ModelHandle<Buffer>> {
        let mut result = None;
        let worktree = self.worktree_for_id(path.worktree_id, cx)?;
        self.open_buffers.retain(|_, buffer| {
            if let OpenBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                        if file.worktree == worktree && file.path() == &path.path {
                            result = Some(buffer);
                        }
                    }
                    // Keep live buffers regardless of whether they matched.
                    return true;
                }
            }
            // Drop dead weak handles and operation placeholders.
            false
        });
        result
    }
620
    /// Detects the buffer's language from its path, assigns it, hooks the
    /// buffer up to a (possibly newly started) language server for local
    /// worktrees, and replays any diagnostics previously stored for the path.
    ///
    /// The `Option<()>` return is only used for early exit via `?` when the
    /// buffer has no file; callers ignore the value.
    fn assign_language_to_buffer(
        &mut self,
        worktree: &ModelHandle<Worktree>,
        buffer: &ModelHandle<Buffer>,
        cx: &mut ModelContext<Self>,
    ) -> Option<()> {
        let (path, full_path) = {
            let file = buffer.read(cx).file()?;
            (file.path().clone(), file.full_path(cx))
        };

        // If the buffer has a language, set it and start/assign the language server
        if let Some(language) = self.languages.select_language(&full_path) {
            buffer.update(cx, |buffer, cx| {
                buffer.set_language(Some(language.clone()), cx);
            });

            // For local worktrees, start a language server if needed.
            // Also assign the language server and any previously stored diagnostics to the buffer.
            if let Some(local_worktree) = worktree.read(cx).as_local() {
                let worktree_id = local_worktree.id();
                let worktree_abs_path = local_worktree.abs_path().clone();

                // One server per (worktree, language); reuse if already running.
                let language_server = match self
                    .language_servers
                    .entry((worktree_id, language.name().to_string()))
                {
                    hash_map::Entry::Occupied(e) => Some(e.get().clone()),
                    hash_map::Entry::Vacant(e) => Self::start_language_server(
                        self.client.clone(),
                        language.clone(),
                        &worktree_abs_path,
                        cx,
                    )
                    .map(|server| e.insert(server).clone()),
                };

                buffer.update(cx, |buffer, cx| {
                    buffer.set_language_server(language_server, cx);
                });
            }
        }

        // Diagnostics may have arrived before the buffer was opened; apply the
        // stored ones now (independent of whether a language was found).
        if let Some(local_worktree) = worktree.read(cx).as_local() {
            if let Some(diagnostics) = local_worktree.diagnostics_for_path(&path) {
                buffer.update(cx, |buffer, cx| {
                    buffer.update_diagnostics(None, diagnostics, cx).log_err();
                });
            }
        }

        None
    }
674
    /// Starts a language server for `language` rooted at `worktree_path` and
    /// wires its notifications into the project.
    ///
    /// Diagnostics and progress notifications are funneled through a channel
    /// into a single processing task, which updates project diagnostics and
    /// forwards disk-based diagnostic lifecycle events to the server when the
    /// project is registered. Returns `None` if the server fails to start.
    fn start_language_server(
        rpc: Arc<Client>,
        language: Arc<Language>,
        worktree_path: &Path,
        cx: &mut ModelContext<Self>,
    ) -> Option<Arc<LanguageServer>> {
        // Internal event type bridging LSP callbacks to the async task below.
        enum LspEvent {
            DiagnosticsStart,
            DiagnosticsUpdate(lsp::PublishDiagnosticsParams),
            DiagnosticsFinish,
        }

        let language_server = language
            .start_server(worktree_path, cx)
            .log_err()
            .flatten()?;
        let disk_based_sources = language
            .disk_based_diagnostic_sources()
            .cloned()
            .unwrap_or_default();
        let disk_based_diagnostics_progress_token =
            language.disk_based_diagnostics_progress_token().cloned();
        let has_disk_based_diagnostic_progress_token =
            disk_based_diagnostics_progress_token.is_some();
        let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();

        // Listen for `PublishDiagnostics` notifications.
        language_server
            .on_notification::<lsp::notification::PublishDiagnostics, _>({
                let diagnostics_tx = diagnostics_tx.clone();
                move |params| {
                    // Without a progress token there is no other signal for the
                    // start/finish of a diagnostics pass, so synthesize them
                    // around each update.
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                    }
                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsUpdate(params))).ok();
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                    }
                }
            })
            .detach();

        // Listen for `Progress` notifications. Send an event when the language server
        // transitions between running jobs and not running any jobs.
        let mut running_jobs_for_this_server: i32 = 0;
        language_server
            .on_notification::<lsp::notification::Progress, _>(move |params| {
                let token = match params.token {
                    lsp::NumberOrString::Number(_) => None,
                    lsp::NumberOrString::String(token) => Some(token),
                };

                // Only the language's designated disk-based-diagnostics token
                // is tracked; other progress notifications are ignored.
                if token == disk_based_diagnostics_progress_token {
                    match params.value {
                        lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                            lsp::WorkDoneProgress::Begin(_) => {
                                running_jobs_for_this_server += 1;
                                if running_jobs_for_this_server == 1 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                                }
                            }
                            lsp::WorkDoneProgress::End(_) => {
                                running_jobs_for_this_server -= 1;
                                if running_jobs_for_this_server == 0 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                                }
                            }
                            _ => {}
                        },
                    }
                }
            })
            .detach();

        // Process all the LSP events.
        cx.spawn_weak(|this, mut cx| async move {
            while let Ok(message) = diagnostics_rx.recv().await {
                // Stop processing once the project has been dropped.
                let this = cx.read(|cx| this.upgrade(cx))?;
                match message {
                    LspEvent::DiagnosticsStart => {
                        let send = this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_started(cx);
                            // Forward to the server only when registered.
                            this.remote_id().map(|project_id| {
                                rpc.send(proto::DiskBasedDiagnosticsUpdating { project_id })
                            })
                        });
                        if let Some(send) = send {
                            send.await.log_err();
                        }
                    }
                    LspEvent::DiagnosticsUpdate(params) => {
                        this.update(&mut cx, |this, cx| {
                            this.update_diagnostics(params, &disk_based_sources, cx)
                                .log_err();
                        });
                    }
                    LspEvent::DiagnosticsFinish => {
                        let send = this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_finished(cx);
                            this.remote_id().map(|project_id| {
                                rpc.send(proto::DiskBasedDiagnosticsUpdated { project_id })
                            })
                        });
                        if let Some(send) = send {
                            send.await.log_err();
                        }
                    }
                }
            }
            Some(())
        })
        .detach();

        Some(language_server)
    }
790
791 pub fn update_diagnostics(
792 &mut self,
793 params: lsp::PublishDiagnosticsParams,
794 disk_based_sources: &HashSet<String>,
795 cx: &mut ModelContext<Self>,
796 ) -> Result<()> {
797 let abs_path = params
798 .uri
799 .to_file_path()
800 .map_err(|_| anyhow!("URI is not a file"))?;
801 let mut next_group_id = 0;
802 let mut diagnostics = Vec::default();
803 let mut primary_diagnostic_group_ids = HashMap::default();
804 let mut sources_by_group_id = HashMap::default();
805 let mut supporting_diagnostic_severities = HashMap::default();
806 for diagnostic in ¶ms.diagnostics {
807 let source = diagnostic.source.as_ref();
808 let code = diagnostic.code.as_ref().map(|code| match code {
809 lsp::NumberOrString::Number(code) => code.to_string(),
810 lsp::NumberOrString::String(code) => code.clone(),
811 });
812 let range = range_from_lsp(diagnostic.range);
813 let is_supporting = diagnostic
814 .related_information
815 .as_ref()
816 .map_or(false, |infos| {
817 infos.iter().any(|info| {
818 primary_diagnostic_group_ids.contains_key(&(
819 source,
820 code.clone(),
821 range_from_lsp(info.location.range),
822 ))
823 })
824 });
825
826 if is_supporting {
827 if let Some(severity) = diagnostic.severity {
828 supporting_diagnostic_severities
829 .insert((source, code.clone(), range), severity);
830 }
831 } else {
832 let group_id = post_inc(&mut next_group_id);
833 let is_disk_based =
834 source.map_or(false, |source| disk_based_sources.contains(source));
835
836 sources_by_group_id.insert(group_id, source);
837 primary_diagnostic_group_ids
838 .insert((source, code.clone(), range.clone()), group_id);
839
840 diagnostics.push(DiagnosticEntry {
841 range,
842 diagnostic: Diagnostic {
843 code: code.clone(),
844 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
845 message: diagnostic.message.clone(),
846 group_id,
847 is_primary: true,
848 is_valid: true,
849 is_disk_based,
850 },
851 });
852 if let Some(infos) = &diagnostic.related_information {
853 for info in infos {
854 if info.location.uri == params.uri {
855 let range = range_from_lsp(info.location.range);
856 diagnostics.push(DiagnosticEntry {
857 range,
858 diagnostic: Diagnostic {
859 code: code.clone(),
860 severity: DiagnosticSeverity::INFORMATION,
861 message: info.message.clone(),
862 group_id,
863 is_primary: false,
864 is_valid: true,
865 is_disk_based,
866 },
867 });
868 }
869 }
870 }
871 }
872 }
873
874 for entry in &mut diagnostics {
875 let diagnostic = &mut entry.diagnostic;
876 if !diagnostic.is_primary {
877 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
878 if let Some(&severity) = supporting_diagnostic_severities.get(&(
879 source,
880 diagnostic.code.clone(),
881 entry.range.clone(),
882 )) {
883 diagnostic.severity = severity;
884 }
885 }
886 }
887
888 self.update_diagnostic_entries(abs_path, params.version, diagnostics, cx)?;
889 Ok(())
890 }
891
    /// Stores `diagnostics` for the file at `abs_path`: updates the matching
    /// open buffer (if any), persists the entries on the owning local worktree,
    /// and emits `Event::DiagnosticsUpdated`.
    pub fn update_diagnostic_entries(
        &mut self,
        abs_path: PathBuf,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Project>,
    ) -> Result<(), anyhow::Error> {
        let (worktree, relative_path) = self
            .find_local_worktree(&abs_path, cx)
            .ok_or_else(|| anyhow!("no worktree found for diagnostics"))?;
        let project_path = ProjectPath {
            worktree_id: worktree.read(cx).id(),
            path: relative_path.into(),
        };

        // NOTE(review): buffers are matched by relative path only, not by
        // worktree — a same-named file open from another worktree could match
        // first; confirm whether that is intended.
        for buffer in self.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == project_path.path)
                {
                    buffer.update(cx, |buffer, cx| {
                        buffer.update_diagnostics(version, diagnostics.clone(), cx)
                    })?;
                    // At most one open buffer corresponds to the path.
                    break;
                }
            }
        }
        // Persist on the worktree so diagnostics survive buffer close/reopen.
        worktree.update(cx, |worktree, cx| {
            worktree
                .as_local_mut()
                .ok_or_else(|| anyhow!("not a local worktree"))?
                .update_diagnostics(project_path.path.clone(), diagnostics, cx)
        })?;
        cx.emit(Event::DiagnosticsUpdated(project_path));
        Ok(())
    }
930
    /// Resolves go-to-definition at `position` in the given buffer via LSP.
    ///
    /// Only implemented for local projects: requires the buffer to belong to a
    /// local worktree, have a language, and have a running language server.
    /// Definition targets outside all known worktrees cause a new weak
    /// worktree to be created so the target buffer can be opened.
    /// For guests this currently returns an empty list.
    pub fn definition<T: ToOffset>(
        &self,
        source_buffer_handle: &ModelHandle<Buffer>,
        position: T,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<Vec<Definition>>> {
        let source_buffer_handle = source_buffer_handle.clone();
        let buffer = source_buffer_handle.read(cx);
        let worktree;
        let buffer_abs_path;
        if let Some(file) = File::from_dyn(buffer.file()) {
            worktree = file.worktree.clone();
            buffer_abs_path = file.as_local().map(|f| f.abs_path(cx));
        } else {
            return Task::ready(Err(anyhow!("buffer does not belong to any worktree")));
        };

        if worktree.read(cx).as_local().is_some() {
            // LSP positions are UTF-16 based; convert before the request.
            let point = buffer.offset_to_point_utf16(position.to_offset(buffer));
            let buffer_abs_path = buffer_abs_path.unwrap();
            let lang_name;
            let lang_server;
            if let Some(lang) = buffer.language() {
                lang_name = lang.name().to_string();
                if let Some(server) = self
                    .language_servers
                    .get(&(worktree.read(cx).id(), lang_name.clone()))
                {
                    lang_server = server.clone();
                } else {
                    return Task::ready(Err(anyhow!("buffer does not have a language server")));
                };
            } else {
                return Task::ready(Err(anyhow!("buffer does not have a language")));
            }

            cx.spawn(|this, mut cx| async move {
                let response = lang_server
                    .request::<lsp::request::GotoDefinition>(lsp::GotoDefinitionParams {
                        text_document_position_params: lsp::TextDocumentPositionParams {
                            text_document: lsp::TextDocumentIdentifier::new(
                                lsp::Url::from_file_path(&buffer_abs_path).unwrap(),
                            ),
                            position: lsp::Position::new(point.row, point.column),
                        },
                        work_done_progress_params: Default::default(),
                        partial_result_params: Default::default(),
                    })
                    .await?;

                let mut definitions = Vec::new();
                if let Some(response) = response {
                    // Normalize the three LSP response shapes into (uri, range).
                    let mut unresolved_locations = Vec::new();
                    match response {
                        lsp::GotoDefinitionResponse::Scalar(loc) => {
                            unresolved_locations.push((loc.uri, loc.range));
                        }
                        lsp::GotoDefinitionResponse::Array(locs) => {
                            unresolved_locations.extend(locs.into_iter().map(|l| (l.uri, l.range)));
                        }
                        lsp::GotoDefinitionResponse::Link(links) => {
                            unresolved_locations.extend(
                                links
                                    .into_iter()
                                    .map(|l| (l.target_uri, l.target_selection_range)),
                            );
                        }
                    }

                    for (target_uri, target_range) in unresolved_locations {
                        let abs_path = target_uri
                            .to_file_path()
                            .map_err(|_| anyhow!("invalid target path"))?;

                        // Use an existing worktree containing the target, or
                        // create a weak one rooted at the target path itself
                        // (hence the empty relative path below). The server is
                        // reused for the new worktree's language entry.
                        let (worktree, relative_path) = if let Some(result) =
                            this.read_with(&cx, |this, cx| this.find_local_worktree(&abs_path, cx))
                        {
                            result
                        } else {
                            let worktree = this
                                .update(&mut cx, |this, cx| {
                                    this.create_local_worktree(&abs_path, true, cx)
                                })
                                .await?;
                            this.update(&mut cx, |this, cx| {
                                this.language_servers.insert(
                                    (worktree.read(cx).id(), lang_name.clone()),
                                    lang_server.clone(),
                                );
                            });
                            (worktree, PathBuf::new())
                        };

                        let project_path = ProjectPath {
                            worktree_id: worktree.read_with(&cx, |worktree, _| worktree.id()),
                            path: relative_path.into(),
                        };
                        let target_buffer_handle = this
                            .update(&mut cx, |this, cx| this.open_buffer(project_path, cx))
                            .await?;
                        // Clip the server-reported range to the buffer and pin
                        // it with anchors so it tracks subsequent edits.
                        cx.read(|cx| {
                            let target_buffer = target_buffer_handle.read(cx);
                            let target_start = target_buffer
                                .clip_point_utf16(target_range.start.to_point_utf16(), Bias::Left);
                            let target_end = target_buffer
                                .clip_point_utf16(target_range.end.to_point_utf16(), Bias::Left);
                            definitions.push(Definition {
                                target_buffer: target_buffer_handle,
                                target_range: target_buffer.anchor_after(target_start)
                                    ..target_buffer.anchor_before(target_end),
                            });
                        });
                    }
                }

                Ok(definitions)
            })
        } else {
            log::info!("go to definition is not yet implemented for guests");
            Task::ready(Ok(Default::default()))
        }
    }
1053
1054 pub fn find_or_create_local_worktree(
1055 &self,
1056 abs_path: impl AsRef<Path>,
1057 weak: bool,
1058 cx: &mut ModelContext<Self>,
1059 ) -> Task<Result<(ModelHandle<Worktree>, PathBuf)>> {
1060 let abs_path = abs_path.as_ref();
1061 if let Some((tree, relative_path)) = self.find_local_worktree(abs_path, cx) {
1062 Task::ready(Ok((tree.clone(), relative_path.into())))
1063 } else {
1064 let worktree = self.create_local_worktree(abs_path, weak, cx);
1065 cx.foreground()
1066 .spawn(async move { Ok((worktree.await?, PathBuf::new())) })
1067 }
1068 }
1069
1070 fn find_local_worktree(
1071 &self,
1072 abs_path: &Path,
1073 cx: &AppContext,
1074 ) -> Option<(ModelHandle<Worktree>, PathBuf)> {
1075 for tree in self.worktrees(cx) {
1076 if let Some(relative_path) = tree
1077 .read(cx)
1078 .as_local()
1079 .and_then(|t| abs_path.strip_prefix(t.abs_path()).ok())
1080 {
1081 return Some((tree.clone(), relative_path.into()));
1082 }
1083 }
1084 None
1085 }
1086
1087 pub fn is_shared(&self) -> bool {
1088 match &self.client_state {
1089 ProjectClientState::Local { is_shared, .. } => *is_shared,
1090 ProjectClientState::Remote { .. } => false,
1091 }
1092 }
1093
    /// Creates a new local worktree rooted at `abs_path` and adds it to the
    /// project. If the project has a remote id, the worktree is also
    /// registered with the server, and additionally shared when the project
    /// itself is currently shared.
    fn create_local_worktree(
        &self,
        abs_path: impl AsRef<Path>,
        weak: bool,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Worktree>>> {
        let fs = self.fs.clone();
        let client = self.client.clone();
        let path = Arc::from(abs_path.as_ref());
        cx.spawn(|project, mut cx| async move {
            let worktree = Worktree::local(client.clone(), path, weak, fs, &mut cx).await?;

            // Add the worktree first, then snapshot the project's remote
            // state so the registration below reflects the current project.
            let (remote_project_id, is_shared) = project.update(&mut cx, |project, cx| {
                project.add_worktree(&worktree, cx);
                (project.remote_id(), project.is_shared())
            });

            if let Some(project_id) = remote_project_id {
                worktree
                    .update(&mut cx, |worktree, cx| {
                        worktree.as_local_mut().unwrap().register(project_id, cx)
                    })
                    .await?;
                // Guests only see this worktree once it has been shared.
                if is_shared {
                    worktree
                        .update(&mut cx, |worktree, cx| {
                            worktree.as_local_mut().unwrap().share(project_id, cx)
                        })
                        .await?;
                }
            }

            Ok(worktree)
        })
    }
1129
1130 pub fn remove_worktree(&mut self, id: WorktreeId, cx: &mut ModelContext<Self>) {
1131 self.worktrees.retain(|worktree| {
1132 worktree
1133 .upgrade(cx)
1134 .map_or(false, |w| w.read(cx).id() != id)
1135 });
1136 cx.notify();
1137 }
1138
    /// Wires up a newly created or received worktree and stores a handle to
    /// it. Weak local worktrees are held via weak handles so they are freed
    /// as soon as all external references (e.g. open buffers) drop.
    fn add_worktree(&mut self, worktree: &ModelHandle<Worktree>, cx: &mut ModelContext<Self>) {
        // Re-notify project observers whenever the worktree changes.
        cx.observe(&worktree, |_, _, cx| cx.notify()).detach();
        if worktree.read(cx).is_local() {
            // Keep open buffers' file metadata in sync with local FS scans.
            cx.subscribe(&worktree, |this, worktree, _, cx| {
                this.update_local_worktree_buffers(worktree, cx);
            })
            .detach();
        }

        let push_weak_handle = {
            let worktree = worktree.read(cx);
            worktree.is_local() && worktree.is_weak()
        };
        if push_weak_handle {
            // Prune the dangling weak handle once the worktree is released.
            cx.observe_release(&worktree, |this, cx| {
                this.worktrees
                    .retain(|worktree| worktree.upgrade(cx).is_some());
                cx.notify();
            })
            .detach();
            self.worktrees
                .push(WorktreeHandle::Weak(worktree.downgrade()));
        } else {
            self.worktrees
                .push(WorktreeHandle::Strong(worktree.clone()));
        }
        cx.notify();
    }
1167
    /// Reconciles every open buffer's `File` with the latest snapshot of the
    /// given local worktree after a filesystem scan, and broadcasts file
    /// metadata changes to guests when the project is remoted.
    fn update_local_worktree_buffers(
        &mut self,
        worktree_handle: ModelHandle<Worktree>,
        cx: &mut ModelContext<Self>,
    ) {
        let snapshot = worktree_handle.read(cx).snapshot();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in &self.open_buffers {
            if let OpenBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| {
                        if let Some(old_file) = File::from_dyn(buffer.file()) {
                            // Only buffers belonging to this worktree matter.
                            if old_file.worktree != worktree_handle {
                                return;
                            }

                            // Resolve the file's new state: prefer the entry
                            // id (tracks renames), fall back to the old path,
                            // and finally treat it as deleted (no entry id,
                            // old path and mtime retained).
                            let new_file = if let Some(entry) = old_file
                                .entry_id
                                .and_then(|entry_id| snapshot.entry_for_id(entry_id))
                            {
                                File {
                                    is_local: true,
                                    entry_id: Some(entry.id),
                                    mtime: entry.mtime,
                                    path: entry.path.clone(),
                                    worktree: worktree_handle.clone(),
                                }
                            } else if let Some(entry) =
                                snapshot.entry_for_path(old_file.path().as_ref())
                            {
                                File {
                                    is_local: true,
                                    entry_id: Some(entry.id),
                                    mtime: entry.mtime,
                                    path: entry.path.clone(),
                                    worktree: worktree_handle.clone(),
                                }
                            } else {
                                File {
                                    is_local: true,
                                    entry_id: None,
                                    path: old_file.path().clone(),
                                    mtime: old_file.mtime(),
                                    worktree: worktree_handle.clone(),
                                }
                            };

                            // When hosting a remoted project, tell guests
                            // about the buffer's new file metadata.
                            if let Some(project_id) = self.remote_id() {
                                let client = self.client.clone();
                                let message = proto::UpdateBufferFile {
                                    project_id,
                                    buffer_id: *buffer_id as u64,
                                    file: Some(new_file.to_proto()),
                                };
                                cx.foreground()
                                    .spawn(async move { client.send(message).await })
                                    .detach_and_log_err(cx);
                            }
                            buffer.file_updated(Box::new(new_file), cx).detach();
                        }
                    });
                } else {
                    // Buffer handle is dead; record it for removal (the map
                    // can't be mutated while iterating it).
                    buffers_to_delete.push(*buffer_id);
                }
            }
        }

        for buffer_id in buffers_to_delete {
            self.open_buffers.remove(&buffer_id);
        }
    }
1239
1240 pub fn set_active_path(&mut self, entry: Option<ProjectPath>, cx: &mut ModelContext<Self>) {
1241 let new_active_entry = entry.and_then(|project_path| {
1242 let worktree = self.worktree_for_id(project_path.worktree_id, cx)?;
1243 let entry = worktree.read(cx).entry_for_path(project_path.path)?;
1244 Some(ProjectEntry {
1245 worktree_id: project_path.worktree_id,
1246 entry_id: entry.id,
1247 })
1248 });
1249 if new_active_entry != self.active_entry {
1250 self.active_entry = new_active_entry;
1251 cx.emit(Event::ActiveEntryChanged(new_active_entry));
1252 }
1253 }
1254
    /// Whether any language server currently has a disk-based diagnostics
    /// pass in flight (see `disk_based_diagnostics_started`/`_finished`).
    pub fn is_running_disk_based_diagnostics(&self) -> bool {
        self.language_servers_with_diagnostics_running > 0
    }
1258
1259 pub fn diagnostic_summary(&self, cx: &AppContext) -> DiagnosticSummary {
1260 let mut summary = DiagnosticSummary::default();
1261 for (_, path_summary) in self.diagnostic_summaries(cx) {
1262 summary.error_count += path_summary.error_count;
1263 summary.warning_count += path_summary.warning_count;
1264 summary.info_count += path_summary.info_count;
1265 summary.hint_count += path_summary.hint_count;
1266 }
1267 summary
1268 }
1269
1270 pub fn diagnostic_summaries<'a>(
1271 &'a self,
1272 cx: &'a AppContext,
1273 ) -> impl Iterator<Item = (ProjectPath, DiagnosticSummary)> + 'a {
1274 self.worktrees(cx).flat_map(move |worktree| {
1275 let worktree = worktree.read(cx);
1276 let worktree_id = worktree.id();
1277 worktree
1278 .diagnostic_summaries()
1279 .map(move |(path, summary)| (ProjectPath { worktree_id, path }, summary))
1280 })
1281 }
1282
1283 pub fn disk_based_diagnostics_started(&mut self, cx: &mut ModelContext<Self>) {
1284 self.language_servers_with_diagnostics_running += 1;
1285 if self.language_servers_with_diagnostics_running == 1 {
1286 cx.emit(Event::DiskBasedDiagnosticsStarted);
1287 }
1288 }
1289
1290 pub fn disk_based_diagnostics_finished(&mut self, cx: &mut ModelContext<Self>) {
1291 cx.emit(Event::DiskBasedDiagnosticsUpdated);
1292 self.language_servers_with_diagnostics_running -= 1;
1293 if self.language_servers_with_diagnostics_running == 0 {
1294 cx.emit(Event::DiskBasedDiagnosticsFinished);
1295 }
1296 }
1297
    /// The project entry most recently activated via `set_active_path`.
    pub fn active_entry(&self) -> Option<ProjectEntry> {
        self.active_entry
    }
1301
1302 // RPC message handlers
1303
1304 fn handle_unshare_project(
1305 &mut self,
1306 _: TypedEnvelope<proto::UnshareProject>,
1307 _: Arc<Client>,
1308 cx: &mut ModelContext<Self>,
1309 ) -> Result<()> {
1310 if let ProjectClientState::Remote {
1311 sharing_has_stopped,
1312 ..
1313 } = &mut self.client_state
1314 {
1315 *sharing_has_stopped = true;
1316 self.collaborators.clear();
1317 cx.notify();
1318 Ok(())
1319 } else {
1320 unreachable!()
1321 }
1322 }
1323
    /// Handles a new collaborator joining the project: resolves their user
    /// record asynchronously, then records them in `collaborators`.
    fn handle_add_collaborator(
        &mut self,
        mut envelope: TypedEnvelope<proto::AddProjectCollaborator>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let user_store = self.user_store.clone();
        let collaborator = envelope
            .payload
            .collaborator
            .take()
            .ok_or_else(|| anyhow!("empty collaborator"))?;

        cx.spawn(|this, mut cx| {
            async move {
                // Resolving the user may require a server round-trip, so it
                // happens off the message-handling path; errors are logged.
                let collaborator =
                    Collaborator::from_proto(collaborator, &user_store, &mut cx).await?;
                this.update(&mut cx, |this, cx| {
                    this.collaborators
                        .insert(collaborator.peer_id, collaborator);
                    cx.notify();
                });
                Ok(())
            }
            .log_err()
        })
        .detach();

        Ok(())
    }
1354
1355 fn handle_remove_collaborator(
1356 &mut self,
1357 envelope: TypedEnvelope<proto::RemoveProjectCollaborator>,
1358 _: Arc<Client>,
1359 cx: &mut ModelContext<Self>,
1360 ) -> Result<()> {
1361 let peer_id = PeerId(envelope.payload.peer_id);
1362 let replica_id = self
1363 .collaborators
1364 .remove(&peer_id)
1365 .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?
1366 .replica_id;
1367 self.shared_buffers.remove(&peer_id);
1368 for (_, buffer) in &self.open_buffers {
1369 if let OpenBuffer::Loaded(buffer) = buffer {
1370 if let Some(buffer) = buffer.upgrade(cx) {
1371 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1372 }
1373 }
1374 }
1375 cx.notify();
1376 Ok(())
1377 }
1378
    /// Handles the host sharing a worktree with us (the guest): constructs a
    /// remote worktree from the snapshot in the message and adds it to the
    /// project.
    fn handle_share_worktree(
        &mut self,
        envelope: TypedEnvelope<proto::ShareWorktree>,
        client: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let remote_id = self.remote_id().ok_or_else(|| anyhow!("invalid project"))?;
        let replica_id = self.replica_id();
        let worktree = envelope
            .payload
            .worktree
            .ok_or_else(|| anyhow!("invalid worktree"))?;
        cx.spawn(|this, mut cx| {
            async move {
                // Building the remote worktree is async; failures are logged
                // rather than surfaced to the message sender.
                let worktree =
                    Worktree::remote(remote_id, replica_id, worktree, client, &mut cx).await?;
                this.update(&mut cx, |this, cx| this.add_worktree(&worktree, cx));
                Ok(())
            }
            .log_err()
        })
        .detach();
        Ok(())
    }
1403
1404 fn handle_unregister_worktree(
1405 &mut self,
1406 envelope: TypedEnvelope<proto::UnregisterWorktree>,
1407 _: Arc<Client>,
1408 cx: &mut ModelContext<Self>,
1409 ) -> Result<()> {
1410 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1411 self.remove_worktree(worktree_id, cx);
1412 Ok(())
1413 }
1414
1415 fn handle_update_worktree(
1416 &mut self,
1417 envelope: TypedEnvelope<proto::UpdateWorktree>,
1418 _: Arc<Client>,
1419 cx: &mut ModelContext<Self>,
1420 ) -> Result<()> {
1421 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1422 if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
1423 worktree.update(cx, |worktree, cx| {
1424 let worktree = worktree.as_remote_mut().unwrap();
1425 worktree.update_from_remote(envelope, cx)
1426 })?;
1427 }
1428 Ok(())
1429 }
1430
    /// Handles a per-path diagnostics summary pushed by the host, storing it
    /// on the corresponding remote worktree and notifying subscribers.
    fn handle_update_diagnostic_summary(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        // Silently ignore summaries for worktrees we no longer hold.
        if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
            if let Some(summary) = envelope.payload.summary {
                let project_path = ProjectPath {
                    worktree_id,
                    path: Path::new(&summary.path).into(),
                };
                worktree.update(cx, |worktree, _| {
                    worktree
                        .as_remote_mut()
                        .unwrap()
                        .update_diagnostic_summary(project_path.path.clone(), &summary);
                });
                cx.emit(Event::DiagnosticsUpdated(project_path));
            }
        }
        Ok(())
    }
1455
    /// Handles the host announcing that a disk-based diagnostics pass (e.g. a
    /// compiler check) has started, forwarding to the local event machinery.
    fn handle_disk_based_diagnostics_updating(
        &mut self,
        _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdating>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        self.disk_based_diagnostics_started(cx);
        Ok(())
    }
1465
    /// Handles the host announcing that a disk-based diagnostics pass has
    /// finished, forwarding to the local event machinery.
    fn handle_disk_based_diagnostics_updated(
        &mut self,
        _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdated>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        self.disk_based_diagnostics_finished(cx);
        Ok(())
    }
1475
1476 pub fn handle_update_buffer(
1477 &mut self,
1478 envelope: TypedEnvelope<proto::UpdateBuffer>,
1479 _: Arc<Client>,
1480 cx: &mut ModelContext<Self>,
1481 ) -> Result<()> {
1482 let payload = envelope.payload.clone();
1483 let buffer_id = payload.buffer_id as usize;
1484 let ops = payload
1485 .operations
1486 .into_iter()
1487 .map(|op| language::proto::deserialize_operation(op))
1488 .collect::<Result<Vec<_>, _>>()?;
1489 match self.open_buffers.get_mut(&buffer_id) {
1490 Some(OpenBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
1491 Some(OpenBuffer::Loaded(buffer)) => {
1492 if let Some(buffer) = buffer.upgrade(cx) {
1493 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
1494 } else {
1495 self.open_buffers
1496 .insert(buffer_id, OpenBuffer::Operations(ops));
1497 }
1498 }
1499 None => {
1500 self.open_buffers
1501 .insert(buffer_id, OpenBuffer::Operations(ops));
1502 }
1503 }
1504 Ok(())
1505 }
1506
1507 pub fn handle_update_buffer_file(
1508 &mut self,
1509 envelope: TypedEnvelope<proto::UpdateBufferFile>,
1510 _: Arc<Client>,
1511 cx: &mut ModelContext<Self>,
1512 ) -> Result<()> {
1513 let payload = envelope.payload.clone();
1514 let buffer_id = payload.buffer_id as usize;
1515 let file = payload.file.ok_or_else(|| anyhow!("invalid file"))?;
1516 let worktree = self
1517 .worktree_for_id(WorktreeId::from_proto(file.worktree_id), cx)
1518 .ok_or_else(|| anyhow!("no such worktree"))?;
1519 let file = File::from_proto(file, worktree.clone(), cx)?;
1520 let buffer = self
1521 .open_buffers
1522 .get_mut(&buffer_id)
1523 .and_then(|b| b.upgrade(cx))
1524 .ok_or_else(|| anyhow!("no such buffer"))?;
1525 buffer.update(cx, |buffer, cx| {
1526 buffer.file_updated(Box::new(file), cx).detach();
1527 });
1528
1529 Ok(())
1530 }
1531
    /// Handles a guest requesting that one of our shared buffers be saved.
    /// Responds with the saved version and mtime once the save completes.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let project_id = self.remote_id().ok_or_else(|| anyhow!("not connected"))?;
        // Only buffers we previously shared with this peer can be saved by it.
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let receipt = envelope.receipt();
        let buffer_id = envelope.payload.buffer_id;
        // Start the save via the model context, then await it and send the
        // response from the background executor.
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();
        Ok(())
    }
1574
    /// Handles a guest requesting that one of our shared buffers be
    /// formatted, acknowledging success or reporting the formatting error.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        // Only buffers we previously shared with this peer can be formatted.
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();
        Ok(())
    }
1613
    /// Handles a guest opening a buffer in one of our worktrees: opens (or
    /// reuses) the buffer locally, retains it on the guest's behalf in
    /// `shared_buffers`, and responds with the buffer's serialized state.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();
        let peer_id = envelope.original_sender_id()?;
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        let open_buffer = self.open_buffer(
            ProjectPath {
                worktree_id,
                path: PathBuf::from(envelope.payload.path).into(),
            },
            cx,
        );
        cx.spawn(|this, mut cx| {
            async move {
                let buffer = open_buffer.await?;
                // Hold a strong handle for the guest so the buffer stays
                // alive until they close it.
                this.update(&mut cx, |this, _| {
                    this.shared_buffers
                        .entry(peer_id)
                        .or_default()
                        .insert(buffer.id() as u64, buffer.clone());
                });
                let message = buffer.read_with(&cx, |buffer, _| buffer.to_proto());
                rpc.respond(
                    receipt,
                    proto::OpenBufferResponse {
                        buffer: Some(message),
                    },
                )
                .await
            }
            .log_err()
        })
        .detach();
        Ok(())
    }
1653
1654 pub fn handle_close_buffer(
1655 &mut self,
1656 envelope: TypedEnvelope<proto::CloseBuffer>,
1657 _: Arc<Client>,
1658 cx: &mut ModelContext<Self>,
1659 ) -> anyhow::Result<()> {
1660 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1661 shared_buffers.remove(&envelope.payload.buffer_id);
1662 cx.notify();
1663 }
1664 Ok(())
1665 }
1666
1667 pub fn handle_buffer_saved(
1668 &mut self,
1669 envelope: TypedEnvelope<proto::BufferSaved>,
1670 _: Arc<Client>,
1671 cx: &mut ModelContext<Self>,
1672 ) -> Result<()> {
1673 let payload = envelope.payload.clone();
1674 let buffer = self
1675 .open_buffers
1676 .get(&(payload.buffer_id as usize))
1677 .and_then(|buf| {
1678 if let OpenBuffer::Loaded(buffer) = buf {
1679 buffer.upgrade(cx)
1680 } else {
1681 None
1682 }
1683 });
1684 if let Some(buffer) = buffer {
1685 buffer.update(cx, |buffer, cx| {
1686 let version = payload.version.try_into()?;
1687 let mtime = payload
1688 .mtime
1689 .ok_or_else(|| anyhow!("missing mtime"))?
1690 .into();
1691 buffer.did_save(version, mtime, None, cx);
1692 Result::<_, anyhow::Error>::Ok(())
1693 })?;
1694 }
1695 Ok(())
1696 }
1697
1698 pub fn handle_buffer_reloaded(
1699 &mut self,
1700 envelope: TypedEnvelope<proto::BufferReloaded>,
1701 _: Arc<Client>,
1702 cx: &mut ModelContext<Self>,
1703 ) -> Result<()> {
1704 let payload = envelope.payload.clone();
1705 let buffer = self
1706 .open_buffers
1707 .get(&(payload.buffer_id as usize))
1708 .and_then(|buf| {
1709 if let OpenBuffer::Loaded(buffer) = buf {
1710 buffer.upgrade(cx)
1711 } else {
1712 None
1713 }
1714 });
1715 if let Some(buffer) = buffer {
1716 buffer.update(cx, |buffer, cx| {
1717 let version = payload.version.try_into()?;
1718 let mtime = payload
1719 .mtime
1720 .ok_or_else(|| anyhow!("missing mtime"))?
1721 .into();
1722 buffer.did_reload(version, mtime, cx);
1723 Result::<_, anyhow::Error>::Ok(())
1724 })?;
1725 }
1726 Ok(())
1727 }
1728
1729 pub fn match_paths<'a>(
1730 &self,
1731 query: &'a str,
1732 include_ignored: bool,
1733 smart_case: bool,
1734 max_results: usize,
1735 cancel_flag: &'a AtomicBool,
1736 cx: &AppContext,
1737 ) -> impl 'a + Future<Output = Vec<PathMatch>> {
1738 let worktrees = self
1739 .worktrees(cx)
1740 .filter(|worktree| !worktree.read(cx).is_weak())
1741 .collect::<Vec<_>>();
1742 let include_root_name = worktrees.len() > 1;
1743 let candidate_sets = worktrees
1744 .into_iter()
1745 .map(|worktree| CandidateSet {
1746 snapshot: worktree.read(cx).snapshot(),
1747 include_ignored,
1748 include_root_name,
1749 })
1750 .collect::<Vec<_>>();
1751
1752 let background = cx.background().clone();
1753 async move {
1754 fuzzy::match_paths(
1755 candidate_sets.as_slice(),
1756 query,
1757 smart_case,
1758 max_results,
1759 cancel_flag,
1760 background,
1761 )
1762 .await
1763 }
1764 }
1765}
1766
1767impl WorktreeHandle {
1768 pub fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Worktree>> {
1769 match self {
1770 WorktreeHandle::Strong(handle) => Some(handle.clone()),
1771 WorktreeHandle::Weak(handle) => handle.upgrade(cx),
1772 }
1773 }
1774}
1775
/// One worktree's contribution to a project-wide fuzzy path search.
struct CandidateSet {
    // Immutable view of the worktree's entries captured when the search began.
    snapshot: Snapshot,
    // Whether ignored files should be included as candidates.
    include_ignored: bool,
    // Whether matched paths should be prefixed with the worktree root name.
    include_root_name: bool,
}
1781
1782impl<'a> PathMatchCandidateSet<'a> for CandidateSet {
1783 type Candidates = CandidateSetIter<'a>;
1784
1785 fn id(&self) -> usize {
1786 self.snapshot.id().to_usize()
1787 }
1788
1789 fn len(&self) -> usize {
1790 if self.include_ignored {
1791 self.snapshot.file_count()
1792 } else {
1793 self.snapshot.visible_file_count()
1794 }
1795 }
1796
1797 fn prefix(&self) -> Arc<str> {
1798 if self.snapshot.root_entry().map_or(false, |e| e.is_file()) {
1799 self.snapshot.root_name().into()
1800 } else if self.include_root_name {
1801 format!("{}/", self.snapshot.root_name()).into()
1802 } else {
1803 "".into()
1804 }
1805 }
1806
1807 fn candidates(&'a self, start: usize) -> Self::Candidates {
1808 CandidateSetIter {
1809 traversal: self.snapshot.files(self.include_ignored, start),
1810 }
1811 }
1812}
1813
/// Adapts a worktree file traversal into fuzzy-match path candidates.
struct CandidateSetIter<'a> {
    traversal: Traversal<'a>,
}
1817
1818impl<'a> Iterator for CandidateSetIter<'a> {
1819 type Item = PathMatchCandidate<'a>;
1820
1821 fn next(&mut self) -> Option<Self::Item> {
1822 self.traversal.next().map(|entry| {
1823 if let EntryKind::File(char_bag) = entry.kind {
1824 PathMatchCandidate {
1825 path: &entry.path,
1826 char_bag,
1827 }
1828 } else {
1829 unreachable!()
1830 }
1831 })
1832 }
1833}
1834
impl Entity for Project {
    type Event = Event;

    /// When the project model is released, notify the server: hosts
    /// unregister the project, guests leave it. Errors are only logged since
    /// the model is already being dropped.
    fn release(&mut self, cx: &mut gpui::MutableAppContext) {
        match &self.client_state {
            ProjectClientState::Local { remote_id_rx, .. } => {
                // Only unregister if the project was actually registered.
                if let Some(project_id) = *remote_id_rx.borrow() {
                    let rpc = self.client.clone();
                    cx.spawn(|_| async move {
                        if let Err(err) = rpc.send(proto::UnregisterProject { project_id }).await {
                            log::error!("error unregistering project: {}", err);
                        }
                    })
                    .detach();
                }
            }
            ProjectClientState::Remote { remote_id, .. } => {
                let rpc = self.client.clone();
                let project_id = *remote_id;
                cx.spawn(|_| async move {
                    if let Err(err) = rpc.send(proto::LeaveProject { project_id }).await {
                        log::error!("error leaving project: {}", err);
                    }
                })
                .detach();
            }
        }
    }

    /// Before the app quits, shut down all of the project's language servers
    /// and return a future that resolves once they have all finished.
    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        let shutdown_futures = self
            .language_servers
            .drain()
            .filter_map(|(_, server)| server.shutdown())
            .collect::<Vec<_>>();
        Some(
            async move {
                futures::future::join_all(shutdown_futures).await;
            }
            .boxed(),
        )
    }
}
1883
1884impl Collaborator {
1885 fn from_proto(
1886 message: proto::Collaborator,
1887 user_store: &ModelHandle<UserStore>,
1888 cx: &mut AsyncAppContext,
1889 ) -> impl Future<Output = Result<Self>> {
1890 let user = user_store.update(cx, |user_store, cx| {
1891 user_store.fetch_user(message.user_id, cx)
1892 });
1893
1894 async move {
1895 Ok(Self {
1896 peer_id: PeerId(message.peer_id),
1897 user: user.await?,
1898 replica_id: message.replica_id as ReplicaId,
1899 })
1900 }
1901 }
1902}
1903
1904impl<P: AsRef<Path>> From<(WorktreeId, P)> for ProjectPath {
1905 fn from((worktree_id, path): (WorktreeId, P)) -> Self {
1906 Self {
1907 worktree_id,
1908 path: path.as_ref().into(),
1909 }
1910 }
1911}
1912
1913impl OpenBuffer {
1914 fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Buffer>> {
1915 match self {
1916 OpenBuffer::Loaded(buffer) => buffer.upgrade(cx),
1917 OpenBuffer::Operations(_) => None,
1918 }
1919 }
1920}
1921
1922#[cfg(test)]
1923mod tests {
1924 use super::{Event, *};
1925 use client::test::FakeHttpClient;
1926 use fs::RealFs;
1927 use futures::StreamExt;
1928 use gpui::{test::subscribe, TestAppContext};
1929 use language::{
1930 tree_sitter_rust, AnchorRangeExt, Diagnostic, LanguageConfig, LanguageRegistry,
1931 LanguageServerConfig, Point,
1932 };
1933 use lsp::Url;
1934 use serde_json::json;
1935 use std::{cell::RefCell, os::unix, path::PathBuf, rc::Rc};
1936 use unindent::Unindent as _;
1937 use util::test::temp_tree;
1938 use worktree::WorktreeHandle as _;
1939
    /// Verifies that scanning follows symlinks (the worktree root is itself a
    /// symlink, and a symlinked directory inside it shares inodes with its
    /// target) and that fuzzy path matching finds entries in the worktree.
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        // Access the tree through a symlinked root, and add a symlinked
        // directory ("finnochio" -> "fennel") inside it.
        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let project = build_project(Arc::new(RealFs), &mut cx);

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(&root_link_path, false, cx)
            })
            .await
            .unwrap();

        // Wait for the initial scan before inspecting the worktree.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert_eq!(tree.file_count(), 5);
            // The symlinked directory resolves to the same underlying files.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );
        });

        let cancel_flag = Default::default();
        let results = project
            .read_with(&cx, |project, cx| {
                project.match_paths("bna", false, false, 10, &cancel_flag, cx)
            })
            .await;
        assert_eq!(
            results
                .into_iter()
                .map(|result| result.path)
                .collect::<Vec<Arc<Path>>>(),
            vec![
                PathBuf::from("banana/carrot/date").into(),
                PathBuf::from("banana/carrot/endive").into(),
            ]
        );
    }
2002
    /// Exercises the disk-based diagnostics lifecycle: progress notifications
    /// from a fake language server are translated into project events, and
    /// published diagnostics become queryable on the affected buffer.
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();

        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        path: Path::new("b.rs").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();

        let mut events = subscribe(&project, &mut cx);

        fake_server.start_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsStarted
        );

        // Overlapping progress reports: only one `Started` event is asserted
        // above, and `Finished` is asserted only after the last `end` below.
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiagnosticsUpdated(ProjectPath {
                worktree_id,
                path: Arc::from(Path::new("a.rs"))
            })
        );

        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdated
        );
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsFinished
        );

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        // The published diagnostic should be visible on the buffer snapshot.
        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
2135
    /// Fuzzy path search matches only files, so a worktree containing nothing
    /// but directories should yield no results.
    #[gpui::test]
    async fn test_search_worktree_without_files(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "dir1": {},
                "dir2": {
                    "dir3": {}
                }
            }
        }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(&dir.path(), false, cx)
            })
            .await
            .unwrap();

        // Wait for the initial scan before searching.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let cancel_flag = Default::default();
        let results = project
            .read_with(&cx, |project, cx| {
                project.match_paths("dir", false, false, 10, &cancel_flag, cx)
            })
            .await;

        assert!(results.is_empty());
    }
2167
2168 #[gpui::test]
2169 async fn test_definition(mut cx: gpui::TestAppContext) {
2170 let (language_server_config, mut fake_server) =
2171 LanguageServerConfig::fake(cx.background()).await;
2172
2173 let mut languages = LanguageRegistry::new();
2174 languages.add(Arc::new(Language::new(
2175 LanguageConfig {
2176 name: "Rust".to_string(),
2177 path_suffixes: vec!["rs".to_string()],
2178 language_server: Some(language_server_config),
2179 ..Default::default()
2180 },
2181 Some(tree_sitter_rust::language()),
2182 )));
2183
2184 let dir = temp_tree(json!({
2185 "a.rs": "const fn a() { A }",
2186 "b.rs": "const y: i32 = crate::a()",
2187 }));
2188
2189 let http_client = FakeHttpClient::with_404_response();
2190 let client = Client::new(http_client.clone());
2191 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
2192 let project = cx.update(|cx| {
2193 Project::local(
2194 client,
2195 user_store,
2196 Arc::new(languages),
2197 Arc::new(RealFs),
2198 cx,
2199 )
2200 });
2201
2202 let (tree, _) = project
2203 .update(&mut cx, |project, cx| {
2204 project.find_or_create_local_worktree(dir.path().join("b.rs"), false, cx)
2205 })
2206 .await
2207 .unwrap();
2208 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
2209 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2210 .await;
2211
2212 // Cause worktree to start the fake language server
2213 let buffer = project
2214 .update(&mut cx, |project, cx| {
2215 project.open_buffer(
2216 ProjectPath {
2217 worktree_id,
2218 path: Path::new("").into(),
2219 },
2220 cx,
2221 )
2222 })
2223 .await
2224 .unwrap();
2225 let definitions =
2226 project.update(&mut cx, |project, cx| project.definition(&buffer, 22, cx));
2227 let (request_id, request) = fake_server
2228 .receive_request::<lsp::request::GotoDefinition>()
2229 .await;
2230 let request_params = request.text_document_position_params;
2231 assert_eq!(
2232 request_params.text_document.uri.to_file_path().unwrap(),
2233 dir.path().join("b.rs")
2234 );
2235 assert_eq!(request_params.position, lsp::Position::new(0, 22));
2236
2237 fake_server
2238 .respond(
2239 request_id,
2240 Some(lsp::GotoDefinitionResponse::Scalar(lsp::Location::new(
2241 lsp::Url::from_file_path(dir.path().join("a.rs")).unwrap(),
2242 lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
2243 ))),
2244 )
2245 .await;
2246 let mut definitions = definitions.await.unwrap();
2247 assert_eq!(definitions.len(), 1);
2248 let definition = definitions.pop().unwrap();
2249 cx.update(|cx| {
2250 let target_buffer = definition.target_buffer.read(cx);
2251 assert_eq!(
2252 target_buffer
2253 .file()
2254 .unwrap()
2255 .as_local()
2256 .unwrap()
2257 .abs_path(cx),
2258 dir.path().join("a.rs")
2259 );
2260 assert_eq!(definition.target_range.to_offset(target_buffer), 9..10);
2261 assert_eq!(
2262 list_worktrees(&project, cx),
2263 [
2264 (dir.path().join("b.rs"), false),
2265 (dir.path().join("a.rs"), true)
2266 ]
2267 );
2268
2269 drop(definition);
2270 });
2271 cx.read(|cx| {
2272 assert_eq!(
2273 list_worktrees(&project, cx),
2274 [(dir.path().join("b.rs"), false)]
2275 );
2276 });
2277
        /// Returns, for every worktree currently held by `project`, its
        /// absolute path on disk paired with its `is_weak()` flag.
        ///
        /// NOTE(review): this test uses the weak flag to distinguish the
        /// explicitly-added worktree from the one created implicitly for the
        /// go-to-definition target — confirm against `Worktree::is_weak`.
        fn list_worktrees(project: &ModelHandle<Project>, cx: &AppContext) -> Vec<(PathBuf, bool)> {
            project
                .read(cx)
                .worktrees(cx)
                .map(|worktree| {
                    let worktree = worktree.read(cx);
                    (
                        // Tests here only create local worktrees, so `as_local`
                        // is expected to succeed.
                        worktree.as_local().unwrap().abs_path().to_path_buf(),
                        worktree.is_weak(),
                    )
                })
                .collect::<Vec<_>>()
        }
2291 }
2292
2293 #[gpui::test]
2294 async fn test_save_file(mut cx: gpui::TestAppContext) {
2295 let fs = Arc::new(FakeFs::new());
2296 fs.insert_tree(
2297 "/dir",
2298 json!({
2299 "file1": "the old contents",
2300 }),
2301 )
2302 .await;
2303
2304 let project = build_project(fs.clone(), &mut cx);
2305 let worktree_id = project
2306 .update(&mut cx, |p, cx| {
2307 p.find_or_create_local_worktree("/dir", false, cx)
2308 })
2309 .await
2310 .unwrap()
2311 .0
2312 .read_with(&cx, |tree, _| tree.id());
2313
2314 let buffer = project
2315 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
2316 .await
2317 .unwrap();
2318 buffer
2319 .update(&mut cx, |buffer, cx| {
2320 assert_eq!(buffer.text(), "the old contents");
2321 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
2322 buffer.save(cx)
2323 })
2324 .await
2325 .unwrap();
2326
2327 let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
2328 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
2329 }
2330
    /// A worktree rooted at a single file (rather than a directory) can still
    /// save buffer edits back to that file.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/dir",
            json!({
                "file1": "the old contents",
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        // The worktree root is the file itself, so the buffer is opened at the
        // empty relative path below.
        let worktree_id = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/dir/file1", false, cx)
            })
            .await
            .unwrap()
            .0
            .read_with(&cx, |tree, _| tree.id());

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, ""), cx))
            .await
            .unwrap();
        // Insert ~160 KiB of text at the start, then save.
        buffer
            .update(&mut cx, |buffer, cx| {
                buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
                buffer.save(cx)
            })
            .await
            .unwrap();

        // The saved file must match the buffer's contents exactly.
        let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
2367
2368 #[gpui::test]
2369 async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
2370 let dir = temp_tree(json!({
2371 "a": {
2372 "file1": "",
2373 "file2": "",
2374 "file3": "",
2375 },
2376 "b": {
2377 "c": {
2378 "file4": "",
2379 "file5": "",
2380 }
2381 }
2382 }));
2383
2384 let project = build_project(Arc::new(RealFs), &mut cx);
2385 let rpc = project.read_with(&cx, |p, _| p.client.clone());
2386
2387 let (tree, _) = project
2388 .update(&mut cx, |p, cx| {
2389 p.find_or_create_local_worktree(dir.path(), false, cx)
2390 })
2391 .await
2392 .unwrap();
2393 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
2394
2395 let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
2396 let buffer = project.update(cx, |p, cx| p.open_buffer((worktree_id, path), cx));
2397 async move { buffer.await.unwrap() }
2398 };
2399 let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
2400 tree.read_with(cx, |tree, _| {
2401 tree.entry_for_path(path)
2402 .expect(&format!("no entry for path {}", path))
2403 .id
2404 })
2405 };
2406
2407 let buffer2 = buffer_for_path("a/file2", &mut cx).await;
2408 let buffer3 = buffer_for_path("a/file3", &mut cx).await;
2409 let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
2410 let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
2411
2412 let file2_id = id_for_path("a/file2", &cx);
2413 let file3_id = id_for_path("a/file3", &cx);
2414 let file4_id = id_for_path("b/c/file4", &cx);
2415
2416 // Wait for the initial scan.
2417 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2418 .await;
2419
2420 // Create a remote copy of this worktree.
2421 let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
2422 let remote = Worktree::remote(
2423 1,
2424 1,
2425 initial_snapshot.to_proto(&Default::default(), Default::default()),
2426 rpc.clone(),
2427 &mut cx.to_async(),
2428 )
2429 .await
2430 .unwrap();
2431
2432 cx.read(|cx| {
2433 assert!(!buffer2.read(cx).is_dirty());
2434 assert!(!buffer3.read(cx).is_dirty());
2435 assert!(!buffer4.read(cx).is_dirty());
2436 assert!(!buffer5.read(cx).is_dirty());
2437 });
2438
2439 // Rename and delete files and directories.
2440 tree.flush_fs_events(&cx).await;
2441 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
2442 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
2443 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
2444 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
2445 tree.flush_fs_events(&cx).await;
2446
2447 let expected_paths = vec![
2448 "a",
2449 "a/file1",
2450 "a/file2.new",
2451 "b",
2452 "d",
2453 "d/file3",
2454 "d/file4",
2455 ];
2456
2457 cx.read(|app| {
2458 assert_eq!(
2459 tree.read(app)
2460 .paths()
2461 .map(|p| p.to_str().unwrap())
2462 .collect::<Vec<_>>(),
2463 expected_paths
2464 );
2465
2466 assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
2467 assert_eq!(id_for_path("d/file3", &cx), file3_id);
2468 assert_eq!(id_for_path("d/file4", &cx), file4_id);
2469
2470 assert_eq!(
2471 buffer2.read(app).file().unwrap().path().as_ref(),
2472 Path::new("a/file2.new")
2473 );
2474 assert_eq!(
2475 buffer3.read(app).file().unwrap().path().as_ref(),
2476 Path::new("d/file3")
2477 );
2478 assert_eq!(
2479 buffer4.read(app).file().unwrap().path().as_ref(),
2480 Path::new("d/file4")
2481 );
2482 assert_eq!(
2483 buffer5.read(app).file().unwrap().path().as_ref(),
2484 Path::new("b/c/file5")
2485 );
2486
2487 assert!(!buffer2.read(app).file().unwrap().is_deleted());
2488 assert!(!buffer3.read(app).file().unwrap().is_deleted());
2489 assert!(!buffer4.read(app).file().unwrap().is_deleted());
2490 assert!(buffer5.read(app).file().unwrap().is_deleted());
2491 });
2492
2493 // Update the remote worktree. Check that it becomes consistent with the
2494 // local worktree.
2495 remote.update(&mut cx, |remote, cx| {
2496 let update_message =
2497 tree.read(cx)
2498 .snapshot()
2499 .build_update(&initial_snapshot, 1, 1, true);
2500 remote
2501 .as_remote_mut()
2502 .unwrap()
2503 .snapshot
2504 .apply_remote_update(update_message)
2505 .unwrap();
2506
2507 assert_eq!(
2508 remote
2509 .paths()
2510 .map(|p| p.to_str().unwrap())
2511 .collect::<Vec<_>>(),
2512 expected_paths
2513 );
2514 });
2515 }
2516
2517 #[gpui::test]
2518 async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
2519 let fs = Arc::new(FakeFs::new());
2520 fs.insert_tree(
2521 "/the-dir",
2522 json!({
2523 "a.txt": "a-contents",
2524 "b.txt": "b-contents",
2525 }),
2526 )
2527 .await;
2528
2529 let project = build_project(fs.clone(), &mut cx);
2530 let worktree_id = project
2531 .update(&mut cx, |p, cx| {
2532 p.find_or_create_local_worktree("/the-dir", false, cx)
2533 })
2534 .await
2535 .unwrap()
2536 .0
2537 .read_with(&cx, |tree, _| tree.id());
2538
2539 // Spawn multiple tasks to open paths, repeating some paths.
2540 let (buffer_a_1, buffer_b, buffer_a_2) = project.update(&mut cx, |p, cx| {
2541 (
2542 p.open_buffer((worktree_id, "a.txt"), cx),
2543 p.open_buffer((worktree_id, "b.txt"), cx),
2544 p.open_buffer((worktree_id, "a.txt"), cx),
2545 )
2546 });
2547
2548 let buffer_a_1 = buffer_a_1.await.unwrap();
2549 let buffer_a_2 = buffer_a_2.await.unwrap();
2550 let buffer_b = buffer_b.await.unwrap();
2551 assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
2552 assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
2553
2554 // There is only one buffer per path.
2555 let buffer_a_id = buffer_a_1.id();
2556 assert_eq!(buffer_a_2.id(), buffer_a_id);
2557
2558 // Open the same path again while it is still open.
2559 drop(buffer_a_1);
2560 let buffer_a_3 = project
2561 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))
2562 .await
2563 .unwrap();
2564
2565 // There's still only one buffer per path.
2566 assert_eq!(buffer_a_3.id(), buffer_a_id);
2567 }
2568
    /// Exercises the buffer dirty-state machine: edits dirty the buffer and
    /// emit `Dirtied`, saving clears it and emits `Saved`, and deleting the
    /// backing file dirties a clean buffer (but emits no extra `Dirtied` for
    /// an already-dirty one).
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |worktree, _| worktree.id());

        // Drain pending fs events and wait for the initial scan so later
        // event assertions aren't polluted by startup noise.
        worktree.flush_fs_events(&cx).await;
        worktree
            .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
            .await;

        let buffer1 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
            .await
            .unwrap();
        // Collects every event the buffer emits, for ordering assertions.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            // Simulate a save by acknowledging the current version.
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            // Note: only the first post-save edit emits `Dirtied`.
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file2"), cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file3"), cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        worktree.flush_fs_events(&cx).await;
        // Dirty the buffer first, then delete the file.
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
2705
    /// When the file changes on disk: a clean buffer silently reloads to the
    /// new contents, while a modified buffer keeps its edits and is flagged as
    /// having a conflict instead.
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        // Wait for the initial scan before touching the file.
        worktree
            .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = project
            .update(&mut cx, |p, cx| {
                p.open_buffer((worktree_id, "the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            // A disk-driven reload must leave the buffer clean.
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
2805
    /// Verifies grouping of LSP diagnostics: a primary diagnostic and the
    /// hints whose `related_information` points back at it share a `group_id`,
    /// `diagnostics_in_range` yields all entries in position order, and
    /// `diagnostic_group` returns exactly one group's entries.
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/the-dir", false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                // Primary of group 0: a warning with one related hint.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                // Hint belonging to group 0 (points back at "error 1").
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                // Primary of group 1: an error with two related hints.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                // First hint of group 1 (points back at "error 2").
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                // Second hint of group 1.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        project
            .update(&mut cx, |p, cx| {
                p.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All diagnostics come back ordered by position, with the group ids
        // and primary flags assigned as expected.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be fetched individually by its id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
3064
3065 fn build_project(fs: Arc<dyn Fs>, cx: &mut TestAppContext) -> ModelHandle<Project> {
3066 let languages = Arc::new(LanguageRegistry::new());
3067 let http_client = FakeHttpClient::with_404_response();
3068 let client = client::Client::new(http_client.clone());
3069 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3070 cx.update(|cx| Project::local(client, user_store, languages, fs, cx))
3071 }
3072}