1pub mod fs;
2mod ignore;
3pub mod worktree;
4
5use anyhow::{anyhow, Result};
6use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore};
7use clock::ReplicaId;
8use collections::{hash_map, HashMap, HashSet};
9use futures::Future;
10use fuzzy::{PathMatch, PathMatchCandidate, PathMatchCandidateSet};
11use gpui::{
12 AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task,
13 WeakModelHandle,
14};
15use language::{
16 proto::{deserialize_anchor, serialize_anchor},
17 range_from_lsp, Bias, Buffer, Diagnostic, DiagnosticEntry, File as _, Language,
18 LanguageRegistry, PointUtf16, ToOffset, ToPointUtf16,
19};
20use lsp::{DiagnosticSeverity, LanguageServer};
21use postage::{prelude::Stream, watch};
22use smol::block_on;
23use std::{
24 convert::TryInto,
25 ops::Range,
26 path::{Path, PathBuf},
27 sync::{atomic::AtomicBool, Arc},
28};
29use util::{post_inc, ResultExt, TryFutureExt as _};
30
31pub use fs::*;
32pub use worktree::*;
33
/// Root model for a single project: a set of worktrees plus the
/// collaboration, language-server, and buffer-tracking state that
/// belongs to them.
pub struct Project {
    // Worktrees may be held strongly or weakly (see `WorktreeHandle`);
    // iteration upgrades and skips dropped ones.
    worktrees: Vec<WorktreeHandle>,
    active_entry: Option<ProjectEntry>,
    languages: Arc<LanguageRegistry>,
    // At most one language server per (worktree, language name) pair.
    language_servers: HashMap<(WorktreeId, String), Arc<LanguageServer>>,
    client: Arc<client::Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Whether this replica hosts the project (`Local`) or joined it (`Remote`).
    client_state: ProjectClientState,
    collaborators: HashMap<PeerId, Collaborator>,
    // Server-message subscriptions; cleared and re-created when the
    // project's remote id changes (see `set_remote_id`).
    subscriptions: Vec<client::Subscription>,
    // Adjusted as disk-based diagnostics runs start/finish; signed —
    // presumably so unbalanced finish events can't underflow. TODO confirm.
    language_servers_with_diagnostics_running: isize,
    // Open buffers keyed by their remote id; weak handles so that a
    // closed buffer is dropped and later pruned (see `get_open_buffer`).
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // In-flight `open_buffer` loads, so concurrent opens of the same
    // path share a single load task.
    loading_buffers: HashMap<
        ProjectPath,
        postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
    >,
    // Buffers handed to each guest peer, held strongly while shared.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
}
53
/// A worktree reference that either keeps the worktree model alive
/// (`Strong`) or not (`Weak`); callers upgrade before use
/// (see `Project::worktrees`).
enum WorktreeHandle {
    Strong(ModelHandle<Worktree>),
    Weak(WeakModelHandle<Worktree>),
}
58
/// Distinguishes whether this replica hosts the project or joined it
/// as a guest over RPC.
enum ProjectClientState {
    /// We are the host.
    Local {
        // Flipped by `share`/`unshare`.
        is_shared: bool,
        // Server-assigned project id, published whenever the connection
        // status changes (see the task spawned in `Project::local`).
        remote_id_tx: watch::Sender<Option<u64>>,
        remote_id_rx: watch::Receiver<Option<u64>>,
        // Keeps the registration/reconnection task alive for the
        // project's lifetime.
        _maintain_remote_id_task: Task<Option<()>>,
    },
    /// We joined someone else's project.
    Remote {
        // When true the project becomes read-only (see `is_read_only`);
        // presumably set when the host stops sharing — the setter is not
        // in this chunk.
        sharing_has_stopped: bool,
        remote_id: u64,
        replica_id: ReplicaId,
    },
}
72
/// Another peer participating in this project: their user record, the
/// connection they're on, and the replica id their edits carry.
#[derive(Clone, Debug)]
pub struct Collaborator {
    pub user: Arc<User>,
    pub peer_id: PeerId,
    pub replica_id: ReplicaId,
}
79
/// Events emitted by `Project` for observers (e.g. UI views).
#[derive(Clone, Debug, PartialEq)]
pub enum Event {
    ActiveEntryChanged(Option<ProjectEntry>),
    WorktreeRemoved(WorktreeId),
    // Lifecycle of a disk-based diagnostics pass (e.g. a compiler run).
    DiskBasedDiagnosticsStarted,
    DiskBasedDiagnosticsUpdated,
    DiskBasedDiagnosticsFinished,
    // Emitted after diagnostics for one path have been stored
    // (see `update_diagnostic_entries`).
    DiagnosticsUpdated(ProjectPath),
}
89
/// Identifies a file by its worktree plus a worktree-relative path.
/// Used as a map key (e.g. for in-flight buffer loads), hence the
/// full set of ordering/hashing derives.
#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct ProjectPath {
    pub worktree_id: WorktreeId,
    pub path: Arc<Path>,
}
95
/// Per-severity counts of the primary diagnostics for a single path
/// (see `DiagnosticSummary::new`).
#[derive(Clone, Debug, Default, PartialEq)]
pub struct DiagnosticSummary {
    pub error_count: usize,
    pub warning_count: usize,
    pub info_count: usize,
    pub hint_count: usize,
}
103
/// One result of a go-to-definition lookup: the buffer containing the
/// definition and the anchored range of the definition within it.
#[derive(Debug)]
pub struct Definition {
    pub target_buffer: ModelHandle<Buffer>,
    pub target_range: Range<language::Anchor>,
}
109
110impl DiagnosticSummary {
111 fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
112 let mut this = Self {
113 error_count: 0,
114 warning_count: 0,
115 info_count: 0,
116 hint_count: 0,
117 };
118
119 for entry in diagnostics {
120 if entry.diagnostic.is_primary {
121 match entry.diagnostic.severity {
122 DiagnosticSeverity::ERROR => this.error_count += 1,
123 DiagnosticSeverity::WARNING => this.warning_count += 1,
124 DiagnosticSeverity::INFORMATION => this.info_count += 1,
125 DiagnosticSeverity::HINT => this.hint_count += 1,
126 _ => {}
127 }
128 }
129 }
130
131 this
132 }
133
134 pub fn to_proto(&self, path: Arc<Path>) -> proto::DiagnosticSummary {
135 proto::DiagnosticSummary {
136 path: path.to_string_lossy().to_string(),
137 error_count: self.error_count as u32,
138 warning_count: self.warning_count as u32,
139 info_count: self.info_count as u32,
140 hint_count: self.hint_count as u32,
141 }
142 }
143}
144
/// Identifies a single file-system entry within a worktree by the
/// worktree's id and the entry's id.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ProjectEntry {
    pub worktree_id: WorktreeId,
    pub entry_id: usize,
}
150
151impl Project {
    /// Creates a project hosted on this machine.
    ///
    /// Spawns a background task that watches the client's connection
    /// status: whenever the client is connected it registers the
    /// project with the server, re-registers every current worktree
    /// under the returned project id, and publishes that id through
    /// `set_remote_id`; on any other status it publishes `None`.
    pub fn local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut MutableAppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let (remote_id_tx, remote_id_rx) = watch::channel();
            let _maintain_remote_id_task = cx.spawn_weak({
                let rpc = client.clone();
                move |this, mut cx| {
                    async move {
                        let mut status = rpc.status();
                        while let Some(status) = status.recv().await {
                            // Stop silently if the project was dropped.
                            if let Some(this) = this.upgrade(&cx) {
                                // Connected: ask the server for a project id.
                                let remote_id = if let client::Status::Connected { .. } = status {
                                    let response = rpc.request(proto::RegisterProject {}).await?;
                                    Some(response.project_id)
                                } else {
                                    None
                                };

                                // Re-register every worktree under the new id
                                // before publishing it.
                                if let Some(project_id) = remote_id {
                                    let mut registrations = Vec::new();
                                    this.update(&mut cx, |this, cx| {
                                        for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                                            registrations.push(worktree.update(
                                                cx,
                                                |worktree, cx| {
                                                    let worktree = worktree.as_local_mut().unwrap();
                                                    worktree.register(project_id, cx)
                                                },
                                            ));
                                        }
                                    });
                                    for registration in registrations {
                                        registration.await?;
                                    }
                                }
                                this.update(&mut cx, |this, cx| this.set_remote_id(remote_id, cx));
                            }
                        }
                        Ok(())
                    }
                    .log_err()
                }
            });

            Self {
                worktrees: Default::default(),
                collaborators: Default::default(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                client_state: ProjectClientState::Local {
                    is_shared: false,
                    remote_id_tx,
                    remote_id_rx,
                    _maintain_remote_id_task,
                },
                subscriptions: Vec::new(),
                active_entry: None,
                languages,
                client,
                user_store,
                fs,
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            }
        })
    }
224
    /// Joins an existing project hosted by another peer.
    ///
    /// Authenticates and connects, asks the server to join `remote_id`,
    /// constructs remote worktrees from the response (loading them in
    /// the background), loads the collaborators' user records, and
    /// subscribes to all project-scoped messages a guest must handle.
    pub async fn remote(
        remote_id: u64,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        client.authenticate_and_connect(&cx).await?;

        let response = client
            .request(proto::JoinProject {
                project_id: remote_id,
            })
            .await?;

        let replica_id = response.replica_id as ReplicaId;

        let mut worktrees = Vec::new();
        for worktree in response.worktrees {
            let (worktree, load_task) = cx
                .update(|cx| Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx));
            worktrees.push(worktree);
            // Let each worktree finish loading in the background.
            load_task.detach();
        }

        // Fetch user records for every collaborator up front so
        // `Collaborator::from_proto` can resolve them below.
        let user_ids = response
            .collaborators
            .iter()
            .map(|peer| peer.user_id)
            .collect();
        user_store
            .update(cx, |user_store, cx| user_store.load_users(user_ids, cx))
            .await?;
        let mut collaborators = HashMap::default();
        for message in response.collaborators {
            let collaborator = Collaborator::from_proto(message, &user_store, cx).await?;
            collaborators.insert(collaborator.peer_id, collaborator);
        }

        Ok(cx.add_model(|cx| {
            let mut this = Self {
                worktrees: Vec::new(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                active_entry: None,
                collaborators,
                languages,
                user_store,
                fs,
                subscriptions: vec![
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unshare_project),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_share_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unregister_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_update_diagnostic_summary,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updating,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updated,
                    ),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer_file),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_reloaded),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
                ],
                client,
                client_state: ProjectClientState::Remote {
                    sharing_has_stopped: false,
                    remote_id,
                    replica_id,
                },
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            };
            for worktree in worktrees {
                this.add_worktree(&worktree, cx);
            }
            this
        }))
    }
318
319 fn set_remote_id(&mut self, remote_id: Option<u64>, cx: &mut ModelContext<Self>) {
320 if let ProjectClientState::Local { remote_id_tx, .. } = &mut self.client_state {
321 *remote_id_tx.borrow_mut() = remote_id;
322 }
323
324 self.subscriptions.clear();
325 if let Some(remote_id) = remote_id {
326 let client = &self.client;
327 self.subscriptions.extend([
328 client.subscribe_to_entity(remote_id, cx, Self::handle_open_buffer),
329 client.subscribe_to_entity(remote_id, cx, Self::handle_close_buffer),
330 client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
331 client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
332 client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
333 client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
334 client.subscribe_to_entity(remote_id, cx, Self::handle_save_buffer),
335 client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
336 client.subscribe_to_entity(remote_id, cx, Self::handle_format_buffer),
337 client.subscribe_to_entity(remote_id, cx, Self::handle_get_definition),
338 ]);
339 }
340 }
341
342 pub fn remote_id(&self) -> Option<u64> {
343 match &self.client_state {
344 ProjectClientState::Local { remote_id_rx, .. } => *remote_id_rx.borrow(),
345 ProjectClientState::Remote { remote_id, .. } => Some(*remote_id),
346 }
347 }
348
349 pub fn next_remote_id(&self) -> impl Future<Output = u64> {
350 let mut id = None;
351 let mut watch = None;
352 match &self.client_state {
353 ProjectClientState::Local { remote_id_rx, .. } => watch = Some(remote_id_rx.clone()),
354 ProjectClientState::Remote { remote_id, .. } => id = Some(*remote_id),
355 }
356
357 async move {
358 if let Some(id) = id {
359 return id;
360 }
361 let mut watch = watch.unwrap();
362 loop {
363 let id = *watch.borrow();
364 if let Some(id) = id {
365 return id;
366 }
367 watch.recv().await;
368 }
369 }
370 }
371
372 pub fn replica_id(&self) -> ReplicaId {
373 match &self.client_state {
374 ProjectClientState::Local { .. } => 0,
375 ProjectClientState::Remote { replica_id, .. } => *replica_id,
376 }
377 }
378
    /// The other peers currently participating in this project, keyed
    /// by their connection's peer id.
    pub fn collaborators(&self) -> &HashMap<PeerId, Collaborator> {
        &self.collaborators
    }
382
383 pub fn worktrees<'a>(
384 &'a self,
385 cx: &'a AppContext,
386 ) -> impl 'a + Iterator<Item = ModelHandle<Worktree>> {
387 self.worktrees
388 .iter()
389 .filter_map(move |worktree| worktree.upgrade(cx))
390 }
391
392 pub fn worktree_for_id(
393 &self,
394 id: WorktreeId,
395 cx: &AppContext,
396 ) -> Option<ModelHandle<Worktree>> {
397 self.worktrees(cx)
398 .find(|worktree| worktree.read(cx).id() == id)
399 }
400
    /// Makes this local project available to guests: marks it shared,
    /// tells the server, then shares each worktree under the project's
    /// remote id and notifies observers.
    ///
    /// Fails if the project has no remote id yet or is itself remote.
    pub fn share(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
        let rpc = self.client.clone();
        cx.spawn(|this, mut cx| async move {
            // Flip the sharing flag and read the registered project id.
            let project_id = this.update(&mut cx, |this, _| {
                if let ProjectClientState::Local {
                    is_shared,
                    remote_id_rx,
                    ..
                } = &mut this.client_state
                {
                    *is_shared = true;
                    remote_id_rx
                        .borrow()
                        .ok_or_else(|| anyhow!("no project id"))
                } else {
                    Err(anyhow!("can't share a remote project"))
                }
            })?;

            rpc.request(proto::ShareProject { project_id }).await?;
            // Kick off sharing for every worktree, then await them all.
            let mut tasks = Vec::new();
            this.update(&mut cx, |this, cx| {
                for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                    worktree.update(cx, |worktree, cx| {
                        let worktree = worktree.as_local_mut().unwrap();
                        tasks.push(worktree.share(project_id, cx));
                    });
                }
            });
            for task in tasks {
                task.await?;
            }
            this.update(&mut cx, |_, cx| cx.notify());
            Ok(())
        })
    }
437
438 pub fn unshare(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
439 let rpc = self.client.clone();
440 cx.spawn(|this, mut cx| async move {
441 let project_id = this.update(&mut cx, |this, _| {
442 if let ProjectClientState::Local {
443 is_shared,
444 remote_id_rx,
445 ..
446 } = &mut this.client_state
447 {
448 *is_shared = false;
449 remote_id_rx
450 .borrow()
451 .ok_or_else(|| anyhow!("no project id"))
452 } else {
453 Err(anyhow!("can't share a remote project"))
454 }
455 })?;
456
457 rpc.send(proto::UnshareProject { project_id }).await?;
458 this.update(&mut cx, |this, cx| {
459 this.collaborators.clear();
460 this.shared_buffers.clear();
461 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
462 worktree.update(cx, |worktree, _| {
463 worktree.as_local_mut().unwrap().unshare();
464 });
465 }
466 cx.notify()
467 });
468 Ok(())
469 })
470 }
471
472 pub fn is_read_only(&self) -> bool {
473 match &self.client_state {
474 ProjectClientState::Local { .. } => false,
475 ProjectClientState::Remote {
476 sharing_has_stopped,
477 ..
478 } => *sharing_has_stopped,
479 }
480 }
481
482 pub fn is_local(&self) -> bool {
483 match &self.client_state {
484 ProjectClientState::Local { .. } => true,
485 ProjectClientState::Remote { .. } => false,
486 }
487 }
488
    /// Opens the buffer for `path`, deduplicating against both
    /// already-open buffers and loads that are currently in flight for
    /// the same path (concurrent callers share one load).
    pub fn open_buffer(
        &mut self,
        path: impl Into<ProjectPath>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let project_path = path.into();
        let worktree = if let Some(worktree) = self.worktree_for_id(project_path.worktree_id, cx) {
            worktree
        } else {
            return Task::ready(Err(anyhow!("no such worktree")));
        };

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = self.get_open_buffer(&project_path, cx);
        if let Some(existing_buffer) = existing_buffer {
            return Task::ready(Ok(existing_buffer));
        }

        let mut loading_watch = match self.loading_buffers.entry(project_path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                // Local worktrees load from disk; remote ones via RPC.
                let load_buffer = if worktree.read(cx).is_local() {
                    self.open_local_buffer(&project_path.path, &worktree, cx)
                } else {
                    self.open_remote_buffer(&project_path.path, &worktree, cx)
                };

                cx.spawn(move |this, mut cx| async move {
                    let load_result = load_buffer.await;
                    // Publish the result to everyone waiting on the watch.
                    *tx.borrow_mut() = Some(this.update(&mut cx, |this, _| {
                        // Record the fact that the buffer is no longer loading.
                        this.loading_buffers.remove(&project_path);
                        let buffer = load_result.map_err(Arc::new)?;
                        Ok(buffer)
                    }));
                })
                .detach();
                rx
            }
        };

        // Wait for the load (ours or a concurrent caller's) to resolve.
        cx.foreground().spawn(async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    match result {
                        Ok(buffer) => return Ok(buffer.clone()),
                        Err(error) => return Err(anyhow!("{}", error)),
                    }
                }
                loading_watch.recv().await;
            }
        })
    }
549
    /// Loads a buffer from disk via the given local worktree, then
    /// registers it with the project (which assigns its language,
    /// language server, and stored diagnostics).
    fn open_local_buffer(
        &mut self,
        path: &Arc<Path>,
        worktree: &ModelHandle<Worktree>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let load_buffer = worktree.update(cx, |worktree, cx| {
            let worktree = worktree.as_local_mut().unwrap();
            worktree.load_buffer(path, cx)
        });
        // Hold the worktree weakly while the load is in flight; it may
        // be removed before the load completes.
        let worktree = worktree.downgrade();
        cx.spawn(|this, mut cx| async move {
            let buffer = load_buffer.await?;
            let worktree = worktree
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was removed"))?;
            this.update(&mut cx, |this, cx| {
                this.register_buffer(&buffer, Some(&worktree), cx)
            })?;
            Ok(buffer)
        })
    }
572
    /// Requests a buffer's state from the host over RPC and
    /// deserializes it into a local buffer model.
    ///
    /// Panics (via `unwrap`) if called on a project with no remote id;
    /// callers reach this only for remote worktrees, which always have one.
    fn open_remote_buffer(
        &mut self,
        path: &Arc<Path>,
        worktree: &ModelHandle<Worktree>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let project_id = self.remote_id().unwrap();
        let remote_worktree_id = worktree.read(cx).id();
        let path = path.clone();
        let path_string = path.to_string_lossy().to_string();
        cx.spawn(|this, mut cx| async move {
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id.to_proto(),
                    path: path_string,
                })
                .await?;
            let buffer = response.buffer.ok_or_else(|| anyhow!("missing buffer"))?;
            this.update(&mut cx, |this, cx| {
                this.deserialize_remote_buffer(buffer, cx)
            })
        })
    }
598
    /// Saves `buffer` under a new absolute path, finding or creating a
    /// local worktree that contains that path, then re-assigns the
    /// buffer's language based on its new file name.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        abs_path: PathBuf,
        cx: &mut ModelContext<Project>,
    ) -> Task<Result<()>> {
        let worktree_task = self.find_or_create_local_worktree(&abs_path, false, cx);
        cx.spawn(|this, mut cx| async move {
            let (worktree, path) = worktree_task.await?;
            worktree
                .update(&mut cx, |worktree, cx| {
                    worktree
                        .as_local_mut()
                        .unwrap()
                        .save_buffer_as(buffer.clone(), path, cx)
                })
                .await?;
            // The file (and possibly its extension) changed, so the
            // language may have too.
            this.update(&mut cx, |this, cx| {
                this.assign_language_to_buffer(&buffer, Some(&worktree), cx);
            });
            Ok(())
        })
    }
622
623 #[cfg(any(test, feature = "test-support"))]
624 pub fn has_open_buffer(&self, path: impl Into<ProjectPath>, cx: &AppContext) -> bool {
625 let path = path.into();
626 if let Some(worktree) = self.worktree_for_id(path.worktree_id, cx) {
627 self.open_buffers.iter().any(|(_, buffer)| {
628 if let Some(buffer) = buffer.upgrade(cx) {
629 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
630 if file.worktree == worktree && file.path() == &path.path {
631 return true;
632 }
633 }
634 }
635 false
636 })
637 } else {
638 false
639 }
640 }
641
    /// Returns the open buffer for `path`, if any.
    ///
    /// Side effect: while scanning, prunes `open_buffers` entries whose
    /// weak handles can no longer be upgraded (buffers that have been
    /// dropped).
    fn get_open_buffer(
        &mut self,
        path: &ProjectPath,
        cx: &mut ModelContext<Self>,
    ) -> Option<ModelHandle<Buffer>> {
        let mut result = None;
        let worktree = self.worktree_for_id(path.worktree_id, cx)?;
        self.open_buffers.retain(|_, buffer| {
            if let Some(buffer) = buffer.upgrade(cx) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == worktree && file.path() == &path.path {
                        result = Some(buffer);
                    }
                }
                true
            } else {
                // The buffer was dropped; discard the stale weak handle.
                false
            }
        });
        result
    }
663
664 fn register_buffer(
665 &mut self,
666 buffer: &ModelHandle<Buffer>,
667 worktree: Option<&ModelHandle<Worktree>>,
668 cx: &mut ModelContext<Self>,
669 ) -> Result<()> {
670 if self
671 .open_buffers
672 .insert(buffer.read(cx).remote_id() as usize, buffer.downgrade())
673 .is_some()
674 {
675 return Err(anyhow!("registered the same buffer twice"));
676 }
677 self.assign_language_to_buffer(&buffer, worktree, cx);
678 Ok(())
679 }
680
    /// Picks a language for `buffer` based on its file's full path and,
    /// for local worktrees, wires up a language server and any
    /// previously stored diagnostics for the file.
    ///
    /// Returns `None` both when the buffer has no file and on normal
    /// completion — the `Option` return exists only to allow early exit
    /// via `?`; callers ignore it.
    fn assign_language_to_buffer(
        &mut self,
        buffer: &ModelHandle<Buffer>,
        worktree: Option<&ModelHandle<Worktree>>,
        cx: &mut ModelContext<Self>,
    ) -> Option<()> {
        let (path, full_path) = {
            let file = buffer.read(cx).file()?;
            (file.path().clone(), file.full_path(cx))
        };

        // If the buffer has a language, set it and start/assign the language server
        if let Some(language) = self.languages.select_language(&full_path) {
            buffer.update(cx, |buffer, cx| {
                buffer.set_language(Some(language.clone()), cx);
            });

            // For local worktrees, start a language server if needed.
            // Also assign the language server and any previously stored diagnostics to the buffer.
            if let Some(local_worktree) = worktree.and_then(|w| w.read(cx).as_local()) {
                let worktree_id = local_worktree.id();
                let worktree_abs_path = local_worktree.abs_path().clone();

                // Reuse the cached server for this (worktree, language)
                // pair, or start one and cache it.
                let language_server = match self
                    .language_servers
                    .entry((worktree_id, language.name().to_string()))
                {
                    hash_map::Entry::Occupied(e) => Some(e.get().clone()),
                    hash_map::Entry::Vacant(e) => Self::start_language_server(
                        self.client.clone(),
                        language.clone(),
                        &worktree_abs_path,
                        cx,
                    )
                    .map(|server| e.insert(server).clone()),
                };

                buffer.update(cx, |buffer, cx| {
                    buffer.set_language_server(language_server, cx);
                });
            }
        }

        if let Some(local_worktree) = worktree.and_then(|w| w.read(cx).as_local()) {
            if let Some(diagnostics) = local_worktree.diagnostics_for_path(&path) {
                buffer.update(cx, |buffer, cx| {
                    buffer.update_diagnostics(None, diagnostics, cx).log_err();
                });
            }
        }

        None
    }
734
    /// Starts a language server for `language` rooted at `worktree_path`
    /// and installs its notification handlers.
    ///
    /// `PublishDiagnostics` and `$/progress` notifications are funneled
    /// through a channel into one event loop that applies diagnostics to
    /// the project and broadcasts disk-based-diagnostics start/finish,
    /// forwarding them over RPC when the project has a remote id. For
    /// servers without a disk-based-diagnostics progress token, each
    /// diagnostics update is bracketed by synthetic start/finish events.
    ///
    /// Returns `None` if the server fails to start.
    fn start_language_server(
        rpc: Arc<Client>,
        language: Arc<Language>,
        worktree_path: &Path,
        cx: &mut ModelContext<Self>,
    ) -> Option<Arc<LanguageServer>> {
        // Internal events bridging LSP notification callbacks (which run
        // off the model thread) to the processing loop below.
        enum LspEvent {
            DiagnosticsStart,
            DiagnosticsUpdate(lsp::PublishDiagnosticsParams),
            DiagnosticsFinish,
        }

        let language_server = language
            .start_server(worktree_path, cx)
            .log_err()
            .flatten()?;
        let disk_based_sources = language
            .disk_based_diagnostic_sources()
            .cloned()
            .unwrap_or_default();
        let disk_based_diagnostics_progress_token =
            language.disk_based_diagnostics_progress_token().cloned();
        let has_disk_based_diagnostic_progress_token =
            disk_based_diagnostics_progress_token.is_some();
        let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();

        // Listen for `PublishDiagnostics` notifications.
        language_server
            .on_notification::<lsp::notification::PublishDiagnostics, _>({
                let diagnostics_tx = diagnostics_tx.clone();
                move |params| {
                    // Without a progress token, treat every publish as
                    // its own start/update/finish cycle.
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                    }
                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsUpdate(params))).ok();
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                    }
                }
            })
            .detach();

        // Listen for `Progress` notifications. Send an event when the language server
        // transitions between running jobs and not running any jobs.
        let mut running_jobs_for_this_server: i32 = 0;
        language_server
            .on_notification::<lsp::notification::Progress, _>(move |params| {
                let token = match params.token {
                    lsp::NumberOrString::Number(_) => None,
                    lsp::NumberOrString::String(token) => Some(token),
                };

                if token == disk_based_diagnostics_progress_token {
                    match params.value {
                        lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                            lsp::WorkDoneProgress::Begin(_) => {
                                running_jobs_for_this_server += 1;
                                // Only the 0 -> 1 transition signals a start.
                                if running_jobs_for_this_server == 1 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                                }
                            }
                            lsp::WorkDoneProgress::End(_) => {
                                running_jobs_for_this_server -= 1;
                                // Only the 1 -> 0 transition signals a finish.
                                if running_jobs_for_this_server == 0 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                                }
                            }
                            _ => {}
                        },
                    }
                }
            })
            .detach();

        // Process all the LSP events.
        cx.spawn_weak(|this, mut cx| async move {
            while let Ok(message) = diagnostics_rx.recv().await {
                // Stop the loop once the project has been dropped.
                let this = cx.read(|cx| this.upgrade(cx))?;
                match message {
                    LspEvent::DiagnosticsStart => {
                        let send = this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_started(cx);
                            this.remote_id().map(|project_id| {
                                rpc.send(proto::DiskBasedDiagnosticsUpdating { project_id })
                            })
                        });
                        if let Some(send) = send {
                            send.await.log_err();
                        }
                    }
                    LspEvent::DiagnosticsUpdate(params) => {
                        this.update(&mut cx, |this, cx| {
                            this.update_diagnostics(params, &disk_based_sources, cx)
                                .log_err();
                        });
                    }
                    LspEvent::DiagnosticsFinish => {
                        let send = this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_finished(cx);
                            this.remote_id().map(|project_id| {
                                rpc.send(proto::DiskBasedDiagnosticsUpdated { project_id })
                            })
                        });
                        if let Some(send) = send {
                            send.await.log_err();
                        }
                    }
                }
            }
            Some(())
        })
        .detach();

        Some(language_server)
    }
850
851 pub fn update_diagnostics(
852 &mut self,
853 params: lsp::PublishDiagnosticsParams,
854 disk_based_sources: &HashSet<String>,
855 cx: &mut ModelContext<Self>,
856 ) -> Result<()> {
857 let abs_path = params
858 .uri
859 .to_file_path()
860 .map_err(|_| anyhow!("URI is not a file"))?;
861 let mut next_group_id = 0;
862 let mut diagnostics = Vec::default();
863 let mut primary_diagnostic_group_ids = HashMap::default();
864 let mut sources_by_group_id = HashMap::default();
865 let mut supporting_diagnostic_severities = HashMap::default();
866 for diagnostic in ¶ms.diagnostics {
867 let source = diagnostic.source.as_ref();
868 let code = diagnostic.code.as_ref().map(|code| match code {
869 lsp::NumberOrString::Number(code) => code.to_string(),
870 lsp::NumberOrString::String(code) => code.clone(),
871 });
872 let range = range_from_lsp(diagnostic.range);
873 let is_supporting = diagnostic
874 .related_information
875 .as_ref()
876 .map_or(false, |infos| {
877 infos.iter().any(|info| {
878 primary_diagnostic_group_ids.contains_key(&(
879 source,
880 code.clone(),
881 range_from_lsp(info.location.range),
882 ))
883 })
884 });
885
886 if is_supporting {
887 if let Some(severity) = diagnostic.severity {
888 supporting_diagnostic_severities
889 .insert((source, code.clone(), range), severity);
890 }
891 } else {
892 let group_id = post_inc(&mut next_group_id);
893 let is_disk_based =
894 source.map_or(false, |source| disk_based_sources.contains(source));
895
896 sources_by_group_id.insert(group_id, source);
897 primary_diagnostic_group_ids
898 .insert((source, code.clone(), range.clone()), group_id);
899
900 diagnostics.push(DiagnosticEntry {
901 range,
902 diagnostic: Diagnostic {
903 code: code.clone(),
904 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
905 message: diagnostic.message.clone(),
906 group_id,
907 is_primary: true,
908 is_valid: true,
909 is_disk_based,
910 },
911 });
912 if let Some(infos) = &diagnostic.related_information {
913 for info in infos {
914 if info.location.uri == params.uri {
915 let range = range_from_lsp(info.location.range);
916 diagnostics.push(DiagnosticEntry {
917 range,
918 diagnostic: Diagnostic {
919 code: code.clone(),
920 severity: DiagnosticSeverity::INFORMATION,
921 message: info.message.clone(),
922 group_id,
923 is_primary: false,
924 is_valid: true,
925 is_disk_based,
926 },
927 });
928 }
929 }
930 }
931 }
932 }
933
934 for entry in &mut diagnostics {
935 let diagnostic = &mut entry.diagnostic;
936 if !diagnostic.is_primary {
937 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
938 if let Some(&severity) = supporting_diagnostic_severities.get(&(
939 source,
940 diagnostic.code.clone(),
941 entry.range.clone(),
942 )) {
943 diagnostic.severity = severity;
944 }
945 }
946 }
947
948 self.update_diagnostic_entries(abs_path, params.version, diagnostics, cx)?;
949 Ok(())
950 }
951
    /// Applies computed diagnostics for the file at `abs_path`: forwards
    /// them to the matching open buffer (if any), persists them on the
    /// owning local worktree, and emits `Event::DiagnosticsUpdated`.
    ///
    /// # Errors
    /// Fails when no local worktree contains `abs_path`, when the buffer
    /// rejects the update, or when the worktree is not local.
    pub fn update_diagnostic_entries(
        &mut self,
        abs_path: PathBuf,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Project>,
    ) -> Result<(), anyhow::Error> {
        let (worktree, relative_path) = self
            .find_local_worktree(&abs_path, cx)
            .ok_or_else(|| anyhow!("no worktree found for diagnostics"))?;
        let project_path = ProjectPath {
            worktree_id: worktree.read(cx).id(),
            path: relative_path.into(),
        };

        for buffer in self.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                // NOTE(review): buffers are matched on relative path only,
                // not worktree id — two worktrees containing the same
                // relative path could match the wrong buffer here.
                // Confirm whether the worktree should be compared too.
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == project_path.path)
                {
                    buffer.update(cx, |buffer, cx| {
                        buffer.update_diagnostics(version, diagnostics.clone(), cx)
                    })?;
                    break;
                }
            }
        }
        worktree.update(cx, |worktree, cx| {
            worktree
                .as_local_mut()
                .ok_or_else(|| anyhow!("not a local worktree"))?
                .update_diagnostics(project_path.path.clone(), diagnostics, cx)
        })?;
        cx.emit(Event::DiagnosticsUpdated(project_path));
        Ok(())
    }
990
    /// Resolves go-to-definition for `position` in the given buffer.
    ///
    /// For buffers in a local worktree, queries the buffer's language
    /// server directly; each returned location is resolved to a buffer
    /// (creating a new weak worktree for paths outside the project) and
    /// an anchored range. For remote projects, forwards the request to
    /// the host over RPC and deserializes the returned buffers/anchors.
    pub fn definition<T: ToOffset>(
        &self,
        source_buffer_handle: &ModelHandle<Buffer>,
        position: T,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<Vec<Definition>>> {
        let source_buffer_handle = source_buffer_handle.clone();
        let source_buffer = source_buffer_handle.read(cx);
        let worktree;
        let buffer_abs_path;
        if let Some(file) = File::from_dyn(source_buffer.file()) {
            worktree = file.worktree.clone();
            buffer_abs_path = file.as_local().map(|f| f.abs_path(cx));
        } else {
            return Task::ready(Err(anyhow!("buffer does not belong to any worktree")));
        };

        if worktree.read(cx).as_local().is_some() {
            let point = source_buffer.offset_to_point_utf16(position.to_offset(source_buffer));
            let buffer_abs_path = buffer_abs_path.unwrap();
            // Look up the server registered for this worktree + language.
            let lang_name;
            let lang_server;
            if let Some(lang) = source_buffer.language() {
                lang_name = lang.name().to_string();
                if let Some(server) = self
                    .language_servers
                    .get(&(worktree.read(cx).id(), lang_name.clone()))
                {
                    lang_server = server.clone();
                } else {
                    return Task::ready(Err(anyhow!("buffer does not have a language server")));
                };
            } else {
                return Task::ready(Err(anyhow!("buffer does not have a language")));
            }

            cx.spawn(|this, mut cx| async move {
                let response = lang_server
                    .request::<lsp::request::GotoDefinition>(lsp::GotoDefinitionParams {
                        text_document_position_params: lsp::TextDocumentPositionParams {
                            text_document: lsp::TextDocumentIdentifier::new(
                                lsp::Url::from_file_path(&buffer_abs_path).unwrap(),
                            ),
                            position: lsp::Position::new(point.row, point.column),
                        },
                        work_done_progress_params: Default::default(),
                        partial_result_params: Default::default(),
                    })
                    .await?;

                let mut definitions = Vec::new();
                if let Some(response) = response {
                    // Normalize the three LSP response shapes into a flat
                    // list of (uri, range) pairs.
                    let mut unresolved_locations = Vec::new();
                    match response {
                        lsp::GotoDefinitionResponse::Scalar(loc) => {
                            unresolved_locations.push((loc.uri, loc.range));
                        }
                        lsp::GotoDefinitionResponse::Array(locs) => {
                            unresolved_locations.extend(locs.into_iter().map(|l| (l.uri, l.range)));
                        }
                        lsp::GotoDefinitionResponse::Link(links) => {
                            unresolved_locations.extend(
                                links
                                    .into_iter()
                                    .map(|l| (l.target_uri, l.target_selection_range)),
                            );
                        }
                    }

                    for (target_uri, target_range) in unresolved_locations {
                        let abs_path = target_uri
                            .to_file_path()
                            .map_err(|_| anyhow!("invalid target path"))?;

                        // Reuse an existing worktree containing the target,
                        // or create a new (weak) one and associate the same
                        // language server with it.
                        let (worktree, relative_path) = if let Some(result) =
                            this.read_with(&cx, |this, cx| this.find_local_worktree(&abs_path, cx))
                        {
                            result
                        } else {
                            let worktree = this
                                .update(&mut cx, |this, cx| {
                                    this.create_local_worktree(&abs_path, true, cx)
                                })
                                .await?;
                            this.update(&mut cx, |this, cx| {
                                this.language_servers.insert(
                                    (worktree.read(cx).id(), lang_name.clone()),
                                    lang_server.clone(),
                                );
                            });
                            (worktree, PathBuf::new())
                        };

                        let project_path = ProjectPath {
                            worktree_id: worktree.read_with(&cx, |worktree, _| worktree.id()),
                            path: relative_path.into(),
                        };
                        let target_buffer_handle = this
                            .update(&mut cx, |this, cx| this.open_buffer(project_path, cx))
                            .await?;
                        // Clip the LSP range to the buffer and convert it
                        // into stable anchors.
                        cx.read(|cx| {
                            let target_buffer = target_buffer_handle.read(cx);
                            let target_start = target_buffer
                                .clip_point_utf16(target_range.start.to_point_utf16(), Bias::Left);
                            let target_end = target_buffer
                                .clip_point_utf16(target_range.end.to_point_utf16(), Bias::Left);
                            definitions.push(Definition {
                                target_buffer: target_buffer_handle,
                                target_range: target_buffer.anchor_after(target_start)
                                    ..target_buffer.anchor_before(target_end),
                            });
                        });
                    }
                }

                Ok(definitions)
            })
        } else if let Some(project_id) = self.remote_id() {
            // Guest: forward the request to the host over RPC.
            let client = self.client.clone();
            let request = proto::GetDefinition {
                project_id,
                buffer_id: source_buffer.remote_id(),
                position: Some(serialize_anchor(&source_buffer.anchor_before(position))),
            };
            cx.spawn(|this, mut cx| async move {
                let response = client.request(request).await?;
                this.update(&mut cx, |this, cx| {
                    let mut definitions = Vec::new();
                    for definition in response.definitions {
                        let target_buffer = this.deserialize_remote_buffer(
                            definition.buffer.ok_or_else(|| anyhow!("missing buffer"))?,
                            cx,
                        )?;
                        let target_start = definition
                            .target_start
                            .and_then(deserialize_anchor)
                            .ok_or_else(|| anyhow!("missing target start"))?;
                        let target_end = definition
                            .target_end
                            .and_then(deserialize_anchor)
                            .ok_or_else(|| anyhow!("missing target end"))?;
                        definitions.push(Definition {
                            target_buffer,
                            target_range: target_start..target_end,
                        })
                    }

                    Ok(definitions)
                })
            })
        } else {
            Task::ready(Err(anyhow!("project does not have a remote id")))
        }
    }
1145
1146 pub fn find_or_create_local_worktree(
1147 &self,
1148 abs_path: impl AsRef<Path>,
1149 weak: bool,
1150 cx: &mut ModelContext<Self>,
1151 ) -> Task<Result<(ModelHandle<Worktree>, PathBuf)>> {
1152 let abs_path = abs_path.as_ref();
1153 if let Some((tree, relative_path)) = self.find_local_worktree(abs_path, cx) {
1154 Task::ready(Ok((tree.clone(), relative_path.into())))
1155 } else {
1156 let worktree = self.create_local_worktree(abs_path, weak, cx);
1157 cx.foreground()
1158 .spawn(async move { Ok((worktree.await?, PathBuf::new())) })
1159 }
1160 }
1161
1162 fn find_local_worktree(
1163 &self,
1164 abs_path: &Path,
1165 cx: &AppContext,
1166 ) -> Option<(ModelHandle<Worktree>, PathBuf)> {
1167 for tree in self.worktrees(cx) {
1168 if let Some(relative_path) = tree
1169 .read(cx)
1170 .as_local()
1171 .and_then(|t| abs_path.strip_prefix(t.abs_path()).ok())
1172 {
1173 return Some((tree.clone(), relative_path.into()));
1174 }
1175 }
1176 None
1177 }
1178
1179 pub fn is_shared(&self) -> bool {
1180 match &self.client_state {
1181 ProjectClientState::Local { is_shared, .. } => *is_shared,
1182 ProjectClientState::Remote { .. } => false,
1183 }
1184 }
1185
    /// Creates a new local worktree rooted at `abs_path` and adds it to this
    /// project. If the project has a remote id, the worktree is registered
    /// with the server — and additionally shared, if the project is currently
    /// shared — before the returned task resolves.
    fn create_local_worktree(
        &self,
        abs_path: impl AsRef<Path>,
        weak: bool,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Worktree>>> {
        let fs = self.fs.clone();
        let client = self.client.clone();
        let path = Arc::from(abs_path.as_ref());
        cx.spawn(|project, mut cx| async move {
            let worktree = Worktree::local(client.clone(), path, weak, fs, &mut cx).await?;

            // Add the worktree first, then capture the project's sharing
            // state in the same update so the two cannot drift apart.
            let (remote_project_id, is_shared) = project.update(&mut cx, |project, cx| {
                project.add_worktree(&worktree, cx);
                (project.remote_id(), project.is_shared())
            });

            if let Some(project_id) = remote_project_id {
                // Registration must complete before sharing is attempted.
                worktree
                    .update(&mut cx, |worktree, cx| {
                        worktree.as_local_mut().unwrap().register(project_id, cx)
                    })
                    .await?;
                if is_shared {
                    worktree
                        .update(&mut cx, |worktree, cx| {
                            worktree.as_local_mut().unwrap().share(project_id, cx)
                        })
                        .await?;
                }
            }

            Ok(worktree)
        })
    }
1221
1222 pub fn remove_worktree(&mut self, id: WorktreeId, cx: &mut ModelContext<Self>) {
1223 self.worktrees.retain(|worktree| {
1224 worktree
1225 .upgrade(cx)
1226 .map_or(false, |w| w.read(cx).id() != id)
1227 });
1228 cx.notify();
1229 }
1230
    /// Wires a worktree into the project: observes it so worktree changes
    /// re-render the project, forwards local worktree events to open buffers,
    /// and stores either a strong or weak handle depending on the worktree.
    fn add_worktree(&mut self, worktree: &ModelHandle<Worktree>, cx: &mut ModelContext<Self>) {
        cx.observe(&worktree, |_, _, cx| cx.notify()).detach();
        if worktree.read(cx).is_local() {
            // Local worktree events may indicate files moved/changed on disk;
            // propagate those changes to any buffers backed by this worktree.
            cx.subscribe(&worktree, |this, worktree, _, cx| {
                this.update_local_worktree_buffers(worktree, cx);
            })
            .detach();
        }

        // Read the flags in a block so the borrow of `worktree` ends before
        // we push a handle into `self.worktrees`.
        let push_weak_handle = {
            let worktree = worktree.read(cx);
            worktree.is_local() && worktree.is_weak()
        };
        if push_weak_handle {
            // Weak worktrees are not kept alive by the project; prune the
            // handle list when the underlying model is released.
            cx.observe_release(&worktree, |this, cx| {
                this.worktrees
                    .retain(|worktree| worktree.upgrade(cx).is_some());
                cx.notify();
            })
            .detach();
            self.worktrees
                .push(WorktreeHandle::Weak(worktree.downgrade()));
        } else {
            self.worktrees
                .push(WorktreeHandle::Strong(worktree.clone()));
        }
        cx.notify();
    }
1259
    /// Reconciles every open buffer backed by `worktree_handle` with the
    /// worktree's latest snapshot: a buffer's `File` is refreshed by entry id
    /// first (tracks renames), then by path, and finally marked as having no
    /// entry (deleted on disk). File changes are also broadcast to the remote
    /// project, if connected. Buffers whose weak handles no longer upgrade
    /// are removed from `open_buffers`.
    fn update_local_worktree_buffers(
        &mut self,
        worktree_handle: ModelHandle<Worktree>,
        cx: &mut ModelContext<Self>,
    ) {
        let snapshot = worktree_handle.read(cx).snapshot();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        // Only buffers belonging to this worktree are affected.
                        if old_file.worktree != worktree_handle {
                            return;
                        }

                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| snapshot.entry_for_id(entry_id))
                        {
                            // Entry still exists (possibly renamed): adopt its
                            // current path and mtime.
                            File {
                                is_local: true,
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) =
                            snapshot.entry_for_path(old_file.path().as_ref())
                        {
                            // Entry id changed but a file exists at the same
                            // path: re-bind the buffer to the new entry.
                            File {
                                is_local: true,
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            // File is gone from disk: keep the old path/mtime
                            // but record that there is no backing entry.
                            File {
                                is_local: true,
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        // Tell remote collaborators about the file change.
                        if let Some(project_id) = self.remote_id() {
                            let client = self.client.clone();
                            let message = proto::UpdateBufferFile {
                                project_id,
                                buffer_id: *buffer_id as u64,
                                file: Some(new_file.to_proto()),
                            };
                            cx.foreground()
                                .spawn(async move { client.send(message).await })
                                .detach_and_log_err(cx);
                        }
                        buffer.file_updated(Box::new(new_file), cx).detach();
                    }
                });
            } else {
                // Buffer was dropped; clean up its entry after the loop
                // (can't mutate `open_buffers` while iterating it).
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            self.open_buffers.remove(&buffer_id);
        }
    }
1329
1330 pub fn set_active_path(&mut self, entry: Option<ProjectPath>, cx: &mut ModelContext<Self>) {
1331 let new_active_entry = entry.and_then(|project_path| {
1332 let worktree = self.worktree_for_id(project_path.worktree_id, cx)?;
1333 let entry = worktree.read(cx).entry_for_path(project_path.path)?;
1334 Some(ProjectEntry {
1335 worktree_id: project_path.worktree_id,
1336 entry_id: entry.id,
1337 })
1338 });
1339 if new_active_entry != self.active_entry {
1340 self.active_entry = new_active_entry;
1341 cx.emit(Event::ActiveEntryChanged(new_active_entry));
1342 }
1343 }
1344
    /// True while at least one language server is still producing disk-based
    /// diagnostics (see `disk_based_diagnostics_started` / `_finished`).
    pub fn is_running_disk_based_diagnostics(&self) -> bool {
        self.language_servers_with_diagnostics_running > 0
    }
1348
1349 pub fn diagnostic_summary(&self, cx: &AppContext) -> DiagnosticSummary {
1350 let mut summary = DiagnosticSummary::default();
1351 for (_, path_summary) in self.diagnostic_summaries(cx) {
1352 summary.error_count += path_summary.error_count;
1353 summary.warning_count += path_summary.warning_count;
1354 summary.info_count += path_summary.info_count;
1355 summary.hint_count += path_summary.hint_count;
1356 }
1357 summary
1358 }
1359
    /// Iterates over the diagnostic summary of every path in every worktree,
    /// tagging each summary with its project-relative `ProjectPath`.
    pub fn diagnostic_summaries<'a>(
        &'a self,
        cx: &'a AppContext,
    ) -> impl Iterator<Item = (ProjectPath, DiagnosticSummary)> + 'a {
        self.worktrees(cx).flat_map(move |worktree| {
            let worktree = worktree.read(cx);
            let worktree_id = worktree.id();
            worktree
                .diagnostic_summaries()
                .map(move |(path, summary)| (ProjectPath { worktree_id, path }, summary))
        })
    }
1372
1373 pub fn disk_based_diagnostics_started(&mut self, cx: &mut ModelContext<Self>) {
1374 self.language_servers_with_diagnostics_running += 1;
1375 if self.language_servers_with_diagnostics_running == 1 {
1376 cx.emit(Event::DiskBasedDiagnosticsStarted);
1377 }
1378 }
1379
1380 pub fn disk_based_diagnostics_finished(&mut self, cx: &mut ModelContext<Self>) {
1381 cx.emit(Event::DiskBasedDiagnosticsUpdated);
1382 self.language_servers_with_diagnostics_running -= 1;
1383 if self.language_servers_with_diagnostics_running == 0 {
1384 cx.emit(Event::DiskBasedDiagnosticsFinished);
1385 }
1386 }
1387
    /// The project entry most recently recorded via `set_active_path`.
    pub fn active_entry(&self) -> Option<ProjectEntry> {
        self.active_entry
    }
1391
1392 // RPC message handlers
1393
    /// Handles the host revoking access to a shared project. Marks this
    /// remote project as no longer shared and drops all collaborators.
    fn handle_unshare_project(
        &mut self,
        _: TypedEnvelope<proto::UnshareProject>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        if let ProjectClientState::Remote {
            sharing_has_stopped,
            ..
        } = &mut self.client_state
        {
            *sharing_has_stopped = true;
            self.collaborators.clear();
            cx.notify();
            Ok(())
        } else {
            // NOTE(review): assumes the server only routes this message to
            // guests; a local project receiving it would panic here.
            unreachable!()
        }
    }
1413
    /// Handles a new collaborator joining the project: resolves their user
    /// record asynchronously via the user store, then records them in
    /// `collaborators` keyed by peer id.
    fn handle_add_collaborator(
        &mut self,
        mut envelope: TypedEnvelope<proto::AddProjectCollaborator>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let user_store = self.user_store.clone();
        let collaborator = envelope
            .payload
            .collaborator
            .take()
            .ok_or_else(|| anyhow!("empty collaborator"))?;

        // The user fetch may hit the network, so finish the insertion in a
        // detached task rather than blocking this handler.
        cx.spawn(|this, mut cx| {
            async move {
                let collaborator =
                    Collaborator::from_proto(collaborator, &user_store, &mut cx).await?;
                this.update(&mut cx, |this, cx| {
                    this.collaborators
                        .insert(collaborator.peer_id, collaborator);
                    cx.notify();
                });
                Ok(())
            }
            .log_err()
        })
        .detach();

        Ok(())
    }
1444
1445 fn handle_remove_collaborator(
1446 &mut self,
1447 envelope: TypedEnvelope<proto::RemoveProjectCollaborator>,
1448 _: Arc<Client>,
1449 cx: &mut ModelContext<Self>,
1450 ) -> Result<()> {
1451 let peer_id = PeerId(envelope.payload.peer_id);
1452 let replica_id = self
1453 .collaborators
1454 .remove(&peer_id)
1455 .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?
1456 .replica_id;
1457 self.shared_buffers.remove(&peer_id);
1458 for (_, buffer) in &self.open_buffers {
1459 if let Some(buffer) = buffer.upgrade(cx) {
1460 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1461 }
1462 }
1463 cx.notify();
1464 Ok(())
1465 }
1466
    /// Handles the host sharing a worktree with us: constructs a remote
    /// worktree from the snapshot in the message and adds it to the project.
    /// The worktree's background load task is detached.
    fn handle_share_worktree(
        &mut self,
        envelope: TypedEnvelope<proto::ShareWorktree>,
        client: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let remote_id = self.remote_id().ok_or_else(|| anyhow!("invalid project"))?;
        let replica_id = self.replica_id();
        let worktree = envelope
            .payload
            .worktree
            .ok_or_else(|| anyhow!("invalid worktree"))?;
        let (worktree, load_task) = Worktree::remote(remote_id, replica_id, worktree, client, cx);
        self.add_worktree(&worktree, cx);
        load_task.detach();
        Ok(())
    }
1484
1485 fn handle_unregister_worktree(
1486 &mut self,
1487 envelope: TypedEnvelope<proto::UnregisterWorktree>,
1488 _: Arc<Client>,
1489 cx: &mut ModelContext<Self>,
1490 ) -> Result<()> {
1491 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1492 self.remove_worktree(worktree_id, cx);
1493 Ok(())
1494 }
1495
1496 fn handle_update_worktree(
1497 &mut self,
1498 envelope: TypedEnvelope<proto::UpdateWorktree>,
1499 _: Arc<Client>,
1500 cx: &mut ModelContext<Self>,
1501 ) -> Result<()> {
1502 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1503 if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
1504 worktree.update(cx, |worktree, cx| {
1505 let worktree = worktree.as_remote_mut().unwrap();
1506 worktree.update_from_remote(envelope, cx)
1507 })?;
1508 }
1509 Ok(())
1510 }
1511
    /// Handles the host pushing updated diagnostic counts for one path of a
    /// remote worktree, then emits `DiagnosticsUpdated` for that path.
    /// Messages for unknown worktrees or without a summary are ignored.
    fn handle_update_diagnostic_summary(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
            if let Some(summary) = envelope.payload.summary {
                let project_path = ProjectPath {
                    worktree_id,
                    path: Path::new(&summary.path).into(),
                };
                worktree.update(cx, |worktree, _| {
                    worktree
                        .as_remote_mut()
                        .unwrap()
                        .update_diagnostic_summary(project_path.path.clone(), &summary);
                });
                cx.emit(Event::DiagnosticsUpdated(project_path));
            }
        }
        Ok(())
    }
1536
1537 fn handle_disk_based_diagnostics_updating(
1538 &mut self,
1539 _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdating>,
1540 _: Arc<Client>,
1541 cx: &mut ModelContext<Self>,
1542 ) -> Result<()> {
1543 self.disk_based_diagnostics_started(cx);
1544 Ok(())
1545 }
1546
1547 fn handle_disk_based_diagnostics_updated(
1548 &mut self,
1549 _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdated>,
1550 _: Arc<Client>,
1551 cx: &mut ModelContext<Self>,
1552 ) -> Result<()> {
1553 self.disk_based_diagnostics_finished(cx);
1554 Ok(())
1555 }
1556
1557 pub fn handle_update_buffer(
1558 &mut self,
1559 envelope: TypedEnvelope<proto::UpdateBuffer>,
1560 _: Arc<Client>,
1561 cx: &mut ModelContext<Self>,
1562 ) -> Result<()> {
1563 let payload = envelope.payload.clone();
1564 let buffer_id = payload.buffer_id as usize;
1565 let ops = payload
1566 .operations
1567 .into_iter()
1568 .map(|op| language::proto::deserialize_operation(op))
1569 .collect::<Result<Vec<_>, _>>()?;
1570 if let Some(buffer) = self.open_buffers.get_mut(&buffer_id) {
1571 if let Some(buffer) = buffer.upgrade(cx) {
1572 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
1573 }
1574 }
1575 Ok(())
1576 }
1577
1578 pub fn handle_update_buffer_file(
1579 &mut self,
1580 envelope: TypedEnvelope<proto::UpdateBufferFile>,
1581 _: Arc<Client>,
1582 cx: &mut ModelContext<Self>,
1583 ) -> Result<()> {
1584 let payload = envelope.payload.clone();
1585 let buffer_id = payload.buffer_id as usize;
1586 let file = payload.file.ok_or_else(|| anyhow!("invalid file"))?;
1587 let worktree = self
1588 .worktree_for_id(WorktreeId::from_proto(file.worktree_id), cx)
1589 .ok_or_else(|| anyhow!("no such worktree"))?;
1590 let file = File::from_proto(file, worktree.clone(), cx)?;
1591 let buffer = self
1592 .open_buffers
1593 .get_mut(&buffer_id)
1594 .and_then(|b| b.upgrade(cx))
1595 .ok_or_else(|| anyhow!("no such buffer"))?;
1596 buffer.update(cx, |buffer, cx| {
1597 buffer.file_updated(Box::new(file), cx).detach();
1598 });
1599
1600 Ok(())
1601 }
1602
    /// Handles a guest's request to save one of the buffers we share with
    /// them: saves the buffer, then responds with the saved version vector
    /// and mtime so the guest can update its own dirty/conflict state.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let project_id = self.remote_id().ok_or_else(|| anyhow!("not connected"))?;
        // Only buffers we have explicitly shared with this peer can be saved
        // on their behalf.
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let receipt = envelope.receipt();
        let buffer_id = envelope.payload.buffer_id;
        // The save must run on the foreground (model access); the response is
        // then sent from the background.
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();
        Ok(())
    }
1645
    /// Handles a guest's request to format one of the buffers we share with
    /// them, replying with `Ack` on success or an error message on failure.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();
        Ok(())
    }
1684
    /// Handles a guest's go-to-definition request: resolves definitions via
    /// our local language server, serializes each target buffer for the
    /// requesting peer, and responds with the definition locations.
    pub fn handle_get_definition(
        &mut self,
        envelope: TypedEnvelope<proto::GetDefinition>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        let source_buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let position = envelope
            .payload
            .position
            .and_then(deserialize_anchor)
            .ok_or_else(|| anyhow!("invalid position"))?;
        // Reject anchors referring to buffer state we haven't seen yet.
        if !source_buffer.read(cx).can_resolve(&position) {
            return Err(anyhow!("cannot resolve position"));
        }

        let definitions = self.definition(&source_buffer, position, cx);
        cx.spawn(|this, mut cx| async move {
            let definitions = definitions.await?;
            let mut response = proto::GetDefinitionResponse {
                definitions: Default::default(),
            };
            this.update(&mut cx, |this, cx| {
                for definition in definitions {
                    // Serializing also shares the target buffer with the
                    // peer so the anchors below are meaningful to them.
                    let buffer =
                        this.serialize_buffer_for_peer(&definition.target_buffer, sender_id, cx);
                    response.definitions.push(proto::Definition {
                        target_start: Some(serialize_anchor(&definition.target_range.start)),
                        target_end: Some(serialize_anchor(&definition.target_range.end)),
                        buffer: Some(buffer),
                    });
                }
            });
            rpc.respond(receipt, response).await?;
            Ok::<_, anyhow::Error>(())
        })
        .detach_and_log_err(cx);

        Ok(())
    }
1731
    /// Handles a guest's request to open a buffer by project path: opens (or
    /// reuses) the local buffer, shares it with the peer, and responds with
    /// either the buffer's full state or just its id (if already shared).
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();
        let peer_id = envelope.original_sender_id()?;
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        let open_buffer = self.open_buffer(
            ProjectPath {
                worktree_id,
                path: PathBuf::from(envelope.payload.path).into(),
            },
            cx,
        );
        cx.spawn(|this, mut cx| {
            async move {
                let buffer = open_buffer.await?;
                let buffer = this.update(&mut cx, |this, cx| {
                    this.serialize_buffer_for_peer(&buffer, peer_id, cx)
                });
                rpc.respond(
                    receipt,
                    proto::OpenBufferResponse {
                        buffer: Some(buffer),
                    },
                )
                .await
            }
            .log_err()
        })
        .detach();
        Ok(())
    }
1767
    /// Produces the protobuf representation of `buffer` to send to `peer_id`.
    /// The first time a buffer is sent to a given peer, its full state is
    /// serialized and a strong handle is retained in `shared_buffers`;
    /// subsequent sends transmit only the buffer id.
    fn serialize_buffer_for_peer(
        &mut self,
        buffer: &ModelHandle<Buffer>,
        peer_id: PeerId,
        cx: &AppContext,
    ) -> proto::Buffer {
        let buffer_id = buffer.read(cx).remote_id();
        let shared_buffers = self.shared_buffers.entry(peer_id).or_default();
        match shared_buffers.entry(buffer_id) {
            // Already shared with this peer: an id reference is enough.
            hash_map::Entry::Occupied(_) => proto::Buffer {
                variant: Some(proto::buffer::Variant::Id(buffer_id)),
            },
            // First share: keep the buffer alive for the peer and send state.
            hash_map::Entry::Vacant(entry) => {
                entry.insert(buffer.clone());
                proto::Buffer {
                    variant: Some(proto::buffer::Variant::State(buffer.read(cx).to_proto())),
                }
            }
        }
    }
1788
    /// Reconstructs a buffer from its protobuf representation: either looks
    /// up an already-open buffer by id, or builds a new buffer model from
    /// serialized state (resolving its file against our worktrees) and
    /// registers it with the project.
    fn deserialize_remote_buffer(
        &mut self,
        buffer: proto::Buffer,
        cx: &mut ModelContext<Self>,
    ) -> Result<ModelHandle<Buffer>> {
        match buffer.variant.ok_or_else(|| anyhow!("missing buffer"))? {
            proto::buffer::Variant::Id(id) => self
                .open_buffers
                .get(&(id as usize))
                .and_then(|buffer| buffer.upgrade(cx))
                .ok_or_else(|| anyhow!("no buffer exists for id {}", id)),
            proto::buffer::Variant::State(mut buffer) => {
                let mut buffer_worktree = None;
                let mut buffer_file = None;
                if let Some(file) = buffer.file.take() {
                    let worktree_id = WorktreeId::from_proto(file.worktree_id);
                    let worktree = self
                        .worktree_for_id(worktree_id, cx)
                        .ok_or_else(|| anyhow!("no worktree found for id {}", file.worktree_id))?;
                    buffer_file = Some(Box::new(File::from_proto(file, worktree.clone(), cx)?)
                        as Box<dyn language::File>);
                    buffer_worktree = Some(worktree);
                }

                let buffer = cx.add_model(|cx| {
                    Buffer::from_proto(self.replica_id(), buffer, buffer_file, cx).unwrap()
                });
                self.register_buffer(&buffer, buffer_worktree.as_ref(), cx)?;
                Ok(buffer)
            }
        }
    }
1821
1822 pub fn handle_close_buffer(
1823 &mut self,
1824 envelope: TypedEnvelope<proto::CloseBuffer>,
1825 _: Arc<Client>,
1826 cx: &mut ModelContext<Self>,
1827 ) -> anyhow::Result<()> {
1828 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1829 shared_buffers.remove(&envelope.payload.buffer_id);
1830 cx.notify();
1831 }
1832 Ok(())
1833 }
1834
1835 pub fn handle_buffer_saved(
1836 &mut self,
1837 envelope: TypedEnvelope<proto::BufferSaved>,
1838 _: Arc<Client>,
1839 cx: &mut ModelContext<Self>,
1840 ) -> Result<()> {
1841 let payload = envelope.payload.clone();
1842 let buffer = self
1843 .open_buffers
1844 .get(&(payload.buffer_id as usize))
1845 .and_then(|buffer| buffer.upgrade(cx));
1846 if let Some(buffer) = buffer {
1847 buffer.update(cx, |buffer, cx| {
1848 let version = payload.version.try_into()?;
1849 let mtime = payload
1850 .mtime
1851 .ok_or_else(|| anyhow!("missing mtime"))?
1852 .into();
1853 buffer.did_save(version, mtime, None, cx);
1854 Result::<_, anyhow::Error>::Ok(())
1855 })?;
1856 }
1857 Ok(())
1858 }
1859
1860 pub fn handle_buffer_reloaded(
1861 &mut self,
1862 envelope: TypedEnvelope<proto::BufferReloaded>,
1863 _: Arc<Client>,
1864 cx: &mut ModelContext<Self>,
1865 ) -> Result<()> {
1866 let payload = envelope.payload.clone();
1867 let buffer = self
1868 .open_buffers
1869 .get(&(payload.buffer_id as usize))
1870 .and_then(|buffer| buffer.upgrade(cx));
1871 if let Some(buffer) = buffer {
1872 buffer.update(cx, |buffer, cx| {
1873 let version = payload.version.try_into()?;
1874 let mtime = payload
1875 .mtime
1876 .ok_or_else(|| anyhow!("missing mtime"))?
1877 .into();
1878 buffer.did_reload(version, mtime, cx);
1879 Result::<_, anyhow::Error>::Ok(())
1880 })?;
1881 }
1882 Ok(())
1883 }
1884
    /// Performs a fuzzy path search across all non-weak worktrees. Worktree
    /// snapshots are captured up front so the returned future can run the
    /// match on the background executor without further app-state access.
    /// The search can be aborted early via `cancel_flag`.
    pub fn match_paths<'a>(
        &self,
        query: &'a str,
        include_ignored: bool,
        smart_case: bool,
        max_results: usize,
        cancel_flag: &'a AtomicBool,
        cx: &AppContext,
    ) -> impl 'a + Future<Output = Vec<PathMatch>> {
        let worktrees = self
            .worktrees(cx)
            .filter(|worktree| !worktree.read(cx).is_weak())
            .collect::<Vec<_>>();
        // Prefix candidates with the root name only when there is ambiguity
        // between multiple worktrees.
        let include_root_name = worktrees.len() > 1;
        let candidate_sets = worktrees
            .into_iter()
            .map(|worktree| CandidateSet {
                snapshot: worktree.read(cx).snapshot(),
                include_ignored,
                include_root_name,
            })
            .collect::<Vec<_>>();

        let background = cx.background().clone();
        async move {
            fuzzy::match_paths(
                candidate_sets.as_slice(),
                query,
                smart_case,
                max_results,
                cancel_flag,
                background,
            )
            .await
        }
    }
1921}
1922
1923impl WorktreeHandle {
1924 pub fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Worktree>> {
1925 match self {
1926 WorktreeHandle::Strong(handle) => Some(handle.clone()),
1927 WorktreeHandle::Weak(handle) => handle.upgrade(cx),
1928 }
1929 }
1930}
1931
/// A fuzzy-match candidate source backed by a single worktree snapshot.
struct CandidateSet {
    snapshot: Snapshot,
    // Whether ignored files are offered as match candidates.
    include_ignored: bool,
    // Whether candidate paths are prefixed with the worktree's root name
    // (used when multiple worktrees make bare paths ambiguous).
    include_root_name: bool,
}
1937
impl<'a> PathMatchCandidateSet<'a> for CandidateSet {
    type Candidates = CandidateSetIter<'a>;

    fn id(&self) -> usize {
        self.snapshot.id().to_usize()
    }

    fn len(&self) -> usize {
        // Candidate count depends on whether ignored files participate.
        if self.include_ignored {
            self.snapshot.file_count()
        } else {
            self.snapshot.visible_file_count()
        }
    }

    fn prefix(&self) -> Arc<str> {
        // A single-file worktree's "prefix" is the file name itself; a
        // directory worktree contributes "root_name/" only when requested.
        if self.snapshot.root_entry().map_or(false, |e| e.is_file()) {
            self.snapshot.root_name().into()
        } else if self.include_root_name {
            format!("{}/", self.snapshot.root_name()).into()
        } else {
            "".into()
        }
    }

    fn candidates(&'a self, start: usize) -> Self::Candidates {
        CandidateSetIter {
            traversal: self.snapshot.files(self.include_ignored, start),
        }
    }
}
1969
/// Iterator over a worktree snapshot's file entries as fuzzy-match
/// candidates (see `CandidateSet::candidates`).
struct CandidateSetIter<'a> {
    traversal: Traversal<'a>,
}
1973
1974impl<'a> Iterator for CandidateSetIter<'a> {
1975 type Item = PathMatchCandidate<'a>;
1976
1977 fn next(&mut self) -> Option<Self::Item> {
1978 self.traversal.next().map(|entry| {
1979 if let EntryKind::File(char_bag) = entry.kind {
1980 PathMatchCandidate {
1981 path: &entry.path,
1982 char_bag,
1983 }
1984 } else {
1985 unreachable!()
1986 }
1987 })
1988 }
1989}
1990
impl Entity for Project {
    type Event = Event;

    /// Informs the server when the project model is dropped: a registered
    /// local project is unregistered, and a remote project is left.
    fn release(&mut self, cx: &mut gpui::MutableAppContext) {
        match &self.client_state {
            ProjectClientState::Local { remote_id_rx, .. } => {
                // Only unregister if we actually obtained a remote id.
                if let Some(project_id) = *remote_id_rx.borrow() {
                    let rpc = self.client.clone();
                    cx.spawn(|_| async move {
                        if let Err(err) = rpc.send(proto::UnregisterProject { project_id }).await {
                            log::error!("error unregistering project: {}", err);
                        }
                    })
                    .detach();
                }
            }
            ProjectClientState::Remote { remote_id, .. } => {
                let rpc = self.client.clone();
                let project_id = *remote_id;
                cx.spawn(|_| async move {
                    if let Err(err) = rpc.send(proto::LeaveProject { project_id }).await {
                        log::error!("error leaving project: {}", err);
                    }
                })
                .detach();
            }
        }
    }

    /// Shuts down all language servers before the application quits, so
    /// server processes aren't orphaned.
    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        let shutdown_futures = self
            .language_servers
            .drain()
            .filter_map(|(_, server)| server.shutdown())
            .collect::<Vec<_>>();
        Some(
            async move {
                futures::future::join_all(shutdown_futures).await;
            }
            .boxed(),
        )
    }
}
2039
impl Collaborator {
    /// Builds a `Collaborator` from its protobuf representation, resolving
    /// the associated user record through the user store.
    fn from_proto(
        message: proto::Collaborator,
        user_store: &ModelHandle<UserStore>,
        cx: &mut AsyncAppContext,
    ) -> impl Future<Output = Result<Self>> {
        // Start the user fetch before constructing the future so the
        // returned future doesn't need to capture the app context.
        let user = user_store.update(cx, |user_store, cx| {
            user_store.fetch_user(message.user_id, cx)
        });

        async move {
            Ok(Self {
                peer_id: PeerId(message.peer_id),
                user: user.await?,
                replica_id: message.replica_id as ReplicaId,
            })
        }
    }
}
2059
2060impl<P: AsRef<Path>> From<(WorktreeId, P)> for ProjectPath {
2061 fn from((worktree_id, path): (WorktreeId, P)) -> Self {
2062 Self {
2063 worktree_id,
2064 path: path.as_ref().into(),
2065 }
2066 }
2067}
2068
2069#[cfg(test)]
2070mod tests {
2071 use super::{Event, *};
2072 use client::test::FakeHttpClient;
2073 use fs::RealFs;
2074 use futures::StreamExt;
2075 use gpui::{test::subscribe, TestAppContext};
2076 use language::{
2077 tree_sitter_rust, AnchorRangeExt, Diagnostic, LanguageConfig, LanguageRegistry,
2078 LanguageServerConfig, Point,
2079 };
2080 use lsp::Url;
2081 use serde_json::json;
2082 use std::{cell::RefCell, os::unix, path::PathBuf, rc::Rc};
2083 use unindent::Unindent as _;
2084 use util::test::temp_tree;
2085 use worktree::WorktreeHandle as _;
2086
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        // Build a real directory tree on disk, including symlinks, to verify
        // worktree scanning and fuzzy path matching end-to-end.
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        // Open the worktree through a symlinked root, and add a symlinked
        // subdirectory, to exercise symlink handling during the scan.
        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let project = build_project(Arc::new(RealFs), &mut cx);

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(&root_link_path, false, cx)
            })
            .await
            .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert_eq!(tree.file_count(), 5);
            // The symlinked directory should resolve to the same inode as
            // its target.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );
        });

        // Fuzzy query "bna" should match only the paths under "banana".
        let cancel_flag = Default::default();
        let results = project
            .read_with(&cx, |project, cx| {
                project.match_paths("bna", false, false, 10, &cancel_flag, cx)
            })
            .await;
        assert_eq!(
            results
                .into_iter()
                .map(|result| result.path)
                .collect::<Vec<Arc<Path>>>(),
            vec![
                PathBuf::from("banana/carrot/date").into(),
                PathBuf::from("banana/carrot/endive").into(),
            ]
        );
    }
2149
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        // Verifies that language-server progress notifications and published
        // diagnostics are surfaced as the expected sequence of project events,
        // and that the diagnostics land in the affected buffer.
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        // Token under which the fake server reports disk-based diagnostics
        // progress ($/progress in LSP terms).
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();

        // Register a Rust language that launches the fake server for ".rs" files.
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());

        // Wait for the initial filesystem scan before opening buffers.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        path: Path::new("b.rs").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();

        let mut events = subscribe(&project, &mut cx);

        // The first progress start produces a single "started" event.
        fake_server.start_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsStarted
        );

        // Additional start/end pairs while a run is already active; the next
        // asserted event below is the diagnostics update, not another "started".
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        // Publish one error diagnostic for a.rs while progress is ongoing.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiagnosticsUpdated(ProjectPath {
                worktree_id,
                path: Arc::from(Path::new("a.rs"))
            })
        );

        // Ending the remaining progress work yields "updated" then "finished".
        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdated
        );
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsFinished
        );

        // Opening a.rs now should expose the published diagnostic in the buffer.
        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
2282
2283 #[gpui::test]
2284 async fn test_search_worktree_without_files(mut cx: gpui::TestAppContext) {
2285 let dir = temp_tree(json!({
2286 "root": {
2287 "dir1": {},
2288 "dir2": {
2289 "dir3": {}
2290 }
2291 }
2292 }));
2293
2294 let project = build_project(Arc::new(RealFs), &mut cx);
2295 let (tree, _) = project
2296 .update(&mut cx, |project, cx| {
2297 project.find_or_create_local_worktree(&dir.path(), false, cx)
2298 })
2299 .await
2300 .unwrap();
2301
2302 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2303 .await;
2304
2305 let cancel_flag = Default::default();
2306 let results = project
2307 .read_with(&cx, |project, cx| {
2308 project.match_paths("dir", false, false, 10, &cancel_flag, cx)
2309 })
2310 .await;
2311
2312 assert!(results.is_empty());
2313 }
2314
    #[gpui::test]
    async fn test_definition(mut cx: gpui::TestAppContext) {
        // Verifies go-to-definition: the request sent to the language server,
        // the handling of a response that points outside the worktree, and the
        // lifecycle of the weak worktree created for that out-of-tree file.
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;

        // Register a Rust language backed by the fake server.
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "const fn a() { A }",
            "b.rs": "const y: i32 = crate::a()",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        // The worktree is rooted at b.rs alone, so a.rs lies outside it.
        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path().join("b.rs"), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        // In a single-file worktree the root file is addressed
                        // by the empty relative path.
                        path: Path::new("").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();
        // Request the definition at byte offset 22 (inside `crate::a()`).
        let definitions =
            project.update(&mut cx, |project, cx| project.definition(&buffer, 22, cx));
        // The project should translate that into a GotoDefinition request for
        // b.rs at the equivalent position.
        let (request_id, request) = fake_server
            .receive_request::<lsp::request::GotoDefinition>()
            .await;
        let request_params = request.text_document_position_params;
        assert_eq!(
            request_params.text_document.uri.to_file_path().unwrap(),
            dir.path().join("b.rs")
        );
        assert_eq!(request_params.position, lsp::Position::new(0, 22));

        // Respond with a location in a.rs, a file outside the worktree.
        fake_server
            .respond(
                request_id,
                Some(lsp::GotoDefinitionResponse::Scalar(lsp::Location::new(
                    lsp::Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                    lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                ))),
            )
            .await;
        let mut definitions = definitions.await.unwrap();
        assert_eq!(definitions.len(), 1);
        let definition = definitions.pop().unwrap();
        cx.update(|cx| {
            let target_buffer = definition.target_buffer.read(cx);
            assert_eq!(
                target_buffer
                    .file()
                    .unwrap()
                    .as_local()
                    .unwrap()
                    .abs_path(cx),
                dir.path().join("a.rs")
            );
            assert_eq!(definition.target_range.to_offset(target_buffer), 9..10);
            // a.rs was opened via a new *weak* worktree (second tuple field).
            assert_eq!(
                list_worktrees(&project, cx),
                [
                    (dir.path().join("b.rs"), false),
                    (dir.path().join("a.rs"), true)
                ]
            );

            drop(definition);
        });
        // Dropping the definition releases the weak worktree for a.rs.
        cx.read(|cx| {
            assert_eq!(
                list_worktrees(&project, cx),
                [(dir.path().join("b.rs"), false)]
            );
        });

        // Helper: every worktree's absolute path paired with its weak flag.
        fn list_worktrees(project: &ModelHandle<Project>, cx: &AppContext) -> Vec<(PathBuf, bool)> {
            project
                .read(cx)
                .worktrees(cx)
                .map(|worktree| {
                    let worktree = worktree.read(cx);
                    (
                        worktree.as_local().unwrap().abs_path().to_path_buf(),
                        worktree.is_weak(),
                    )
                })
                .collect::<Vec<_>>()
        }
    }
2439
2440 #[gpui::test]
2441 async fn test_save_file(mut cx: gpui::TestAppContext) {
2442 let fs = Arc::new(FakeFs::new(cx.background()));
2443 fs.insert_tree(
2444 "/dir",
2445 json!({
2446 "file1": "the old contents",
2447 }),
2448 )
2449 .await;
2450
2451 let project = build_project(fs.clone(), &mut cx);
2452 let worktree_id = project
2453 .update(&mut cx, |p, cx| {
2454 p.find_or_create_local_worktree("/dir", false, cx)
2455 })
2456 .await
2457 .unwrap()
2458 .0
2459 .read_with(&cx, |tree, _| tree.id());
2460
2461 let buffer = project
2462 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
2463 .await
2464 .unwrap();
2465 buffer
2466 .update(&mut cx, |buffer, cx| {
2467 assert_eq!(buffer.text(), "the old contents");
2468 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
2469 buffer.save(cx)
2470 })
2471 .await
2472 .unwrap();
2473
2474 let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
2475 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
2476 }
2477
2478 #[gpui::test]
2479 async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
2480 let fs = Arc::new(FakeFs::new(cx.background()));
2481 fs.insert_tree(
2482 "/dir",
2483 json!({
2484 "file1": "the old contents",
2485 }),
2486 )
2487 .await;
2488
2489 let project = build_project(fs.clone(), &mut cx);
2490 let worktree_id = project
2491 .update(&mut cx, |p, cx| {
2492 p.find_or_create_local_worktree("/dir/file1", false, cx)
2493 })
2494 .await
2495 .unwrap()
2496 .0
2497 .read_with(&cx, |tree, _| tree.id());
2498
2499 let buffer = project
2500 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, ""), cx))
2501 .await
2502 .unwrap();
2503 buffer
2504 .update(&mut cx, |buffer, cx| {
2505 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
2506 buffer.save(cx)
2507 })
2508 .await
2509 .unwrap();
2510
2511 let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
2512 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
2513 }
2514
2515 #[gpui::test]
2516 async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
2517 let dir = temp_tree(json!({
2518 "a": {
2519 "file1": "",
2520 "file2": "",
2521 "file3": "",
2522 },
2523 "b": {
2524 "c": {
2525 "file4": "",
2526 "file5": "",
2527 }
2528 }
2529 }));
2530
2531 let project = build_project(Arc::new(RealFs), &mut cx);
2532 let rpc = project.read_with(&cx, |p, _| p.client.clone());
2533
2534 let (tree, _) = project
2535 .update(&mut cx, |p, cx| {
2536 p.find_or_create_local_worktree(dir.path(), false, cx)
2537 })
2538 .await
2539 .unwrap();
2540 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
2541
2542 let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
2543 let buffer = project.update(cx, |p, cx| p.open_buffer((worktree_id, path), cx));
2544 async move { buffer.await.unwrap() }
2545 };
2546 let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
2547 tree.read_with(cx, |tree, _| {
2548 tree.entry_for_path(path)
2549 .expect(&format!("no entry for path {}", path))
2550 .id
2551 })
2552 };
2553
2554 let buffer2 = buffer_for_path("a/file2", &mut cx).await;
2555 let buffer3 = buffer_for_path("a/file3", &mut cx).await;
2556 let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
2557 let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
2558
2559 let file2_id = id_for_path("a/file2", &cx);
2560 let file3_id = id_for_path("a/file3", &cx);
2561 let file4_id = id_for_path("b/c/file4", &cx);
2562
2563 // Wait for the initial scan.
2564 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2565 .await;
2566
2567 // Create a remote copy of this worktree.
2568 let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
2569 let (remote, load_task) = cx.update(|cx| {
2570 Worktree::remote(
2571 1,
2572 1,
2573 initial_snapshot.to_proto(&Default::default(), Default::default()),
2574 rpc.clone(),
2575 cx,
2576 )
2577 });
2578 load_task.await;
2579
2580 cx.read(|cx| {
2581 assert!(!buffer2.read(cx).is_dirty());
2582 assert!(!buffer3.read(cx).is_dirty());
2583 assert!(!buffer4.read(cx).is_dirty());
2584 assert!(!buffer5.read(cx).is_dirty());
2585 });
2586
2587 // Rename and delete files and directories.
2588 tree.flush_fs_events(&cx).await;
2589 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
2590 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
2591 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
2592 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
2593 tree.flush_fs_events(&cx).await;
2594
2595 let expected_paths = vec![
2596 "a",
2597 "a/file1",
2598 "a/file2.new",
2599 "b",
2600 "d",
2601 "d/file3",
2602 "d/file4",
2603 ];
2604
2605 cx.read(|app| {
2606 assert_eq!(
2607 tree.read(app)
2608 .paths()
2609 .map(|p| p.to_str().unwrap())
2610 .collect::<Vec<_>>(),
2611 expected_paths
2612 );
2613
2614 assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
2615 assert_eq!(id_for_path("d/file3", &cx), file3_id);
2616 assert_eq!(id_for_path("d/file4", &cx), file4_id);
2617
2618 assert_eq!(
2619 buffer2.read(app).file().unwrap().path().as_ref(),
2620 Path::new("a/file2.new")
2621 );
2622 assert_eq!(
2623 buffer3.read(app).file().unwrap().path().as_ref(),
2624 Path::new("d/file3")
2625 );
2626 assert_eq!(
2627 buffer4.read(app).file().unwrap().path().as_ref(),
2628 Path::new("d/file4")
2629 );
2630 assert_eq!(
2631 buffer5.read(app).file().unwrap().path().as_ref(),
2632 Path::new("b/c/file5")
2633 );
2634
2635 assert!(!buffer2.read(app).file().unwrap().is_deleted());
2636 assert!(!buffer3.read(app).file().unwrap().is_deleted());
2637 assert!(!buffer4.read(app).file().unwrap().is_deleted());
2638 assert!(buffer5.read(app).file().unwrap().is_deleted());
2639 });
2640
2641 // Update the remote worktree. Check that it becomes consistent with the
2642 // local worktree.
2643 remote.update(&mut cx, |remote, cx| {
2644 let update_message =
2645 tree.read(cx)
2646 .snapshot()
2647 .build_update(&initial_snapshot, 1, 1, true);
2648 remote
2649 .as_remote_mut()
2650 .unwrap()
2651 .snapshot
2652 .apply_remote_update(update_message)
2653 .unwrap();
2654
2655 assert_eq!(
2656 remote
2657 .paths()
2658 .map(|p| p.to_str().unwrap())
2659 .collect::<Vec<_>>(),
2660 expected_paths
2661 );
2662 });
2663 }
2664
2665 #[gpui::test]
2666 async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
2667 let fs = Arc::new(FakeFs::new(cx.background()));
2668 fs.insert_tree(
2669 "/the-dir",
2670 json!({
2671 "a.txt": "a-contents",
2672 "b.txt": "b-contents",
2673 }),
2674 )
2675 .await;
2676
2677 let project = build_project(fs.clone(), &mut cx);
2678 let worktree_id = project
2679 .update(&mut cx, |p, cx| {
2680 p.find_or_create_local_worktree("/the-dir", false, cx)
2681 })
2682 .await
2683 .unwrap()
2684 .0
2685 .read_with(&cx, |tree, _| tree.id());
2686
2687 // Spawn multiple tasks to open paths, repeating some paths.
2688 let (buffer_a_1, buffer_b, buffer_a_2) = project.update(&mut cx, |p, cx| {
2689 (
2690 p.open_buffer((worktree_id, "a.txt"), cx),
2691 p.open_buffer((worktree_id, "b.txt"), cx),
2692 p.open_buffer((worktree_id, "a.txt"), cx),
2693 )
2694 });
2695
2696 let buffer_a_1 = buffer_a_1.await.unwrap();
2697 let buffer_a_2 = buffer_a_2.await.unwrap();
2698 let buffer_b = buffer_b.await.unwrap();
2699 assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
2700 assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
2701
2702 // There is only one buffer per path.
2703 let buffer_a_id = buffer_a_1.id();
2704 assert_eq!(buffer_a_2.id(), buffer_a_id);
2705
2706 // Open the same path again while it is still open.
2707 drop(buffer_a_1);
2708 let buffer_a_3 = project
2709 .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))
2710 .await
2711 .unwrap();
2712
2713 // There's still only one buffer per path.
2714 assert_eq!(buffer_a_3.id(), buffer_a_id);
2715 }
2716
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        // Verifies the dirty/saved lifecycle of buffers and the exact
        // sequence of events emitted for edits, saves, and file deletions.
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |worktree, _| worktree.id());

        // Drain pending fs events and wait for the initial scan so later
        // event assertions start from a quiet state.
        worktree.flush_fs_events(&cx).await;
        worktree
            .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
            .await;

        let buffer1 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
            .await
            .unwrap();
        // Collects every event the buffer emits, in order.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            // Simulate a completed save by acknowledging the current version.
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file2"), cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file3"), cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        worktree.flush_fs_events(&cx).await;
        // Make the buffer dirty *before* the file is deleted.
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
2853
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        // Verifies how a buffer reacts to its file changing on disk: a clean
        // buffer reloads; a modified buffer keeps its edits and is flagged as
        // conflicted instead.
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        // Wait for the initial scan before opening the buffer.
        worktree
            .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = project
            .update(&mut cx, |p, cx| {
                p.open_buffer((worktree_id, "the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        // The reload leaves the buffer clean and conflict-free.
        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
2953
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        // Verifies that diagnostics whose `related_information` cross-reference
        // each other are folded into groups: each group gets one primary entry
        // plus supporting hint entries sharing the same `group_id`.
        let fs = Arc::new(FakeFs::new(cx.background()));
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/the-dir", false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                // Primary of group 0: a warning at (1,8)..(1,9) with one
                // related hint pointing back at the same range.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                // The hint for group 0, published as its own diagnostic whose
                // related info refers back to the original diagnostic.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                // Primary of group 1: an error at (2,8)..(2,17) with two
                // related hints at (1,13)..(1,15).
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                // First standalone hint of group 1, referring back to the
                // error 2 range.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                // Second standalone hint of group 1.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        project
            .update(&mut cx, |p, cx| {
                p.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All entries, ordered by position: group 0 (warning + hint) then
        // group 1 (two hints + the error), with `is_primary` set only on the
        // originating diagnostics.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be fetched individually by its id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
3212
3213 fn build_project(fs: Arc<dyn Fs>, cx: &mut TestAppContext) -> ModelHandle<Project> {
3214 let languages = Arc::new(LanguageRegistry::new());
3215 let http_client = FakeHttpClient::with_404_response();
3216 let client = client::Client::new(http_client.clone());
3217 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3218 cx.update(|cx| Project::local(client, user_store, languages, fs, cx))
3219 }
3220}