1pub mod fs;
2mod ignore;
3pub mod worktree;
4
5use anyhow::{anyhow, Result};
6use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore};
7use clock::ReplicaId;
8use collections::{hash_map, HashMap, HashSet};
9use futures::Future;
10use fuzzy::{PathMatch, PathMatchCandidate, PathMatchCandidateSet};
11use gpui::{
12 AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task,
13 WeakModelHandle,
14};
15use language::{
16 proto::{deserialize_anchor, serialize_anchor},
17 range_from_lsp, Bias, Buffer, Diagnostic, DiagnosticEntry, File as _, Language,
18 LanguageRegistry, PointUtf16, ToOffset, ToPointUtf16,
19};
20use lsp::{DiagnosticSeverity, LanguageServer};
21use postage::{prelude::Stream, watch};
22use smol::block_on;
23use std::{
24 convert::TryInto,
25 ops::Range,
26 path::{Path, PathBuf},
27 sync::{atomic::AtomicBool, Arc},
28};
29use util::{post_inc, ResultExt, TryFutureExt as _};
30
31pub use fs::*;
32pub use worktree::*;
33
/// The central model for a collection of worktrees, their open buffers,
/// language servers, and (when shared over RPC) remote collaborators.
pub struct Project {
    // Worktrees may be held strongly or weakly; see `WorktreeHandle`.
    worktrees: Vec<WorktreeHandle>,
    // The most recently activated entry, if any.
    active_entry: Option<ProjectEntry>,
    // Registry used to pick a `Language` for a buffer from its file path.
    languages: Arc<LanguageRegistry>,
    // Running language servers, keyed by (worktree id, language name).
    language_servers: HashMap<(WorktreeId, String), Arc<LanguageServer>>,
    client: Arc<client::Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Whether this project is hosted locally or joined from a remote peer.
    client_state: ProjectClientState,
    collaborators: HashMap<PeerId, Collaborator>,
    // RPC message subscriptions; cleared and rebuilt when the remote id changes.
    subscriptions: Vec<client::Subscription>,
    // Count of language servers currently running disk-based diagnostics
    // (e.g. `cargo check`-style passes).
    language_servers_with_diagnostics_running: isize,
    // Weak handles to open buffers, keyed by the buffer's remote id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // In-flight buffer loads, so concurrent opens of the same path share
    // one load task and resolve to the same buffer.
    loading_buffers: HashMap<
        ProjectPath,
        postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
    >,
    // Strong handles to buffers shared with each remote peer, keeping them
    // alive for the duration of the share.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
}
53
/// A handle to a worktree that is either kept alive by the project (`Strong`)
/// or allowed to be dropped when no one else holds it (`Weak`). Iteration
/// upgrades weak handles and skips any that are gone.
enum WorktreeHandle {
    Strong(ModelHandle<Worktree>),
    Weak(WeakModelHandle<Worktree>),
}
58
/// Distinguishes a project hosted on this machine from one joined remotely.
enum ProjectClientState {
    Local {
        // True while this project is shared with collaborators.
        is_shared: bool,
        // Watch channel carrying the server-assigned project id, set by the
        // maintenance task whenever the client connects or disconnects.
        remote_id_tx: watch::Sender<Option<u64>>,
        remote_id_rx: watch::Receiver<Option<u64>>,
        // Background task that keeps the project registered with the server;
        // held only for its lifetime.
        _maintain_remote_id_task: Task<Option<()>>,
    },
    Remote {
        // Set when the host stops sharing; makes the project read-only.
        sharing_has_stopped: bool,
        remote_id: u64,
        replica_id: ReplicaId,
    },
}
72
/// Another user participating in this project, identified by their peer
/// connection and the replica id their edits carry.
#[derive(Clone, Debug)]
pub struct Collaborator {
    pub user: Arc<User>,
    pub peer_id: PeerId,
    pub replica_id: ReplicaId,
}
79
/// Events emitted by the `Project` model for observers (e.g. UI panels).
#[derive(Clone, Debug, PartialEq)]
pub enum Event {
    ActiveEntryChanged(Option<ProjectEntry>),
    WorktreeRemoved(WorktreeId),
    // Lifecycle of a disk-based diagnostics pass (started/updated/finished).
    DiskBasedDiagnosticsStarted,
    DiskBasedDiagnosticsUpdated,
    DiskBasedDiagnosticsFinished,
    // Diagnostics changed for the given path.
    DiagnosticsUpdated(ProjectPath),
}
89
/// A path to a file or directory, relative to the root of a specific
/// worktree within the project.
#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct ProjectPath {
    pub worktree_id: WorktreeId,
    pub path: Arc<Path>,
}
95
/// Counts of primary diagnostics by severity, e.g. for display next to a
/// file in a project panel.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct DiagnosticSummary {
    pub error_count: usize,
    pub warning_count: usize,
    pub info_count: usize,
    pub hint_count: usize,
}
103
/// A single go-to-definition result: the buffer containing the definition
/// and the anchored range of the definition within it.
#[derive(Debug)]
pub struct Definition {
    pub target_buffer: ModelHandle<Buffer>,
    pub target_range: Range<language::Anchor>,
}
109
110impl DiagnosticSummary {
111 fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
112 let mut this = Self {
113 error_count: 0,
114 warning_count: 0,
115 info_count: 0,
116 hint_count: 0,
117 };
118
119 for entry in diagnostics {
120 if entry.diagnostic.is_primary {
121 match entry.diagnostic.severity {
122 DiagnosticSeverity::ERROR => this.error_count += 1,
123 DiagnosticSeverity::WARNING => this.warning_count += 1,
124 DiagnosticSeverity::INFORMATION => this.info_count += 1,
125 DiagnosticSeverity::HINT => this.hint_count += 1,
126 _ => {}
127 }
128 }
129 }
130
131 this
132 }
133
134 pub fn to_proto(&self, path: Arc<Path>) -> proto::DiagnosticSummary {
135 proto::DiagnosticSummary {
136 path: path.to_string_lossy().to_string(),
137 error_count: self.error_count as u32,
138 warning_count: self.warning_count as u32,
139 info_count: self.info_count as u32,
140 hint_count: self.hint_count as u32,
141 }
142 }
143}
144
/// Identifies a single entry within a worktree by the worktree's id and the
/// entry's id within it.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ProjectEntry {
    pub worktree_id: WorktreeId,
    pub entry_id: usize,
}
150
151impl Project {
    /// Creates a new project hosted on this machine.
    ///
    /// Spawns a background task that watches the client's connection
    /// status: on each (re)connection it registers the project with the
    /// server to obtain a remote id, re-registers every worktree under
    /// that id, and publishes the id via `set_remote_id`; on disconnect
    /// the remote id is cleared.
    pub fn local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut MutableAppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let (remote_id_tx, remote_id_rx) = watch::channel();
            let _maintain_remote_id_task = cx.spawn_weak({
                let rpc = client.clone();
                move |this, mut cx| {
                    async move {
                        let mut status = rpc.status();
                        while let Some(status) = status.recv().await {
                            // The project may have been dropped; if so, stop
                            // maintaining its registration.
                            if let Some(this) = this.upgrade(&cx) {
                                let remote_id = if let client::Status::Connected { .. } = status {
                                    let response = rpc.request(proto::RegisterProject {}).await?;
                                    Some(response.project_id)
                                } else {
                                    None
                                };

                                if let Some(project_id) = remote_id {
                                    // Kick off registration of all worktrees under
                                    // the new project id, then await them outside
                                    // the `update` closure.
                                    let mut registrations = Vec::new();
                                    this.update(&mut cx, |this, cx| {
                                        for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                                            registrations.push(worktree.update(
                                                cx,
                                                |worktree, cx| {
                                                    let worktree = worktree.as_local_mut().unwrap();
                                                    worktree.register(project_id, cx)
                                                },
                                            ));
                                        }
                                    });
                                    for registration in registrations {
                                        registration.await?;
                                    }
                                }
                                this.update(&mut cx, |this, cx| this.set_remote_id(remote_id, cx));
                            }
                        }
                        Ok(())
                    }
                    .log_err()
                }
            });

            Self {
                worktrees: Default::default(),
                collaborators: Default::default(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                client_state: ProjectClientState::Local {
                    is_shared: false,
                    remote_id_tx,
                    remote_id_rx,
                    _maintain_remote_id_task,
                },
                subscriptions: Vec::new(),
                active_entry: None,
                languages,
                client,
                user_store,
                fs,
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            }
        })
    }
224
    /// Joins a project hosted by a remote peer.
    ///
    /// Authenticates and connects the client, requests to join the project
    /// identified by `remote_id`, reconstructs its worktrees and collaborator
    /// list from the server's response, and subscribes to the RPC messages
    /// a guest needs to mirror the host's state.
    pub async fn remote(
        remote_id: u64,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        client.authenticate_and_connect(&cx).await?;

        let response = client
            .request(proto::JoinProject {
                project_id: remote_id,
            })
            .await?;

        let replica_id = response.replica_id as ReplicaId;

        // Build remote worktrees; their load tasks run in the background.
        let mut worktrees = Vec::new();
        for worktree in response.worktrees {
            let (worktree, load_task) = cx
                .update(|cx| Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx));
            worktrees.push(worktree);
            load_task.detach();
        }

        // Fetch user records for all collaborators before constructing them.
        let user_ids = response
            .collaborators
            .iter()
            .map(|peer| peer.user_id)
            .collect();
        user_store
            .update(cx, |user_store, cx| user_store.load_users(user_ids, cx))
            .await?;
        let mut collaborators = HashMap::default();
        for message in response.collaborators {
            let collaborator = Collaborator::from_proto(message, &user_store, cx).await?;
            collaborators.insert(collaborator.peer_id, collaborator);
        }

        Ok(cx.add_model(|cx| {
            let mut this = Self {
                worktrees: Vec::new(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                active_entry: None,
                collaborators,
                languages,
                user_store,
                fs,
                subscriptions: vec![
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unshare_project),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_share_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unregister_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_update_diagnostic_summary,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updating,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updated,
                    ),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer_file),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_reloaded),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
                ],
                client,
                client_state: ProjectClientState::Remote {
                    sharing_has_stopped: false,
                    remote_id,
                    replica_id,
                },
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            };
            for worktree in worktrees {
                this.add_worktree(&worktree, cx);
            }
            this
        }))
    }
318
    /// Records the server-assigned project id (for local projects) and
    /// rebuilds the RPC subscriptions a host needs under that id. Passing
    /// `None` clears the id and drops all subscriptions.
    fn set_remote_id(&mut self, remote_id: Option<u64>, cx: &mut ModelContext<Self>) {
        if let ProjectClientState::Local { remote_id_tx, .. } = &mut self.client_state {
            *remote_id_tx.borrow_mut() = remote_id;
        }

        // Old subscriptions are tied to the previous id; drop them before
        // subscribing under the new one.
        self.subscriptions.clear();
        if let Some(remote_id) = remote_id {
            let client = &self.client;
            self.subscriptions.extend([
                client.subscribe_to_entity(remote_id, cx, Self::handle_open_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_close_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
                client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
                client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
                client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_save_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
                client.subscribe_to_entity(remote_id, cx, Self::handle_format_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_get_completions),
                client.subscribe_to_entity(remote_id, cx, Self::handle_get_definition),
            ]);
        }
    }
342
343 pub fn remote_id(&self) -> Option<u64> {
344 match &self.client_state {
345 ProjectClientState::Local { remote_id_rx, .. } => *remote_id_rx.borrow(),
346 ProjectClientState::Remote { remote_id, .. } => Some(*remote_id),
347 }
348 }
349
    /// Returns a future resolving to this project's remote id.
    ///
    /// For a remote project the id is known and the future resolves
    /// immediately; for a local project the future waits on the
    /// `remote_id` watch channel until a server-assigned id appears.
    pub fn next_remote_id(&self) -> impl Future<Output = u64> {
        // Capture either the known id or a clone of the watch receiver
        // before moving into the async block, so the future doesn't
        // borrow `self`.
        let mut id = None;
        let mut watch = None;
        match &self.client_state {
            ProjectClientState::Local { remote_id_rx, .. } => watch = Some(remote_id_rx.clone()),
            ProjectClientState::Remote { remote_id, .. } => id = Some(*remote_id),
        }

        async move {
            if let Some(id) = id {
                return id;
            }
            let mut watch = watch.unwrap();
            loop {
                // Check the current value first, then await the next change.
                let id = *watch.borrow();
                if let Some(id) = id {
                    return id;
                }
                watch.recv().await;
            }
        }
    }
372
373 pub fn replica_id(&self) -> ReplicaId {
374 match &self.client_state {
375 ProjectClientState::Local { .. } => 0,
376 ProjectClientState::Remote { replica_id, .. } => *replica_id,
377 }
378 }
379
    /// The collaborators currently participating in this project, by peer id.
    pub fn collaborators(&self) -> &HashMap<PeerId, Collaborator> {
        &self.collaborators
    }
383
384 pub fn worktrees<'a>(
385 &'a self,
386 cx: &'a AppContext,
387 ) -> impl 'a + Iterator<Item = ModelHandle<Worktree>> {
388 self.worktrees
389 .iter()
390 .filter_map(move |worktree| worktree.upgrade(cx))
391 }
392
393 pub fn worktree_for_id(
394 &self,
395 id: WorktreeId,
396 cx: &AppContext,
397 ) -> Option<ModelHandle<Worktree>> {
398 self.worktrees(cx)
399 .find(|worktree| worktree.read(cx).id() == id)
400 }
401
    /// Shares this local project with collaborators.
    ///
    /// Marks the project as shared, tells the server via `ShareProject`,
    /// and then shares every worktree under the project id. Fails if the
    /// project has no remote id yet or is itself a remote project.
    pub fn share(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
        let rpc = self.client.clone();
        cx.spawn(|this, mut cx| async move {
            let project_id = this.update(&mut cx, |this, _| {
                if let ProjectClientState::Local {
                    is_shared,
                    remote_id_rx,
                    ..
                } = &mut this.client_state
                {
                    *is_shared = true;
                    remote_id_rx
                        .borrow()
                        .ok_or_else(|| anyhow!("no project id"))
                } else {
                    Err(anyhow!("can't share a remote project"))
                }
            })?;

            rpc.request(proto::ShareProject { project_id }).await?;
            // Start sharing each worktree inside `update`, then await the
            // resulting tasks outside of it.
            let mut tasks = Vec::new();
            this.update(&mut cx, |this, cx| {
                for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                    worktree.update(cx, |worktree, cx| {
                        let worktree = worktree.as_local_mut().unwrap();
                        tasks.push(worktree.share(project_id, cx));
                    });
                }
            });
            for task in tasks {
                task.await?;
            }
            this.update(&mut cx, |_, cx| cx.notify());
            Ok(())
        })
    }
438
439 pub fn unshare(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
440 let rpc = self.client.clone();
441 cx.spawn(|this, mut cx| async move {
442 let project_id = this.update(&mut cx, |this, _| {
443 if let ProjectClientState::Local {
444 is_shared,
445 remote_id_rx,
446 ..
447 } = &mut this.client_state
448 {
449 *is_shared = false;
450 remote_id_rx
451 .borrow()
452 .ok_or_else(|| anyhow!("no project id"))
453 } else {
454 Err(anyhow!("can't share a remote project"))
455 }
456 })?;
457
458 rpc.send(proto::UnshareProject { project_id }).await?;
459 this.update(&mut cx, |this, cx| {
460 this.collaborators.clear();
461 this.shared_buffers.clear();
462 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
463 worktree.update(cx, |worktree, _| {
464 worktree.as_local_mut().unwrap().unshare();
465 });
466 }
467 cx.notify()
468 });
469 Ok(())
470 })
471 }
472
473 pub fn is_read_only(&self) -> bool {
474 match &self.client_state {
475 ProjectClientState::Local { .. } => false,
476 ProjectClientState::Remote {
477 sharing_has_stopped,
478 ..
479 } => *sharing_has_stopped,
480 }
481 }
482
483 pub fn is_local(&self) -> bool {
484 match &self.client_state {
485 ProjectClientState::Local { .. } => true,
486 ProjectClientState::Remote { .. } => false,
487 }
488 }
489
    /// Opens a buffer for the given project path, deduplicating concurrent
    /// requests.
    ///
    /// If the buffer is already open it is returned immediately. If a load
    /// for the same path is in flight, the returned task waits on that load
    /// and yields the same buffer. Otherwise a new load is started — locally
    /// from disk, or over RPC for remote worktrees.
    pub fn open_buffer(
        &mut self,
        path: impl Into<ProjectPath>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let project_path = path.into();
        let worktree = if let Some(worktree) = self.worktree_for_id(project_path.worktree_id, cx) {
            worktree
        } else {
            return Task::ready(Err(anyhow!("no such worktree")));
        };

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = self.get_open_buffer(&project_path, cx);
        if let Some(existing_buffer) = existing_buffer {
            return Task::ready(Ok(existing_buffer));
        }

        let mut loading_watch = match self.loading_buffers.entry(project_path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = if worktree.read(cx).is_local() {
                    self.open_local_buffer(&project_path.path, &worktree, cx)
                } else {
                    self.open_remote_buffer(&project_path.path, &worktree, cx)
                };

                cx.spawn(move |this, mut cx| async move {
                    let load_result = load_buffer.await;
                    // Publish the result (success or error) to all waiters
                    // via the watch channel.
                    *tx.borrow_mut() = Some(this.update(&mut cx, |this, _| {
                        // Record the fact that the buffer is no longer loading.
                        this.loading_buffers.remove(&project_path);
                        let buffer = load_result.map_err(Arc::new)?;
                        Ok(buffer)
                    }));
                })
                .detach();
                rx
            }
        };

        cx.foreground().spawn(async move {
            // Wait until the load publishes a result; the initial watch
            // value is `None` while the load is still in flight.
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    match result {
                        Ok(buffer) => return Ok(buffer.clone()),
                        Err(error) => return Err(anyhow!("{}", error)),
                    }
                }
                loading_watch.recv().await;
            }
        })
    }
550
    /// Loads a buffer from disk via the local worktree, then registers it
    /// with the project (tracking it and assigning a language/server).
    fn open_local_buffer(
        &mut self,
        path: &Arc<Path>,
        worktree: &ModelHandle<Worktree>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let load_buffer = worktree.update(cx, |worktree, cx| {
            let worktree = worktree.as_local_mut().unwrap();
            worktree.load_buffer(path, cx)
        });
        // Hold the worktree weakly across the await; it may be removed
        // while the buffer is loading.
        let worktree = worktree.downgrade();
        cx.spawn(|this, mut cx| async move {
            let buffer = load_buffer.await?;
            let worktree = worktree
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was removed"))?;
            this.update(&mut cx, |this, cx| {
                this.register_buffer(&buffer, Some(&worktree), cx)
            })?;
            Ok(buffer)
        })
    }
573
    /// Requests a buffer's contents from the host over RPC and deserializes
    /// it into a local buffer model.
    ///
    /// NOTE(review): `self.remote_id().unwrap()` panics if the project has
    /// no remote id — callers appear to only reach this for remote
    /// worktrees, where an id always exists; confirm.
    fn open_remote_buffer(
        &mut self,
        path: &Arc<Path>,
        worktree: &ModelHandle<Worktree>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let project_id = self.remote_id().unwrap();
        let remote_worktree_id = worktree.read(cx).id();
        let path = path.clone();
        let path_string = path.to_string_lossy().to_string();
        cx.spawn(|this, mut cx| async move {
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id.to_proto(),
                    path: path_string,
                })
                .await?;
            let buffer = response.buffer.ok_or_else(|| anyhow!("missing buffer"))?;
            this.update(&mut cx, |this, cx| {
                this.deserialize_remote_buffer(buffer, cx)
            })
        })
    }
599
    /// Saves `buffer` at a new absolute path.
    ///
    /// Finds (or creates) a local worktree containing `abs_path`, saves the
    /// buffer there, and then re-assigns the buffer's language based on its
    /// new path.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        abs_path: PathBuf,
        cx: &mut ModelContext<Project>,
    ) -> Task<Result<()>> {
        let worktree_task = self.find_or_create_local_worktree(&abs_path, false, cx);
        cx.spawn(|this, mut cx| async move {
            let (worktree, path) = worktree_task.await?;
            worktree
                .update(&mut cx, |worktree, cx| {
                    worktree
                        .as_local_mut()
                        .unwrap()
                        .save_buffer_as(buffer.clone(), path, cx)
                })
                .await?;
            // The file's extension may have changed; pick a language again.
            this.update(&mut cx, |this, cx| {
                this.assign_language_to_buffer(&buffer, Some(&worktree), cx);
            });
            Ok(())
        })
    }
623
624 #[cfg(any(test, feature = "test-support"))]
625 pub fn has_open_buffer(&self, path: impl Into<ProjectPath>, cx: &AppContext) -> bool {
626 let path = path.into();
627 if let Some(worktree) = self.worktree_for_id(path.worktree_id, cx) {
628 self.open_buffers.iter().any(|(_, buffer)| {
629 if let Some(buffer) = buffer.upgrade(cx) {
630 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
631 if file.worktree == worktree && file.path() == &path.path {
632 return true;
633 }
634 }
635 }
636 false
637 })
638 } else {
639 false
640 }
641 }
642
    /// Returns the already-open buffer for the given project path, if any.
    ///
    /// As a side effect, prunes `open_buffers` of entries whose weak
    /// handles can no longer be upgraded (i.e. buffers that have been
    /// dropped).
    fn get_open_buffer(
        &mut self,
        path: &ProjectPath,
        cx: &mut ModelContext<Self>,
    ) -> Option<ModelHandle<Buffer>> {
        let mut result = None;
        let worktree = self.worktree_for_id(path.worktree_id, cx)?;
        self.open_buffers.retain(|_, buffer| {
            if let Some(buffer) = buffer.upgrade(cx) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == worktree && file.path() == &path.path {
                        result = Some(buffer);
                    }
                }
                // Keep live buffers, matching or not.
                true
            } else {
                // Drop entries for buffers that no longer exist.
                false
            }
        });
        result
    }
664
665 fn register_buffer(
666 &mut self,
667 buffer: &ModelHandle<Buffer>,
668 worktree: Option<&ModelHandle<Worktree>>,
669 cx: &mut ModelContext<Self>,
670 ) -> Result<()> {
671 if self
672 .open_buffers
673 .insert(buffer.read(cx).remote_id() as usize, buffer.downgrade())
674 .is_some()
675 {
676 return Err(anyhow!("registered the same buffer twice"));
677 }
678 self.assign_language_to_buffer(&buffer, worktree, cx);
679 Ok(())
680 }
681
    /// Picks a language for `buffer` from its full path, attaches a language
    /// server (starting one for the worktree/language pair if needed), and
    /// applies any diagnostics previously stored for the buffer's path.
    ///
    /// Returns `None` if the buffer has no file; the return value is
    /// otherwise unused.
    fn assign_language_to_buffer(
        &mut self,
        buffer: &ModelHandle<Buffer>,
        worktree: Option<&ModelHandle<Worktree>>,
        cx: &mut ModelContext<Self>,
    ) -> Option<()> {
        let (path, full_path) = {
            let file = buffer.read(cx).file()?;
            (file.path().clone(), file.full_path(cx))
        };

        // If the buffer has a language, set it and start/assign the language server
        if let Some(language) = self.languages.select_language(&full_path) {
            buffer.update(cx, |buffer, cx| {
                buffer.set_language(Some(language.clone()), cx);
            });

            // For local worktrees, start a language server if needed.
            // Also assign the language server and any previously stored diagnostics to the buffer.
            if let Some(local_worktree) = worktree.and_then(|w| w.read(cx).as_local()) {
                let worktree_id = local_worktree.id();
                let worktree_abs_path = local_worktree.abs_path().clone();

                // Reuse an existing server for this (worktree, language)
                // pair, or start a new one and cache it.
                let language_server = match self
                    .language_servers
                    .entry((worktree_id, language.name().to_string()))
                {
                    hash_map::Entry::Occupied(e) => Some(e.get().clone()),
                    hash_map::Entry::Vacant(e) => Self::start_language_server(
                        self.client.clone(),
                        language.clone(),
                        &worktree_abs_path,
                        cx,
                    )
                    .map(|server| e.insert(server).clone()),
                };

                buffer.update(cx, |buffer, cx| {
                    buffer.set_language_server(language_server, cx);
                });
            }
        }

        // Apply stored diagnostics regardless of whether a language matched.
        if let Some(local_worktree) = worktree.and_then(|w| w.read(cx).as_local()) {
            if let Some(diagnostics) = local_worktree.diagnostics_for_path(&path) {
                buffer.update(cx, |buffer, cx| {
                    buffer.update_diagnostics(None, diagnostics, cx).log_err();
                });
            }
        }

        None
    }
735
    /// Starts a language server for `language` rooted at `worktree_path`
    /// and wires up its diagnostics.
    ///
    /// Two notification streams are funneled into one channel of
    /// `LspEvent`s, which a spawned task processes: start/finish events
    /// update the project's disk-based-diagnostics state (and are forwarded
    /// over RPC when shared), and update events apply the published
    /// diagnostics. Returns `None` if the server fails to start.
    fn start_language_server(
        rpc: Arc<Client>,
        language: Arc<Language>,
        worktree_path: &Path,
        cx: &mut ModelContext<Self>,
    ) -> Option<Arc<LanguageServer>> {
        enum LspEvent {
            DiagnosticsStart,
            DiagnosticsUpdate(lsp::PublishDiagnosticsParams),
            DiagnosticsFinish,
        }

        let language_server = language
            .start_server(worktree_path, cx)
            .log_err()
            .flatten()?;
        let disk_based_sources = language
            .disk_based_diagnostic_sources()
            .cloned()
            .unwrap_or_default();
        let disk_based_diagnostics_progress_token =
            language.disk_based_diagnostics_progress_token().cloned();
        let has_disk_based_diagnostic_progress_token =
            disk_based_diagnostics_progress_token.is_some();
        let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();

        // Listen for `PublishDiagnostics` notifications.
        language_server
            .on_notification::<lsp::notification::PublishDiagnostics, _>({
                let diagnostics_tx = diagnostics_tx.clone();
                move |params| {
                    // Without a progress token there's no other signal for
                    // when a diagnostics pass runs, so bracket each update
                    // with synthetic start/finish events.
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                    }
                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsUpdate(params))).ok();
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                    }
                }
            })
            .detach();

        // Listen for `Progress` notifications. Send an event when the language server
        // transitions between running jobs and not running any jobs.
        let mut running_jobs_for_this_server: i32 = 0;
        language_server
            .on_notification::<lsp::notification::Progress, _>(move |params| {
                let token = match params.token {
                    lsp::NumberOrString::Number(_) => None,
                    lsp::NumberOrString::String(token) => Some(token),
                };

                // Only progress reports matching the language's diagnostics
                // token are of interest.
                if token == disk_based_diagnostics_progress_token {
                    match params.value {
                        lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                            lsp::WorkDoneProgress::Begin(_) => {
                                running_jobs_for_this_server += 1;
                                if running_jobs_for_this_server == 1 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                                }
                            }
                            lsp::WorkDoneProgress::End(_) => {
                                running_jobs_for_this_server -= 1;
                                if running_jobs_for_this_server == 0 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                                }
                            }
                            _ => {}
                        },
                    }
                }
            })
            .detach();

        // Process all the LSP events.
        cx.spawn_weak(|this, mut cx| async move {
            while let Ok(message) = diagnostics_rx.recv().await {
                // Stop processing if the project has been dropped.
                let this = cx.read(|cx| this.upgrade(cx))?;
                match message {
                    LspEvent::DiagnosticsStart => {
                        let send = this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_started(cx);
                            // Forward the state change to guests, if shared.
                            this.remote_id().map(|project_id| {
                                rpc.send(proto::DiskBasedDiagnosticsUpdating { project_id })
                            })
                        });
                        if let Some(send) = send {
                            send.await.log_err();
                        }
                    }
                    LspEvent::DiagnosticsUpdate(mut params) => {
                        language.process_diagnostics(&mut params);
                        this.update(&mut cx, |this, cx| {
                            this.update_diagnostics(params, &disk_based_sources, cx)
                                .log_err();
                        });
                    }
                    LspEvent::DiagnosticsFinish => {
                        let send = this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_finished(cx);
                            this.remote_id().map(|project_id| {
                                rpc.send(proto::DiskBasedDiagnosticsUpdated { project_id })
                            })
                        });
                        if let Some(send) = send {
                            send.await.log_err();
                        }
                    }
                }
            }
            Some(())
        })
        .detach();

        Some(language_server)
    }
852
853 pub fn update_diagnostics(
854 &mut self,
855 params: lsp::PublishDiagnosticsParams,
856 disk_based_sources: &HashSet<String>,
857 cx: &mut ModelContext<Self>,
858 ) -> Result<()> {
859 let abs_path = params
860 .uri
861 .to_file_path()
862 .map_err(|_| anyhow!("URI is not a file"))?;
863 let mut next_group_id = 0;
864 let mut diagnostics = Vec::default();
865 let mut primary_diagnostic_group_ids = HashMap::default();
866 let mut sources_by_group_id = HashMap::default();
867 let mut supporting_diagnostic_severities = HashMap::default();
868 for diagnostic in ¶ms.diagnostics {
869 let source = diagnostic.source.as_ref();
870 let code = diagnostic.code.as_ref().map(|code| match code {
871 lsp::NumberOrString::Number(code) => code.to_string(),
872 lsp::NumberOrString::String(code) => code.clone(),
873 });
874 let range = range_from_lsp(diagnostic.range);
875 let is_supporting = diagnostic
876 .related_information
877 .as_ref()
878 .map_or(false, |infos| {
879 infos.iter().any(|info| {
880 primary_diagnostic_group_ids.contains_key(&(
881 source,
882 code.clone(),
883 range_from_lsp(info.location.range),
884 ))
885 })
886 });
887
888 if is_supporting {
889 if let Some(severity) = diagnostic.severity {
890 supporting_diagnostic_severities
891 .insert((source, code.clone(), range), severity);
892 }
893 } else {
894 let group_id = post_inc(&mut next_group_id);
895 let is_disk_based =
896 source.map_or(false, |source| disk_based_sources.contains(source));
897
898 sources_by_group_id.insert(group_id, source);
899 primary_diagnostic_group_ids
900 .insert((source, code.clone(), range.clone()), group_id);
901
902 diagnostics.push(DiagnosticEntry {
903 range,
904 diagnostic: Diagnostic {
905 code: code.clone(),
906 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
907 message: diagnostic.message.clone(),
908 group_id,
909 is_primary: true,
910 is_valid: true,
911 is_disk_based,
912 },
913 });
914 if let Some(infos) = &diagnostic.related_information {
915 for info in infos {
916 if info.location.uri == params.uri && !info.message.is_empty() {
917 let range = range_from_lsp(info.location.range);
918 diagnostics.push(DiagnosticEntry {
919 range,
920 diagnostic: Diagnostic {
921 code: code.clone(),
922 severity: DiagnosticSeverity::INFORMATION,
923 message: info.message.clone(),
924 group_id,
925 is_primary: false,
926 is_valid: true,
927 is_disk_based,
928 },
929 });
930 }
931 }
932 }
933 }
934 }
935
936 for entry in &mut diagnostics {
937 let diagnostic = &mut entry.diagnostic;
938 if !diagnostic.is_primary {
939 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
940 if let Some(&severity) = supporting_diagnostic_severities.get(&(
941 source,
942 diagnostic.code.clone(),
943 entry.range.clone(),
944 )) {
945 diagnostic.severity = severity;
946 }
947 }
948 }
949
950 self.update_diagnostic_entries(abs_path, params.version, diagnostics, cx)?;
951 Ok(())
952 }
953
    /// Applies a fresh set of diagnostics to the file at `abs_path`: updates
    /// the first matching open buffer, stores the entries in the file's
    /// local worktree, and emits `Event::DiagnosticsUpdated`.
    ///
    /// Fails if no local worktree contains `abs_path` or the worktree is
    /// not local.
    pub fn update_diagnostic_entries(
        &mut self,
        abs_path: PathBuf,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Project>,
    ) -> Result<(), anyhow::Error> {
        let (worktree, relative_path) = self
            .find_local_worktree(&abs_path, cx)
            .ok_or_else(|| anyhow!("no worktree found for diagnostics"))?;
        let project_path = ProjectPath {
            worktree_id: worktree.read(cx).id(),
            path: relative_path.into(),
        };

        // NOTE(review): buffers are matched on relative path only, not on
        // worktree, and only the first match is updated (`break`) — two
        // worktrees containing the same relative path could collide; confirm.
        for buffer in self.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == project_path.path)
                {
                    buffer.update(cx, |buffer, cx| {
                        buffer.update_diagnostics(version, diagnostics.clone(), cx)
                    })?;
                    break;
                }
            }
        }
        // Persist the diagnostics in the worktree so buffers opened later
        // can pick them up (see `assign_language_to_buffer`).
        worktree.update(cx, |worktree, cx| {
            worktree
                .as_local_mut()
                .ok_or_else(|| anyhow!("not a local worktree"))?
                .update_diagnostics(project_path.path.clone(), diagnostics, cx)
        })?;
        cx.emit(Event::DiagnosticsUpdated(project_path));
        Ok(())
    }
992
993 pub fn definition<T: ToOffset>(
994 &self,
995 source_buffer_handle: &ModelHandle<Buffer>,
996 position: T,
997 cx: &mut ModelContext<Self>,
998 ) -> Task<Result<Vec<Definition>>> {
999 let source_buffer_handle = source_buffer_handle.clone();
1000 let source_buffer = source_buffer_handle.read(cx);
1001 let worktree;
1002 let buffer_abs_path;
1003 if let Some(file) = File::from_dyn(source_buffer.file()) {
1004 worktree = file.worktree.clone();
1005 buffer_abs_path = file.as_local().map(|f| f.abs_path(cx));
1006 } else {
1007 return Task::ready(Err(anyhow!("buffer does not belong to any worktree")));
1008 };
1009
1010 if worktree.read(cx).as_local().is_some() {
1011 let point = source_buffer.offset_to_point_utf16(position.to_offset(source_buffer));
1012 let buffer_abs_path = buffer_abs_path.unwrap();
1013 let lang_name;
1014 let lang_server;
1015 if let Some(lang) = source_buffer.language() {
1016 lang_name = lang.name().to_string();
1017 if let Some(server) = self
1018 .language_servers
1019 .get(&(worktree.read(cx).id(), lang_name.clone()))
1020 {
1021 lang_server = server.clone();
1022 } else {
1023 return Task::ready(Err(anyhow!("buffer does not have a language server")));
1024 };
1025 } else {
1026 return Task::ready(Err(anyhow!("buffer does not have a language")));
1027 }
1028
1029 cx.spawn(|this, mut cx| async move {
1030 let response = lang_server
1031 .request::<lsp::request::GotoDefinition>(lsp::GotoDefinitionParams {
1032 text_document_position_params: lsp::TextDocumentPositionParams {
1033 text_document: lsp::TextDocumentIdentifier::new(
1034 lsp::Url::from_file_path(&buffer_abs_path).unwrap(),
1035 ),
1036 position: lsp::Position::new(point.row, point.column),
1037 },
1038 work_done_progress_params: Default::default(),
1039 partial_result_params: Default::default(),
1040 })
1041 .await?;
1042
1043 let mut definitions = Vec::new();
1044 if let Some(response) = response {
1045 let mut unresolved_locations = Vec::new();
1046 match response {
1047 lsp::GotoDefinitionResponse::Scalar(loc) => {
1048 unresolved_locations.push((loc.uri, loc.range));
1049 }
1050 lsp::GotoDefinitionResponse::Array(locs) => {
1051 unresolved_locations.extend(locs.into_iter().map(|l| (l.uri, l.range)));
1052 }
1053 lsp::GotoDefinitionResponse::Link(links) => {
1054 unresolved_locations.extend(
1055 links
1056 .into_iter()
1057 .map(|l| (l.target_uri, l.target_selection_range)),
1058 );
1059 }
1060 }
1061
1062 for (target_uri, target_range) in unresolved_locations {
1063 let abs_path = target_uri
1064 .to_file_path()
1065 .map_err(|_| anyhow!("invalid target path"))?;
1066
1067 let (worktree, relative_path) = if let Some(result) =
1068 this.read_with(&cx, |this, cx| this.find_local_worktree(&abs_path, cx))
1069 {
1070 result
1071 } else {
1072 let worktree = this
1073 .update(&mut cx, |this, cx| {
1074 this.create_local_worktree(&abs_path, true, cx)
1075 })
1076 .await?;
1077 this.update(&mut cx, |this, cx| {
1078 this.language_servers.insert(
1079 (worktree.read(cx).id(), lang_name.clone()),
1080 lang_server.clone(),
1081 );
1082 });
1083 (worktree, PathBuf::new())
1084 };
1085
1086 let project_path = ProjectPath {
1087 worktree_id: worktree.read_with(&cx, |worktree, _| worktree.id()),
1088 path: relative_path.into(),
1089 };
1090 let target_buffer_handle = this
1091 .update(&mut cx, |this, cx| this.open_buffer(project_path, cx))
1092 .await?;
1093 cx.read(|cx| {
1094 let target_buffer = target_buffer_handle.read(cx);
1095 let target_start = target_buffer
1096 .clip_point_utf16(target_range.start.to_point_utf16(), Bias::Left);
1097 let target_end = target_buffer
1098 .clip_point_utf16(target_range.end.to_point_utf16(), Bias::Left);
1099 definitions.push(Definition {
1100 target_buffer: target_buffer_handle,
1101 target_range: target_buffer.anchor_after(target_start)
1102 ..target_buffer.anchor_before(target_end),
1103 });
1104 });
1105 }
1106 }
1107
1108 Ok(definitions)
1109 })
1110 } else if let Some(project_id) = self.remote_id() {
1111 let client = self.client.clone();
1112 let request = proto::GetDefinition {
1113 project_id,
1114 buffer_id: source_buffer.remote_id(),
1115 position: Some(serialize_anchor(&source_buffer.anchor_before(position))),
1116 };
1117 cx.spawn(|this, mut cx| async move {
1118 let response = client.request(request).await?;
1119 this.update(&mut cx, |this, cx| {
1120 let mut definitions = Vec::new();
1121 for definition in response.definitions {
1122 let target_buffer = this.deserialize_remote_buffer(
1123 definition.buffer.ok_or_else(|| anyhow!("missing buffer"))?,
1124 cx,
1125 )?;
1126 let target_start = definition
1127 .target_start
1128 .and_then(deserialize_anchor)
1129 .ok_or_else(|| anyhow!("missing target start"))?;
1130 let target_end = definition
1131 .target_end
1132 .and_then(deserialize_anchor)
1133 .ok_or_else(|| anyhow!("missing target end"))?;
1134 definitions.push(Definition {
1135 target_buffer,
1136 target_range: target_start..target_end,
1137 })
1138 }
1139
1140 Ok(definitions)
1141 })
1142 })
1143 } else {
1144 Task::ready(Err(anyhow!("project does not have a remote id")))
1145 }
1146 }
1147
1148 pub fn find_or_create_local_worktree(
1149 &self,
1150 abs_path: impl AsRef<Path>,
1151 weak: bool,
1152 cx: &mut ModelContext<Self>,
1153 ) -> Task<Result<(ModelHandle<Worktree>, PathBuf)>> {
1154 let abs_path = abs_path.as_ref();
1155 if let Some((tree, relative_path)) = self.find_local_worktree(abs_path, cx) {
1156 Task::ready(Ok((tree.clone(), relative_path.into())))
1157 } else {
1158 let worktree = self.create_local_worktree(abs_path, weak, cx);
1159 cx.foreground()
1160 .spawn(async move { Ok((worktree.await?, PathBuf::new())) })
1161 }
1162 }
1163
1164 fn find_local_worktree(
1165 &self,
1166 abs_path: &Path,
1167 cx: &AppContext,
1168 ) -> Option<(ModelHandle<Worktree>, PathBuf)> {
1169 for tree in self.worktrees(cx) {
1170 if let Some(relative_path) = tree
1171 .read(cx)
1172 .as_local()
1173 .and_then(|t| abs_path.strip_prefix(t.abs_path()).ok())
1174 {
1175 return Some((tree.clone(), relative_path.into()));
1176 }
1177 }
1178 None
1179 }
1180
1181 pub fn is_shared(&self) -> bool {
1182 match &self.client_state {
1183 ProjectClientState::Local { is_shared, .. } => *is_shared,
1184 ProjectClientState::Remote { .. } => false,
1185 }
1186 }
1187
    /// Creates a new local worktree rooted at `abs_path` and adds it to the
    /// project. If the project has a remote id, the worktree is also
    /// registered with the server, and additionally shared when the project
    /// is currently shared.
    fn create_local_worktree(
        &self,
        abs_path: impl AsRef<Path>,
        weak: bool,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Worktree>>> {
        let fs = self.fs.clone();
        let client = self.client.clone();
        let path = Arc::from(abs_path.as_ref());
        cx.spawn(|project, mut cx| async move {
            let worktree = Worktree::local(client.clone(), path, weak, fs, &mut cx).await?;

            // Add the worktree before any server round-trips so observers see
            // it immediately; read the sharing state in the same update.
            let (remote_project_id, is_shared) = project.update(&mut cx, |project, cx| {
                project.add_worktree(&worktree, cx);
                (project.remote_id(), project.is_shared())
            });

            if let Some(project_id) = remote_project_id {
                // Registration must complete before sharing can begin.
                worktree
                    .update(&mut cx, |worktree, cx| {
                        worktree.as_local_mut().unwrap().register(project_id, cx)
                    })
                    .await?;
                if is_shared {
                    worktree
                        .update(&mut cx, |worktree, cx| {
                            worktree.as_local_mut().unwrap().share(project_id, cx)
                        })
                        .await?;
                }
            }

            Ok(worktree)
        })
    }
1223
1224 pub fn remove_worktree(&mut self, id: WorktreeId, cx: &mut ModelContext<Self>) {
1225 self.worktrees.retain(|worktree| {
1226 worktree
1227 .upgrade(cx)
1228 .map_or(false, |w| w.read(cx).id() != id)
1229 });
1230 cx.notify();
1231 }
1232
    /// Registers a worktree with the project: re-renders when it changes,
    /// forwards local worktree events to open buffers, and stores a handle.
    fn add_worktree(&mut self, worktree: &ModelHandle<Worktree>, cx: &mut ModelContext<Self>) {
        cx.observe(&worktree, |_, _, cx| cx.notify()).detach();
        if worktree.read(cx).is_local() {
            // Local worktree events carry filesystem changes that may affect
            // the files backing our open buffers.
            cx.subscribe(&worktree, |this, worktree, _, cx| {
                this.update_local_worktree_buffers(worktree, cx);
            })
            .detach();
        }

        // Weak local worktrees (e.g. the ones created transiently for
        // go-to-definition targets outside the project) are held weakly so
        // they can be released once nothing else uses them; everything else
        // is kept alive by the project itself.
        let push_weak_handle = {
            let worktree = worktree.read(cx);
            worktree.is_local() && worktree.is_weak()
        };
        if push_weak_handle {
            cx.observe_release(&worktree, |this, cx| {
                // Prune any weak handles whose worktree has been dropped.
                this.worktrees
                    .retain(|worktree| worktree.upgrade(cx).is_some());
                cx.notify();
            })
            .detach();
            self.worktrees
                .push(WorktreeHandle::Weak(worktree.downgrade()));
        } else {
            self.worktrees
                .push(WorktreeHandle::Strong(worktree.clone()));
        }
        cx.notify();
    }
1261
    /// Reconciles open buffers with a local worktree's latest snapshot after
    /// filesystem changes: rebinds each affected buffer's `File` to its
    /// current entry (following renames), marks files that disappeared from
    /// disk, and notifies remote collaborators of the new file metadata.
    fn update_local_worktree_buffers(
        &mut self,
        worktree_handle: ModelHandle<Worktree>,
        cx: &mut ModelContext<Self>,
    ) {
        let snapshot = worktree_handle.read(cx).snapshot();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        // Only buffers belonging to the changed worktree are
                        // affected.
                        if old_file.worktree != worktree_handle {
                            return;
                        }

                        // Resolve by entry id first (stable across renames),
                        // then by the old path; if neither matches, the file
                        // no longer exists on disk and loses its entry id.
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| snapshot.entry_for_id(entry_id))
                        {
                            File {
                                is_local: true,
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) =
                            snapshot.entry_for_path(old_file.path().as_ref())
                        {
                            File {
                                is_local: true,
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: true,
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        // When hosting a shared project, guests must learn
                        // about the buffer's new file metadata too.
                        if let Some(project_id) = self.remote_id() {
                            let client = self.client.clone();
                            let message = proto::UpdateBufferFile {
                                project_id,
                                buffer_id: *buffer_id as u64,
                                file: Some(new_file.to_proto()),
                            };
                            cx.foreground()
                                .spawn(async move { client.send(message).await })
                                .detach_and_log_err(cx);
                        }
                        buffer.file_updated(Box::new(new_file), cx).detach();
                    }
                });
            } else {
                // The buffer was dropped; remember its id so the entry can be
                // removed after the loop (can't mutate the map mid-iteration).
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            self.open_buffers.remove(&buffer_id);
        }
    }
1331
1332 pub fn set_active_path(&mut self, entry: Option<ProjectPath>, cx: &mut ModelContext<Self>) {
1333 let new_active_entry = entry.and_then(|project_path| {
1334 let worktree = self.worktree_for_id(project_path.worktree_id, cx)?;
1335 let entry = worktree.read(cx).entry_for_path(project_path.path)?;
1336 Some(ProjectEntry {
1337 worktree_id: project_path.worktree_id,
1338 entry_id: entry.id,
1339 })
1340 });
1341 if new_active_entry != self.active_entry {
1342 self.active_entry = new_active_entry;
1343 cx.emit(Event::ActiveEntryChanged(new_active_entry));
1344 }
1345 }
1346
1347 pub fn is_running_disk_based_diagnostics(&self) -> bool {
1348 self.language_servers_with_diagnostics_running > 0
1349 }
1350
1351 pub fn diagnostic_summary(&self, cx: &AppContext) -> DiagnosticSummary {
1352 let mut summary = DiagnosticSummary::default();
1353 for (_, path_summary) in self.diagnostic_summaries(cx) {
1354 summary.error_count += path_summary.error_count;
1355 summary.warning_count += path_summary.warning_count;
1356 summary.info_count += path_summary.info_count;
1357 summary.hint_count += path_summary.hint_count;
1358 }
1359 summary
1360 }
1361
1362 pub fn diagnostic_summaries<'a>(
1363 &'a self,
1364 cx: &'a AppContext,
1365 ) -> impl Iterator<Item = (ProjectPath, DiagnosticSummary)> + 'a {
1366 self.worktrees(cx).flat_map(move |worktree| {
1367 let worktree = worktree.read(cx);
1368 let worktree_id = worktree.id();
1369 worktree
1370 .diagnostic_summaries()
1371 .map(move |(path, summary)| (ProjectPath { worktree_id, path }, summary))
1372 })
1373 }
1374
1375 pub fn disk_based_diagnostics_started(&mut self, cx: &mut ModelContext<Self>) {
1376 self.language_servers_with_diagnostics_running += 1;
1377 if self.language_servers_with_diagnostics_running == 1 {
1378 cx.emit(Event::DiskBasedDiagnosticsStarted);
1379 }
1380 }
1381
1382 pub fn disk_based_diagnostics_finished(&mut self, cx: &mut ModelContext<Self>) {
1383 cx.emit(Event::DiskBasedDiagnosticsUpdated);
1384 self.language_servers_with_diagnostics_running -= 1;
1385 if self.language_servers_with_diagnostics_running == 0 {
1386 cx.emit(Event::DiskBasedDiagnosticsFinished);
1387 }
1388 }
1389
    /// Returns the project entry currently focused in the workspace, if any.
    pub fn active_entry(&self) -> Option<ProjectEntry> {
        self.active_entry
    }
1393
1394 // RPC message handlers
1395
1396 fn handle_unshare_project(
1397 &mut self,
1398 _: TypedEnvelope<proto::UnshareProject>,
1399 _: Arc<Client>,
1400 cx: &mut ModelContext<Self>,
1401 ) -> Result<()> {
1402 if let ProjectClientState::Remote {
1403 sharing_has_stopped,
1404 ..
1405 } = &mut self.client_state
1406 {
1407 *sharing_has_stopped = true;
1408 self.collaborators.clear();
1409 cx.notify();
1410 Ok(())
1411 } else {
1412 unreachable!()
1413 }
1414 }
1415
    /// Handles a notification that a new collaborator joined the project.
    /// The peer's user record is fetched asynchronously from the user store
    /// before the collaborator becomes visible; fetch failures are logged.
    fn handle_add_collaborator(
        &mut self,
        mut envelope: TypedEnvelope<proto::AddProjectCollaborator>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let user_store = self.user_store.clone();
        let collaborator = envelope
            .payload
            .collaborator
            .take()
            .ok_or_else(|| anyhow!("empty collaborator"))?;

        cx.spawn(|this, mut cx| {
            async move {
                let collaborator =
                    Collaborator::from_proto(collaborator, &user_store, &mut cx).await?;
                this.update(&mut cx, |this, cx| {
                    this.collaborators
                        .insert(collaborator.peer_id, collaborator);
                    cx.notify();
                });
                Ok(())
            }
            .log_err()
        })
        .detach();

        Ok(())
    }
1446
1447 fn handle_remove_collaborator(
1448 &mut self,
1449 envelope: TypedEnvelope<proto::RemoveProjectCollaborator>,
1450 _: Arc<Client>,
1451 cx: &mut ModelContext<Self>,
1452 ) -> Result<()> {
1453 let peer_id = PeerId(envelope.payload.peer_id);
1454 let replica_id = self
1455 .collaborators
1456 .remove(&peer_id)
1457 .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?
1458 .replica_id;
1459 self.shared_buffers.remove(&peer_id);
1460 for (_, buffer) in &self.open_buffers {
1461 if let Some(buffer) = buffer.upgrade(cx) {
1462 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1463 }
1464 }
1465 cx.notify();
1466 Ok(())
1467 }
1468
    /// Handles the host sharing a worktree with this guest: builds a remote
    /// worktree from the snapshot in the message, adds it to the project, and
    /// detaches the task that finishes loading it in the background.
    fn handle_share_worktree(
        &mut self,
        envelope: TypedEnvelope<proto::ShareWorktree>,
        client: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let remote_id = self.remote_id().ok_or_else(|| anyhow!("invalid project"))?;
        let replica_id = self.replica_id();
        let worktree = envelope
            .payload
            .worktree
            .ok_or_else(|| anyhow!("invalid worktree"))?;
        let (worktree, load_task) = Worktree::remote(remote_id, replica_id, worktree, client, cx);
        self.add_worktree(&worktree, cx);
        load_task.detach();
        Ok(())
    }
1486
1487 fn handle_unregister_worktree(
1488 &mut self,
1489 envelope: TypedEnvelope<proto::UnregisterWorktree>,
1490 _: Arc<Client>,
1491 cx: &mut ModelContext<Self>,
1492 ) -> Result<()> {
1493 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1494 self.remove_worktree(worktree_id, cx);
1495 Ok(())
1496 }
1497
1498 fn handle_update_worktree(
1499 &mut self,
1500 envelope: TypedEnvelope<proto::UpdateWorktree>,
1501 _: Arc<Client>,
1502 cx: &mut ModelContext<Self>,
1503 ) -> Result<()> {
1504 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1505 if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
1506 worktree.update(cx, |worktree, cx| {
1507 let worktree = worktree.as_remote_mut().unwrap();
1508 worktree.update_from_remote(envelope, cx)
1509 })?;
1510 }
1511 Ok(())
1512 }
1513
    /// Handles a diagnostics summary pushed by the host for one path of a
    /// remote worktree, storing it on the worktree and re-emitting it as a
    /// project event so views can refresh.
    fn handle_update_diagnostic_summary(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        // Summaries for unknown worktrees are silently dropped; the worktree
        // may have been unregistered while the message was in flight.
        if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
            if let Some(summary) = envelope.payload.summary {
                let project_path = ProjectPath {
                    worktree_id,
                    path: Path::new(&summary.path).into(),
                };
                worktree.update(cx, |worktree, _| {
                    worktree
                        .as_remote_mut()
                        .unwrap()
                        .update_diagnostic_summary(project_path.path.clone(), &summary);
                });
                cx.emit(Event::DiagnosticsUpdated(project_path));
            }
        }
        Ok(())
    }
1538
    /// Handles the host announcing that a disk-based diagnostics pass began;
    /// simply mirrors it into the local running-diagnostics counter.
    fn handle_disk_based_diagnostics_updating(
        &mut self,
        _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdating>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        self.disk_based_diagnostics_started(cx);
        Ok(())
    }
1548
    /// Handles the host announcing that a disk-based diagnostics pass ended;
    /// simply mirrors it into the local running-diagnostics counter.
    fn handle_disk_based_diagnostics_updated(
        &mut self,
        _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdated>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        self.disk_based_diagnostics_finished(cx);
        Ok(())
    }
1558
1559 pub fn handle_update_buffer(
1560 &mut self,
1561 envelope: TypedEnvelope<proto::UpdateBuffer>,
1562 _: Arc<Client>,
1563 cx: &mut ModelContext<Self>,
1564 ) -> Result<()> {
1565 let payload = envelope.payload.clone();
1566 let buffer_id = payload.buffer_id as usize;
1567 let ops = payload
1568 .operations
1569 .into_iter()
1570 .map(|op| language::proto::deserialize_operation(op))
1571 .collect::<Result<Vec<_>, _>>()?;
1572 if let Some(buffer) = self.open_buffers.get_mut(&buffer_id) {
1573 if let Some(buffer) = buffer.upgrade(cx) {
1574 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
1575 }
1576 }
1577 Ok(())
1578 }
1579
1580 pub fn handle_update_buffer_file(
1581 &mut self,
1582 envelope: TypedEnvelope<proto::UpdateBufferFile>,
1583 _: Arc<Client>,
1584 cx: &mut ModelContext<Self>,
1585 ) -> Result<()> {
1586 let payload = envelope.payload.clone();
1587 let buffer_id = payload.buffer_id as usize;
1588 let file = payload.file.ok_or_else(|| anyhow!("invalid file"))?;
1589 let worktree = self
1590 .worktree_for_id(WorktreeId::from_proto(file.worktree_id), cx)
1591 .ok_or_else(|| anyhow!("no such worktree"))?;
1592 let file = File::from_proto(file, worktree.clone(), cx)?;
1593 let buffer = self
1594 .open_buffers
1595 .get_mut(&buffer_id)
1596 .and_then(|b| b.upgrade(cx))
1597 .ok_or_else(|| anyhow!("no such buffer"))?;
1598 buffer.update(cx, |buffer, cx| {
1599 buffer.file_updated(Box::new(file), cx).detach();
1600 });
1601
1602 Ok(())
1603 }
1604
    /// Handles a guest's request to save a buffer they have open. The save is
    /// performed on the host and the resulting version/mtime are sent back so
    /// the guest can mark its replica clean; failures are logged.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let project_id = self.remote_id().ok_or_else(|| anyhow!("not connected"))?;
        // Only buffers previously shared with this peer may be saved by it.
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let receipt = envelope.receipt();
        let buffer_id = envelope.payload.buffer_id;
        // The save has to run on the foreground (model) executor; the
        // response can then be sent from the background.
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();
        Ok(())
    }
1647
    /// Handles a guest's request to format a buffer they have open. The
    /// format runs on the host; an `Ack` (or an error) is sent back once the
    /// formatting edits have been transmitted.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        // Only buffers previously shared with this peer may be formatted.
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();
        Ok(())
    }
1686
    /// Handles a guest's completion request for a buffer they have open: asks
    /// the buffer for completions at the given anchor and responds with the
    /// serialized results, or with the error if completion fails.
    fn handle_get_completions(
        &mut self,
        envelope: TypedEnvelope<proto::GetCompletions>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        // Only buffers previously shared with this peer may be queried.
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let position = envelope
            .payload
            .position
            .and_then(language::proto::deserialize_anchor)
            .ok_or_else(|| anyhow!("invalid position"))?;
        cx.spawn(|_, mut cx| async move {
            match buffer
                .update(&mut cx, |buffer, cx| buffer.completions(position, cx))
                .await
            {
                Ok(completions) => {
                    rpc.respond(
                        receipt,
                        proto::GetCompletionsResponse {
                            completions: completions
                                .into_iter()
                                .map(|completion| proto::Completion {
                                    old_start: Some(language::proto::serialize_anchor(
                                        &completion.old_range.start,
                                    )),
                                    old_end: Some(language::proto::serialize_anchor(
                                        &completion.old_range.end,
                                    )),
                                    new_text: completion.new_text,
                                    // The raw LSP completion is forwarded as
                                    // JSON so the guest can resolve it later.
                                    lsp_completion: serde_json::to_vec(&completion.lsp_completion)
                                        .unwrap(),
                                })
                                .collect(),
                        },
                    )
                    .await
                }
                Err(error) => {
                    rpc.respond_with_error(
                        receipt,
                        proto::Error {
                            message: error.to_string(),
                        },
                    )
                    .await
                }
            }
        })
        .detach_and_log_err(cx);
        Ok(())
    }
1746
    /// Handles a guest's go-to-definition request: runs the query on the host
    /// and responds with the definitions, serializing each target buffer for
    /// the requesting peer (full state on first send, id afterwards).
    pub fn handle_get_definition(
        &mut self,
        envelope: TypedEnvelope<proto::GetDefinition>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        // Only buffers previously shared with this peer may be queried.
        let source_buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let position = envelope
            .payload
            .position
            .and_then(deserialize_anchor)
            .ok_or_else(|| anyhow!("invalid position"))?;
        // Reject anchors our replica can't resolve (e.g. referencing edits we
        // haven't received yet) before starting the query.
        if !source_buffer.read(cx).can_resolve(&position) {
            return Err(anyhow!("cannot resolve position"));
        }

        let definitions = self.definition(&source_buffer, position, cx);
        cx.spawn(|this, mut cx| async move {
            let definitions = definitions.await?;
            let mut response = proto::GetDefinitionResponse {
                definitions: Default::default(),
            };
            this.update(&mut cx, |this, cx| {
                for definition in definitions {
                    let buffer =
                        this.serialize_buffer_for_peer(&definition.target_buffer, sender_id, cx);
                    response.definitions.push(proto::Definition {
                        target_start: Some(serialize_anchor(&definition.target_range.start)),
                        target_end: Some(serialize_anchor(&definition.target_range.end)),
                        buffer: Some(buffer),
                    });
                }
            });
            rpc.respond(receipt, response).await?;
            Ok::<_, anyhow::Error>(())
        })
        .detach_and_log_err(cx);

        Ok(())
    }
1793
    /// Handles a guest's request to open a buffer by project path: opens (or
    /// reuses) the buffer on the host and responds with its serialized form
    /// (full state on first send to this peer, id afterwards).
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();
        let peer_id = envelope.original_sender_id()?;
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        let open_buffer = self.open_buffer(
            ProjectPath {
                worktree_id,
                path: PathBuf::from(envelope.payload.path).into(),
            },
            cx,
        );
        cx.spawn(|this, mut cx| {
            async move {
                let buffer = open_buffer.await?;
                let buffer = this.update(&mut cx, |this, cx| {
                    this.serialize_buffer_for_peer(&buffer, peer_id, cx)
                });
                rpc.respond(
                    receipt,
                    proto::OpenBufferResponse {
                        buffer: Some(buffer),
                    },
                )
                .await
            }
            .log_err()
        })
        .detach();
        Ok(())
    }
1829
1830 fn serialize_buffer_for_peer(
1831 &mut self,
1832 buffer: &ModelHandle<Buffer>,
1833 peer_id: PeerId,
1834 cx: &AppContext,
1835 ) -> proto::Buffer {
1836 let buffer_id = buffer.read(cx).remote_id();
1837 let shared_buffers = self.shared_buffers.entry(peer_id).or_default();
1838 match shared_buffers.entry(buffer_id) {
1839 hash_map::Entry::Occupied(_) => proto::Buffer {
1840 variant: Some(proto::buffer::Variant::Id(buffer_id)),
1841 },
1842 hash_map::Entry::Vacant(entry) => {
1843 entry.insert(buffer.clone());
1844 proto::Buffer {
1845 variant: Some(proto::buffer::Variant::State(buffer.read(cx).to_proto())),
1846 }
1847 }
1848 }
1849 }
1850
    /// Materializes a buffer received over RPC: either looks up an already
    /// open buffer by id, or constructs a new one from the transmitted state
    /// (resolving its file against the matching worktree) and registers it.
    fn deserialize_remote_buffer(
        &mut self,
        buffer: proto::Buffer,
        cx: &mut ModelContext<Self>,
    ) -> Result<ModelHandle<Buffer>> {
        match buffer.variant.ok_or_else(|| anyhow!("missing buffer"))? {
            // Bare id: the peer already sent us this buffer's state earlier.
            proto::buffer::Variant::Id(id) => self
                .open_buffers
                .get(&(id as usize))
                .and_then(|buffer| buffer.upgrade(cx))
                .ok_or_else(|| anyhow!("no buffer exists for id {}", id)),
            // Full state: reconstruct the buffer, attaching its file if the
            // message carries one.
            proto::buffer::Variant::State(mut buffer) => {
                let mut buffer_worktree = None;
                let mut buffer_file = None;
                if let Some(file) = buffer.file.take() {
                    let worktree_id = WorktreeId::from_proto(file.worktree_id);
                    let worktree = self
                        .worktree_for_id(worktree_id, cx)
                        .ok_or_else(|| anyhow!("no worktree found for id {}", file.worktree_id))?;
                    buffer_file = Some(Box::new(File::from_proto(file, worktree.clone(), cx)?)
                        as Box<dyn language::File>);
                    buffer_worktree = Some(worktree);
                }

                let buffer = cx.add_model(|cx| {
                    Buffer::from_proto(self.replica_id(), buffer, buffer_file, cx).unwrap()
                });
                self.register_buffer(&buffer, buffer_worktree.as_ref(), cx)?;
                Ok(buffer)
            }
        }
    }
1883
1884 pub fn handle_close_buffer(
1885 &mut self,
1886 envelope: TypedEnvelope<proto::CloseBuffer>,
1887 _: Arc<Client>,
1888 cx: &mut ModelContext<Self>,
1889 ) -> anyhow::Result<()> {
1890 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1891 shared_buffers.remove(&envelope.payload.buffer_id);
1892 cx.notify();
1893 }
1894 Ok(())
1895 }
1896
1897 pub fn handle_buffer_saved(
1898 &mut self,
1899 envelope: TypedEnvelope<proto::BufferSaved>,
1900 _: Arc<Client>,
1901 cx: &mut ModelContext<Self>,
1902 ) -> Result<()> {
1903 let payload = envelope.payload.clone();
1904 let buffer = self
1905 .open_buffers
1906 .get(&(payload.buffer_id as usize))
1907 .and_then(|buffer| buffer.upgrade(cx));
1908 if let Some(buffer) = buffer {
1909 buffer.update(cx, |buffer, cx| {
1910 let version = payload.version.try_into()?;
1911 let mtime = payload
1912 .mtime
1913 .ok_or_else(|| anyhow!("missing mtime"))?
1914 .into();
1915 buffer.did_save(version, mtime, None, cx);
1916 Result::<_, anyhow::Error>::Ok(())
1917 })?;
1918 }
1919 Ok(())
1920 }
1921
1922 pub fn handle_buffer_reloaded(
1923 &mut self,
1924 envelope: TypedEnvelope<proto::BufferReloaded>,
1925 _: Arc<Client>,
1926 cx: &mut ModelContext<Self>,
1927 ) -> Result<()> {
1928 let payload = envelope.payload.clone();
1929 let buffer = self
1930 .open_buffers
1931 .get(&(payload.buffer_id as usize))
1932 .and_then(|buffer| buffer.upgrade(cx));
1933 if let Some(buffer) = buffer {
1934 buffer.update(cx, |buffer, cx| {
1935 let version = payload.version.try_into()?;
1936 let mtime = payload
1937 .mtime
1938 .ok_or_else(|| anyhow!("missing mtime"))?
1939 .into();
1940 buffer.did_reload(version, mtime, cx);
1941 Result::<_, anyhow::Error>::Ok(())
1942 })?;
1943 }
1944 Ok(())
1945 }
1946
    /// Fuzzy-matches `query` against the file paths of all non-weak
    /// worktrees, returning up to `max_results` matches. Worktree snapshots
    /// are captured up front so the matching itself can run without touching
    /// app state; the search can be aborted via `cancel_flag`.
    pub fn match_paths<'a>(
        &self,
        query: &'a str,
        include_ignored: bool,
        smart_case: bool,
        max_results: usize,
        cancel_flag: &'a AtomicBool,
        cx: &AppContext,
    ) -> impl 'a + Future<Output = Vec<PathMatch>> {
        let worktrees = self
            .worktrees(cx)
            .filter(|worktree| !worktree.read(cx).is_weak())
            .collect::<Vec<_>>();
        // Root names only need to be shown when results could come from more
        // than one worktree.
        let include_root_name = worktrees.len() > 1;
        let candidate_sets = worktrees
            .into_iter()
            .map(|worktree| CandidateSet {
                snapshot: worktree.read(cx).snapshot(),
                include_ignored,
                include_root_name,
            })
            .collect::<Vec<_>>();

        let background = cx.background().clone();
        async move {
            fuzzy::match_paths(
                candidate_sets.as_slice(),
                query,
                smart_case,
                max_results,
                cancel_flag,
                background,
            )
            .await
        }
    }
1983}
1984
1985impl WorktreeHandle {
1986 pub fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Worktree>> {
1987 match self {
1988 WorktreeHandle::Strong(handle) => Some(handle.clone()),
1989 WorktreeHandle::Weak(handle) => handle.upgrade(cx),
1990 }
1991 }
1992}
1993
/// A fuzzy-matching candidate source backed by one worktree snapshot.
struct CandidateSet {
    snapshot: Snapshot,
    // Whether gitignored files participate in matching.
    include_ignored: bool,
    // Whether matched paths are prefixed with the worktree's root name
    // (used to disambiguate results when searching multiple worktrees).
    include_root_name: bool,
}
1999
impl<'a> PathMatchCandidateSet<'a> for CandidateSet {
    type Candidates = CandidateSetIter<'a>;

    fn id(&self) -> usize {
        self.snapshot.id().to_usize()
    }

    fn len(&self) -> usize {
        // Ignored files only count when the search includes them.
        if self.include_ignored {
            self.snapshot.file_count()
        } else {
            self.snapshot.visible_file_count()
        }
    }

    fn prefix(&self) -> Arc<str> {
        // A worktree rooted at a single file is matched by its own name; a
        // directory worktree contributes a "root/" prefix only when results
        // from several worktrees need disambiguating.
        if self.snapshot.root_entry().map_or(false, |e| e.is_file()) {
            self.snapshot.root_name().into()
        } else if self.include_root_name {
            format!("{}/", self.snapshot.root_name()).into()
        } else {
            "".into()
        }
    }

    fn candidates(&'a self, start: usize) -> Self::Candidates {
        CandidateSetIter {
            traversal: self.snapshot.files(self.include_ignored, start),
        }
    }
}
2031
/// Iterator over a `CandidateSet`'s file entries, adapting a worktree file
/// traversal into fuzzy-match candidates.
struct CandidateSetIter<'a> {
    traversal: Traversal<'a>,
}
2035
2036impl<'a> Iterator for CandidateSetIter<'a> {
2037 type Item = PathMatchCandidate<'a>;
2038
2039 fn next(&mut self) -> Option<Self::Item> {
2040 self.traversal.next().map(|entry| {
2041 if let EntryKind::File(char_bag) = entry.kind {
2042 PathMatchCandidate {
2043 path: &entry.path,
2044 char_bag,
2045 }
2046 } else {
2047 unreachable!()
2048 }
2049 })
2050 }
2051}
2052
impl Entity for Project {
    type Event = Event;

    // Notify the server when the project model is dropped: a host
    // unregisters its project, a guest leaves it. Failures are only logged,
    // since the model is being torn down regardless.
    fn release(&mut self, cx: &mut gpui::MutableAppContext) {
        match &self.client_state {
            ProjectClientState::Local { remote_id_rx, .. } => {
                // Only unregister if the project was ever assigned a remote id.
                if let Some(project_id) = *remote_id_rx.borrow() {
                    let rpc = self.client.clone();
                    cx.spawn(|_| async move {
                        if let Err(err) = rpc.send(proto::UnregisterProject { project_id }).await {
                            log::error!("error unregistering project: {}", err);
                        }
                    })
                    .detach();
                }
            }
            ProjectClientState::Remote { remote_id, .. } => {
                let rpc = self.client.clone();
                let project_id = *remote_id;
                cx.spawn(|_| async move {
                    if let Err(err) = rpc.send(proto::LeaveProject { project_id }).await {
                        log::error!("error leaving project: {}", err);
                    }
                })
                .detach();
            }
        }
    }

    // On quit, shut down all language servers and wait for them in parallel
    // so the app doesn't leave orphaned server processes behind.
    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        let shutdown_futures = self
            .language_servers
            .drain()
            .filter_map(|(_, server)| server.shutdown())
            .collect::<Vec<_>>();
        Some(
            async move {
                futures::future::join_all(shutdown_futures).await;
            }
            .boxed(),
        )
    }
}
2101
2102impl Collaborator {
2103 fn from_proto(
2104 message: proto::Collaborator,
2105 user_store: &ModelHandle<UserStore>,
2106 cx: &mut AsyncAppContext,
2107 ) -> impl Future<Output = Result<Self>> {
2108 let user = user_store.update(cx, |user_store, cx| {
2109 user_store.fetch_user(message.user_id, cx)
2110 });
2111
2112 async move {
2113 Ok(Self {
2114 peer_id: PeerId(message.peer_id),
2115 user: user.await?,
2116 replica_id: message.replica_id as ReplicaId,
2117 })
2118 }
2119 }
2120}
2121
2122impl<P: AsRef<Path>> From<(WorktreeId, P)> for ProjectPath {
2123 fn from((worktree_id, path): (WorktreeId, P)) -> Self {
2124 Self {
2125 worktree_id,
2126 path: path.as_ref().into(),
2127 }
2128 }
2129}
2130
2131#[cfg(test)]
2132mod tests {
2133 use super::{Event, *};
2134 use client::test::FakeHttpClient;
2135 use fs::RealFs;
2136 use futures::StreamExt;
2137 use gpui::{test::subscribe, TestAppContext};
2138 use language::{
2139 tree_sitter_rust, AnchorRangeExt, Diagnostic, LanguageConfig, LanguageRegistry,
2140 LanguageServerConfig, Point,
2141 };
2142 use lsp::Url;
2143 use serde_json::json;
2144 use std::{cell::RefCell, os::unix, path::PathBuf, rc::Rc};
2145 use unindent::Unindent as _;
2146 use util::test::temp_tree;
2147 use worktree::WorktreeHandle as _;
2148
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        // Open the worktree through a symlink, and add a symlinked
        // subdirectory, to verify that scanning follows links.
        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let project = build_project(Arc::new(RealFs), &mut cx);

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(&root_link_path, false, cx)
            })
            .await
            .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert_eq!(tree.file_count(), 5);
            // Both paths resolve to the same file through the symlink.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );
        });

        // Fuzzy-match a query against the scanned paths.
        let cancel_flag = Default::default();
        let results = project
            .read_with(&cx, |project, cx| {
                project.match_paths("bna", false, false, 10, &cancel_flag, cx)
            })
            .await;
        assert_eq!(
            results
                .into_iter()
                .map(|result| result.path)
                .collect::<Vec<Arc<Path>>>(),
            vec![
                PathBuf::from("banana/carrot/date").into(),
                PathBuf::from("banana/carrot/endive").into(),
            ]
        );
    }
2211
    // Drives a fake language server through disk-based-diagnostics progress
    // notifications and a diagnostics publish, asserting the exact sequence
    // of project events and the resulting diagnostic entries in the buffer.
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();

        // Register a Rust language backed by the fake server so opening a
        // ".rs" buffer starts it.
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        path: Path::new("b.rs").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();

        // Subscribe before driving the server so no events are missed.
        let mut events = subscribe(&project, &mut cx);

        fake_server.start_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsStarted
        );

        // Nested start/end pairs while diagnostics are already running: the
        // asserts below show no additional Started event is emitted for them.
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiagnosticsUpdated(ProjectPath {
                worktree_id,
                path: Arc::from(Path::new("a.rs"))
            })
        );

        // Ending the remaining progress runs produces Updated then Finished.
        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdated
        );
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsFinished
        );

        // The published diagnostic should now be queryable from the buffer.
        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
2344
2345 #[gpui::test]
2346 async fn test_search_worktree_without_files(mut cx: gpui::TestAppContext) {
2347 let dir = temp_tree(json!({
2348 "root": {
2349 "dir1": {},
2350 "dir2": {
2351 "dir3": {}
2352 }
2353 }
2354 }));
2355
2356 let project = build_project(Arc::new(RealFs), &mut cx);
2357 let (tree, _) = project
2358 .update(&mut cx, |project, cx| {
2359 project.find_or_create_local_worktree(&dir.path(), false, cx)
2360 })
2361 .await
2362 .unwrap();
2363
2364 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2365 .await;
2366
2367 let cancel_flag = Default::default();
2368 let results = project
2369 .read_with(&cx, |project, cx| {
2370 project.match_paths("dir", false, false, 10, &cancel_flag, cx)
2371 })
2372 .await;
2373
2374 assert!(results.is_empty());
2375 }
2376
    // Requests go-to-definition through a fake language server. The target
    // lives outside the opened worktree, so the project creates an extra
    // worktree for it (flagged `true` below, and dropped along with the
    // definition handle at the end).
    #[gpui::test]
    async fn test_definition(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;

        // Register a Rust language backed by the fake server.
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "const fn a() { A }",
            "b.rs": "const y: i32 = crate::a()",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        // The worktree is rooted at the single file "b.rs".
        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path().join("b.rs"), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        path: Path::new("").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();
        // Kick off the definition request, then intercept it on the fake
        // server and verify the request parameters before responding.
        let definitions =
            project.update(&mut cx, |project, cx| project.definition(&buffer, 22, cx));
        let (request_id, request) = fake_server
            .receive_request::<lsp::request::GotoDefinition>()
            .await;
        let request_params = request.text_document_position_params;
        assert_eq!(
            request_params.text_document.uri.to_file_path().unwrap(),
            dir.path().join("b.rs")
        );
        assert_eq!(request_params.position, lsp::Position::new(0, 22));

        // Reply with a single location inside "a.rs" (outside the worktree).
        fake_server
            .respond(
                request_id,
                Some(lsp::GotoDefinitionResponse::Scalar(lsp::Location::new(
                    lsp::Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                    lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                ))),
            )
            .await;
        let mut definitions = definitions.await.unwrap();
        assert_eq!(definitions.len(), 1);
        let definition = definitions.pop().unwrap();
        cx.update(|cx| {
            let target_buffer = definition.target_buffer.read(cx);
            assert_eq!(
                target_buffer
                    .file()
                    .unwrap()
                    .as_local()
                    .unwrap()
                    .abs_path(cx),
                dir.path().join("a.rs")
            );
            assert_eq!(definition.target_range.to_offset(target_buffer), 9..10);
            // A second worktree for "a.rs" now exists, marked weak (true).
            assert_eq!(
                list_worktrees(&project, cx),
                [
                    (dir.path().join("b.rs"), false),
                    (dir.path().join("a.rs"), true)
                ]
            );

            drop(definition);
        });
        // Dropping the definition released the weak worktree for "a.rs".
        cx.read(|cx| {
            assert_eq!(
                list_worktrees(&project, cx),
                [(dir.path().join("b.rs"), false)]
            );
        });

        // Lists each worktree's absolute root path and whether it is weak.
        fn list_worktrees(project: &ModelHandle<Project>, cx: &AppContext) -> Vec<(PathBuf, bool)> {
            project
                .read(cx)
                .worktrees(cx)
                .map(|worktree| {
                    let worktree = worktree.read(cx);
                    (
                        worktree.as_local().unwrap().abs_path().to_path_buf(),
                        worktree.is_weak(),
                    )
                })
                .collect::<Vec<_>>()
        }
    }
2501
    // Edits an open buffer and saves it, then checks that the file on the
    // fake filesystem matches the buffer's contents exactly.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new(cx.background()));
        fs.insert_tree(
            "/dir",
            json!({
                "file1": "the old contents",
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let worktree_id = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/dir", false, cx)
            })
            .await
            .unwrap()
            .0
            .read_with(&cx, |tree, _| tree.id());

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
            .await
            .unwrap();
        buffer
            .update(&mut cx, |buffer, cx| {
                assert_eq!(buffer.text(), "the old contents");
                // Prepend a large block of text before saving.
                buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
                buffer.save(cx)
            })
            .await
            .unwrap();

        // The file on disk must match the in-memory buffer after the save.
        let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
2539
    // Same save round-trip as `test_save_file`, but with the worktree rooted
    // at the file itself — the buffer is opened via the empty relative path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new(cx.background()));
        fs.insert_tree(
            "/dir",
            json!({
                "file1": "the old contents",
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        // Root the worktree directly at the file, not its parent directory.
        let worktree_id = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/dir/file1", false, cx)
            })
            .await
            .unwrap()
            .0
            .read_with(&cx, |tree, _| tree.id());

        // In a single-file worktree, the file's path within the tree is "".
        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, ""), cx))
            .await
            .unwrap();
        buffer
            .update(&mut cx, |buffer, cx| {
                buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
                buffer.save(cx)
            })
            .await
            .unwrap();

        // The file on disk must match the in-memory buffer after the save.
        let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
2576
2577 #[gpui::test(retries = 5)]
2578 async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
2579 let dir = temp_tree(json!({
2580 "a": {
2581 "file1": "",
2582 "file2": "",
2583 "file3": "",
2584 },
2585 "b": {
2586 "c": {
2587 "file4": "",
2588 "file5": "",
2589 }
2590 }
2591 }));
2592
2593 let project = build_project(Arc::new(RealFs), &mut cx);
2594 let rpc = project.read_with(&cx, |p, _| p.client.clone());
2595
2596 let (tree, _) = project
2597 .update(&mut cx, |p, cx| {
2598 p.find_or_create_local_worktree(dir.path(), false, cx)
2599 })
2600 .await
2601 .unwrap();
2602 let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
2603
2604 let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
2605 let buffer = project.update(cx, |p, cx| p.open_buffer((worktree_id, path), cx));
2606 async move { buffer.await.unwrap() }
2607 };
2608 let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
2609 tree.read_with(cx, |tree, _| {
2610 tree.entry_for_path(path)
2611 .expect(&format!("no entry for path {}", path))
2612 .id
2613 })
2614 };
2615
2616 let buffer2 = buffer_for_path("a/file2", &mut cx).await;
2617 let buffer3 = buffer_for_path("a/file3", &mut cx).await;
2618 let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
2619 let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
2620
2621 let file2_id = id_for_path("a/file2", &cx);
2622 let file3_id = id_for_path("a/file3", &cx);
2623 let file4_id = id_for_path("b/c/file4", &cx);
2624
2625 // Wait for the initial scan.
2626 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2627 .await;
2628
2629 // Create a remote copy of this worktree.
2630 let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
2631 let (remote, load_task) = cx.update(|cx| {
2632 Worktree::remote(
2633 1,
2634 1,
2635 initial_snapshot.to_proto(&Default::default(), Default::default()),
2636 rpc.clone(),
2637 cx,
2638 )
2639 });
2640 load_task.await;
2641
2642 cx.read(|cx| {
2643 assert!(!buffer2.read(cx).is_dirty());
2644 assert!(!buffer3.read(cx).is_dirty());
2645 assert!(!buffer4.read(cx).is_dirty());
2646 assert!(!buffer5.read(cx).is_dirty());
2647 });
2648
2649 // Rename and delete files and directories.
2650 tree.flush_fs_events(&cx).await;
2651 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
2652 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
2653 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
2654 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
2655 tree.flush_fs_events(&cx).await;
2656
2657 let expected_paths = vec![
2658 "a",
2659 "a/file1",
2660 "a/file2.new",
2661 "b",
2662 "d",
2663 "d/file3",
2664 "d/file4",
2665 ];
2666
2667 cx.read(|app| {
2668 assert_eq!(
2669 tree.read(app)
2670 .paths()
2671 .map(|p| p.to_str().unwrap())
2672 .collect::<Vec<_>>(),
2673 expected_paths
2674 );
2675
2676 assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
2677 assert_eq!(id_for_path("d/file3", &cx), file3_id);
2678 assert_eq!(id_for_path("d/file4", &cx), file4_id);
2679
2680 assert_eq!(
2681 buffer2.read(app).file().unwrap().path().as_ref(),
2682 Path::new("a/file2.new")
2683 );
2684 assert_eq!(
2685 buffer3.read(app).file().unwrap().path().as_ref(),
2686 Path::new("d/file3")
2687 );
2688 assert_eq!(
2689 buffer4.read(app).file().unwrap().path().as_ref(),
2690 Path::new("d/file4")
2691 );
2692 assert_eq!(
2693 buffer5.read(app).file().unwrap().path().as_ref(),
2694 Path::new("b/c/file5")
2695 );
2696
2697 assert!(!buffer2.read(app).file().unwrap().is_deleted());
2698 assert!(!buffer3.read(app).file().unwrap().is_deleted());
2699 assert!(!buffer4.read(app).file().unwrap().is_deleted());
2700 assert!(buffer5.read(app).file().unwrap().is_deleted());
2701 });
2702
2703 // Update the remote worktree. Check that it becomes consistent with the
2704 // local worktree.
2705 remote.update(&mut cx, |remote, cx| {
2706 let update_message =
2707 tree.read(cx)
2708 .snapshot()
2709 .build_update(&initial_snapshot, 1, 1, true);
2710 remote
2711 .as_remote_mut()
2712 .unwrap()
2713 .snapshot
2714 .apply_remote_update(update_message)
2715 .unwrap();
2716
2717 assert_eq!(
2718 remote
2719 .paths()
2720 .map(|p| p.to_str().unwrap())
2721 .collect::<Vec<_>>(),
2722 expected_paths
2723 );
2724 });
2725 }
2726
    // Opening the same path several times — including concurrently — must
    // always yield the same shared buffer instance per path.
    #[gpui::test]
    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new(cx.background()));
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.txt": "a-contents",
                "b.txt": "b-contents",
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let worktree_id = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/the-dir", false, cx)
            })
            .await
            .unwrap()
            .0
            .read_with(&cx, |tree, _| tree.id());

        // Spawn multiple tasks to open paths, repeating some paths.
        let (buffer_a_1, buffer_b, buffer_a_2) = project.update(&mut cx, |p, cx| {
            (
                p.open_buffer((worktree_id, "a.txt"), cx),
                p.open_buffer((worktree_id, "b.txt"), cx),
                p.open_buffer((worktree_id, "a.txt"), cx),
            )
        });

        let buffer_a_1 = buffer_a_1.await.unwrap();
        let buffer_a_2 = buffer_a_2.await.unwrap();
        let buffer_b = buffer_b.await.unwrap();
        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");

        // There is only one buffer per path.
        let buffer_a_id = buffer_a_1.id();
        assert_eq!(buffer_a_2.id(), buffer_a_id);

        // Open the same path again while it is still open.
        drop(buffer_a_1);
        let buffer_a_3 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))
            .await
            .unwrap();

        // There's still only one buffer per path.
        assert_eq!(buffer_a_3.id(), buffer_a_id);
    }
2778
    // Tracks a buffer's dirty state through edits, a save, and on-disk file
    // deletion, asserting the exact sequence of events emitted at each step.
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |worktree, _| worktree.id());

        worktree.flush_fs_events(&cx).await;
        worktree
            .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
            .await;

        let buffer1 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
            .await
            .unwrap();
        // Collects every buffer event so exact sequences can be asserted.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file2"), cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file3"), cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        worktree.flush_fs_events(&cx).await;
        // Dirty the buffer first, then delete its file.
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
2915
    // A clean buffer reloads when its file changes on disk; a dirty buffer
    // keeps its edits and is marked as conflicted instead.
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        worktree
            .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = project
            .update(&mut cx, |p, cx| {
                p.open_buffer((worktree_id, "the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3015
    // Publishes diagnostics whose `related_information` links hints to their
    // primary errors, then checks that the project groups them: each group id
    // contains one primary entry plus its supplemental (non-primary) hints.
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new(cx.background()));
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/the-dir", false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        // Build a publish message with two logical groups:
        // - "error 1" (warning) and its hint, cross-referencing each other,
        // - "error 2" (error) and its two hints, likewise cross-referenced.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        project
            .update(&mut cx, |p, cx| {
                p.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All five entries, ordered by position; group 0 is "error 1" and its
        // hint, group 1 is "error 2" and its two hints.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be fetched on its own by group id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
3274
3275 fn build_project(fs: Arc<dyn Fs>, cx: &mut TestAppContext) -> ModelHandle<Project> {
3276 let languages = Arc::new(LanguageRegistry::new());
3277 let http_client = FakeHttpClient::with_404_response();
3278 let client = client::Client::new(http_client.clone());
3279 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3280 cx.update(|cx| Project::local(client, user_store, languages, fs, cx))
3281 }
3282}