1pub mod fs;
2mod ignore;
3pub mod worktree;
4
5use anyhow::{anyhow, Result};
6use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore};
7use clock::ReplicaId;
8use collections::{hash_map, HashMap, HashSet};
9use futures::Future;
10use fuzzy::{PathMatch, PathMatchCandidate, PathMatchCandidateSet};
11use gpui::{
12 AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task,
13 WeakModelHandle,
14};
15use language::{
16 proto::{deserialize_anchor, serialize_anchor},
17 range_from_lsp, Bias, Buffer, Diagnostic, DiagnosticEntry, File as _, Language,
18 LanguageRegistry, PointUtf16, ToOffset, ToPointUtf16,
19};
20use lsp::{DiagnosticSeverity, LanguageServer};
21use postage::{prelude::Stream, watch};
22use smol::block_on;
23use std::{
24 convert::TryInto,
25 ops::Range,
26 path::{Path, PathBuf},
27 sync::{atomic::AtomicBool, Arc},
28};
29use util::{post_inc, ResultExt, TryFutureExt as _};
30
31pub use fs::*;
32pub use worktree::*;
33
/// The set of worktrees, open buffers, collaborators, and language servers
/// that make up one open project — either hosted locally or joined remotely.
pub struct Project {
    // Worktrees may be held weakly (see `WorktreeHandle`); dropped ones are
    // skipped when iterating.
    worktrees: Vec<WorktreeHandle>,
    // The project entry most recently activated, if any.
    active_entry: Option<ProjectEntry>,
    // Used to detect a buffer's language from its path.
    languages: Arc<LanguageRegistry>,
    // Running language servers, keyed by (worktree id, language name).
    language_servers: HashMap<(WorktreeId, String), Arc<LanguageServer>>,
    client: Arc<client::Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Whether this replica is the host (`Local`) or a guest (`Remote`).
    client_state: ProjectClientState,
    collaborators: HashMap<PeerId, Collaborator>,
    // RPC subscriptions tied to the current remote project id; rebuilt in
    // `set_remote_id`.
    subscriptions: Vec<client::Subscription>,
    // Count of language servers currently running disk-based diagnostics.
    language_servers_with_diagnostics_running: isize,
    // Buffers currently open, keyed by the buffer's remote id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // In-flight buffer loads; later openers of the same path wait on the
    // recorded watch channel instead of loading again (see `open_buffer`).
    loading_buffers: HashMap<
        ProjectPath,
        postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
    >,
    // Buffers handed to each guest peer, kept alive per peer while shared.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
}
53
/// A worktree reference that is either kept alive by the project (`Strong`)
/// or allowed to drop when no longer referenced elsewhere (`Weak`).
enum WorktreeHandle {
    Strong(ModelHandle<Worktree>),
    Weak(WeakModelHandle<Worktree>),
}
58
/// Distinguishes the hosting side of a project from a joined replica.
enum ProjectClientState {
    /// This instance owns the project and its worktrees.
    Local {
        /// Whether collaborators have been granted access via `share`.
        is_shared: bool,
        /// Publishes the server-assigned project id (`None` while offline).
        remote_id_tx: watch::Sender<Option<u64>>,
        remote_id_rx: watch::Receiver<Option<u64>>,
        /// Registers/clears the remote id as connection status changes.
        _maintain_remote_id_task: Task<Option<()>>,
    },
    /// This instance joined a project hosted by another peer.
    Remote {
        /// Set when the host stops sharing; the replica becomes read-only
        /// (see `is_read_only`).
        sharing_has_stopped: bool,
        remote_id: u64,
        replica_id: ReplicaId,
    },
}
72
/// Another connected participant in a shared project.
#[derive(Clone, Debug)]
pub struct Collaborator {
    pub user: Arc<User>,
    pub peer_id: PeerId,
    /// Replica id under which this peer's edits are applied.
    pub replica_id: ReplicaId,
}
79
/// Notifications emitted by a `Project` for observers.
#[derive(Clone, Debug, PartialEq)]
pub enum Event {
    ActiveEntryChanged(Option<ProjectEntry>),
    WorktreeRemoved(WorktreeId),
    /// A language server began a run of disk-based diagnostics.
    DiskBasedDiagnosticsStarted,
    DiskBasedDiagnosticsUpdated,
    /// A language server finished its disk-based diagnostics run.
    DiskBasedDiagnosticsFinished,
    /// Diagnostics changed for the given file.
    DiagnosticsUpdated(ProjectPath),
}
89
/// Identifies a file by its worktree plus the path relative to that
/// worktree's root.
#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct ProjectPath {
    pub worktree_id: WorktreeId,
    pub path: Arc<Path>,
}
95
/// Counts of primary diagnostics for one file, bucketed by severity.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct DiagnosticSummary {
    pub error_count: usize,
    pub warning_count: usize,
    pub info_count: usize,
    pub hint_count: usize,
}
103
/// The result of a go-to-definition request: a buffer and the range within
/// it where the symbol is defined.
#[derive(Debug)]
pub struct Definition {
    pub target_buffer: ModelHandle<Buffer>,
    pub target_range: Range<language::Anchor>,
}
109
110impl DiagnosticSummary {
111 fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
112 let mut this = Self {
113 error_count: 0,
114 warning_count: 0,
115 info_count: 0,
116 hint_count: 0,
117 };
118
119 for entry in diagnostics {
120 if entry.diagnostic.is_primary {
121 match entry.diagnostic.severity {
122 DiagnosticSeverity::ERROR => this.error_count += 1,
123 DiagnosticSeverity::WARNING => this.warning_count += 1,
124 DiagnosticSeverity::INFORMATION => this.info_count += 1,
125 DiagnosticSeverity::HINT => this.hint_count += 1,
126 _ => {}
127 }
128 }
129 }
130
131 this
132 }
133
134 pub fn to_proto(&self, path: Arc<Path>) -> proto::DiagnosticSummary {
135 proto::DiagnosticSummary {
136 path: path.to_string_lossy().to_string(),
137 error_count: self.error_count as u32,
138 warning_count: self.warning_count as u32,
139 info_count: self.info_count as u32,
140 hint_count: self.hint_count as u32,
141 }
142 }
143}
144
/// Identifies a single entry (file or directory) within a worktree.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ProjectEntry {
    pub worktree_id: WorktreeId,
    pub entry_id: usize,
}
150
151impl Project {
    /// Creates a project hosted by this instance, backed by the local
    /// filesystem. Spawns a background task that registers the project with
    /// the server whenever the client connects, and clears the remote id
    /// when it disconnects.
    pub fn local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut MutableAppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let (remote_id_tx, remote_id_rx) = watch::channel();
            // Track connection status for as long as the project exists; the
            // task holds only a weak handle, so it doesn't keep the project
            // alive.
            let _maintain_remote_id_task = cx.spawn_weak({
                let rpc = client.clone();
                move |this, mut cx| {
                    async move {
                        let mut status = rpc.status();
                        while let Some(status) = status.recv().await {
                            if let Some(this) = this.upgrade(&cx) {
                                // On (re)connection, register to obtain a
                                // server-side id; on disconnect, fall back to
                                // having no remote id.
                                let remote_id = if let client::Status::Connected { .. } = status {
                                    let response = rpc.request(proto::RegisterProject {}).await?;
                                    Some(response.project_id)
                                } else {
                                    None
                                };

                                if let Some(project_id) = remote_id {
                                    // Register every current worktree under
                                    // the new project id before publishing it.
                                    let mut registrations = Vec::new();
                                    this.update(&mut cx, |this, cx| {
                                        for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                                            registrations.push(worktree.update(
                                                cx,
                                                |worktree, cx| {
                                                    let worktree = worktree.as_local_mut().unwrap();
                                                    worktree.register(project_id, cx)
                                                },
                                            ));
                                        }
                                    });
                                    for registration in registrations {
                                        registration.await?;
                                    }
                                }
                                this.update(&mut cx, |this, cx| this.set_remote_id(remote_id, cx));
                            }
                        }
                        Ok(())
                    }
                    .log_err()
                }
            });

            Self {
                worktrees: Default::default(),
                collaborators: Default::default(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                client_state: ProjectClientState::Local {
                    is_shared: false,
                    remote_id_tx,
                    remote_id_rx,
                    _maintain_remote_id_task,
                },
                subscriptions: Vec::new(),
                active_entry: None,
                languages,
                client,
                user_store,
                fs,
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            }
        })
    }
224
    /// Joins a project hosted by another peer: authenticates and connects
    /// the client, instantiates the project's worktrees, loads collaborator
    /// user records, and subscribes to the RPC messages that keep this
    /// replica up to date.
    pub async fn remote(
        remote_id: u64,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        client.authenticate_and_connect(&cx).await?;

        let response = client
            .request(proto::JoinProject {
                project_id: remote_id,
            })
            .await?;

        let replica_id = response.replica_id as ReplicaId;

        // Instantiate remote worktrees; their contents load in the
        // background (the load tasks are detached).
        let mut worktrees = Vec::new();
        for worktree in response.worktrees {
            let (worktree, load_task) = cx
                .update(|cx| Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx));
            worktrees.push(worktree);
            load_task.detach();
        }

        // Ensure all collaborators' user records are loaded before building
        // the collaborator map.
        let user_ids = response
            .collaborators
            .iter()
            .map(|peer| peer.user_id)
            .collect();
        user_store
            .update(cx, |user_store, cx| user_store.load_users(user_ids, cx))
            .await?;
        let mut collaborators = HashMap::default();
        for message in response.collaborators {
            let collaborator = Collaborator::from_proto(message, &user_store, cx).await?;
            collaborators.insert(collaborator.peer_id, collaborator);
        }

        Ok(cx.add_model(|cx| {
            let mut this = Self {
                worktrees: Vec::new(),
                open_buffers: Default::default(),
                loading_buffers: Default::default(),
                shared_buffers: Default::default(),
                active_entry: None,
                collaborators,
                languages,
                user_store,
                fs,
                // Dropping these subscriptions unregisters the handlers.
                subscriptions: vec![
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unshare_project),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_share_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unregister_worktree),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_update_diagnostic_summary,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updating,
                    ),
                    client.subscribe_to_entity(
                        remote_id,
                        cx,
                        Self::handle_disk_based_diagnostics_updated,
                    ),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer_file),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_reloaded),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
                ],
                client,
                client_state: ProjectClientState::Remote {
                    sharing_has_stopped: false,
                    remote_id,
                    replica_id,
                },
                language_servers_with_diagnostics_running: 0,
                language_servers: Default::default(),
            };
            for worktree in worktrees {
                this.add_worktree(&worktree, cx);
            }
            this
        }))
    }
318
    /// Stores the server-assigned project id (for local projects) and
    /// rebuilds the RPC subscriptions for that id. Passing `None` drops all
    /// subscriptions.
    fn set_remote_id(&mut self, remote_id: Option<u64>, cx: &mut ModelContext<Self>) {
        if let ProjectClientState::Local { remote_id_tx, .. } = &mut self.client_state {
            *remote_id_tx.borrow_mut() = remote_id;
        }

        // Dropping the old subscriptions unregisters their handlers.
        self.subscriptions.clear();
        if let Some(remote_id) = remote_id {
            let client = &self.client;
            self.subscriptions.extend([
                client.subscribe_to_entity(remote_id, cx, Self::handle_open_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_close_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
                client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
                client.subscribe_to_entity(remote_id, cx, Self::handle_update_worktree),
                client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_save_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
                client.subscribe_to_entity(remote_id, cx, Self::handle_format_buffer),
                client.subscribe_to_entity(remote_id, cx, Self::handle_get_completions),
                client.subscribe_to_entity(
                    remote_id,
                    cx,
                    Self::handle_apply_additional_edits_for_completion,
                ),
                client.subscribe_to_entity(remote_id, cx, Self::handle_get_definition),
            ]);
        }
    }
347
348 pub fn remote_id(&self) -> Option<u64> {
349 match &self.client_state {
350 ProjectClientState::Local { remote_id_rx, .. } => *remote_id_rx.borrow(),
351 ProjectClientState::Remote { remote_id, .. } => Some(*remote_id),
352 }
353 }
354
355 pub fn next_remote_id(&self) -> impl Future<Output = u64> {
356 let mut id = None;
357 let mut watch = None;
358 match &self.client_state {
359 ProjectClientState::Local { remote_id_rx, .. } => watch = Some(remote_id_rx.clone()),
360 ProjectClientState::Remote { remote_id, .. } => id = Some(*remote_id),
361 }
362
363 async move {
364 if let Some(id) = id {
365 return id;
366 }
367 let mut watch = watch.unwrap();
368 loop {
369 let id = *watch.borrow();
370 if let Some(id) = id {
371 return id;
372 }
373 watch.recv().await;
374 }
375 }
376 }
377
378 pub fn replica_id(&self) -> ReplicaId {
379 match &self.client_state {
380 ProjectClientState::Local { .. } => 0,
381 ProjectClientState::Remote { replica_id, .. } => *replica_id,
382 }
383 }
384
    /// The remote participants currently connected to this project, keyed by
    /// peer id.
    pub fn collaborators(&self) -> &HashMap<PeerId, Collaborator> {
        &self.collaborators
    }
388
389 pub fn worktrees<'a>(
390 &'a self,
391 cx: &'a AppContext,
392 ) -> impl 'a + Iterator<Item = ModelHandle<Worktree>> {
393 self.worktrees
394 .iter()
395 .filter_map(move |worktree| worktree.upgrade(cx))
396 }
397
398 pub fn worktree_for_id(
399 &self,
400 id: WorktreeId,
401 cx: &AppContext,
402 ) -> Option<ModelHandle<Worktree>> {
403 self.worktrees(cx)
404 .find(|worktree| worktree.read(cx).id() == id)
405 }
406
    /// Makes a local project available to collaborators: marks it as shared,
    /// informs the server, then shares each worktree. Fails if the project
    /// has no server-assigned id yet or is itself a remote project.
    pub fn share(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
        let rpc = self.client.clone();
        cx.spawn(|this, mut cx| async move {
            let project_id = this.update(&mut cx, |this, _| {
                if let ProjectClientState::Local {
                    is_shared,
                    remote_id_rx,
                    ..
                } = &mut this.client_state
                {
                    *is_shared = true;
                    remote_id_rx
                        .borrow()
                        .ok_or_else(|| anyhow!("no project id"))
                } else {
                    Err(anyhow!("can't share a remote project"))
                }
            })?;

            // Share the worktrees only after the server has acknowledged the
            // shared project.
            rpc.request(proto::ShareProject { project_id }).await?;
            let mut tasks = Vec::new();
            this.update(&mut cx, |this, cx| {
                for worktree in this.worktrees(cx).collect::<Vec<_>>() {
                    worktree.update(cx, |worktree, cx| {
                        let worktree = worktree.as_local_mut().unwrap();
                        tasks.push(worktree.share(project_id, cx));
                    });
                }
            });
            for task in tasks {
                task.await?;
            }
            this.update(&mut cx, |_, cx| cx.notify());
            Ok(())
        })
    }
443
444 pub fn unshare(&self, cx: &mut ModelContext<Self>) -> Task<anyhow::Result<()>> {
445 let rpc = self.client.clone();
446 cx.spawn(|this, mut cx| async move {
447 let project_id = this.update(&mut cx, |this, _| {
448 if let ProjectClientState::Local {
449 is_shared,
450 remote_id_rx,
451 ..
452 } = &mut this.client_state
453 {
454 *is_shared = false;
455 remote_id_rx
456 .borrow()
457 .ok_or_else(|| anyhow!("no project id"))
458 } else {
459 Err(anyhow!("can't share a remote project"))
460 }
461 })?;
462
463 rpc.send(proto::UnshareProject { project_id })?;
464 this.update(&mut cx, |this, cx| {
465 this.collaborators.clear();
466 this.shared_buffers.clear();
467 for worktree in this.worktrees(cx).collect::<Vec<_>>() {
468 worktree.update(cx, |worktree, _| {
469 worktree.as_local_mut().unwrap().unshare();
470 });
471 }
472 cx.notify()
473 });
474 Ok(())
475 })
476 }
477
478 pub fn is_read_only(&self) -> bool {
479 match &self.client_state {
480 ProjectClientState::Local { .. } => false,
481 ProjectClientState::Remote {
482 sharing_has_stopped,
483 ..
484 } => *sharing_has_stopped,
485 }
486 }
487
488 pub fn is_local(&self) -> bool {
489 match &self.client_state {
490 ProjectClientState::Local { .. } => true,
491 ProjectClientState::Remote { .. } => false,
492 }
493 }
494
    /// Opens (or returns the already-open) buffer for `path`. Concurrent
    /// requests for the same path share a single load: the first request
    /// records a watch channel in `loading_buffers`, and later requests wait
    /// on that channel instead of loading again.
    pub fn open_buffer(
        &mut self,
        path: impl Into<ProjectPath>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let project_path = path.into();
        let worktree = if let Some(worktree) = self.worktree_for_id(project_path.worktree_id, cx) {
            worktree
        } else {
            return Task::ready(Err(anyhow!("no such worktree")));
        };

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = self.get_open_buffer(&project_path, cx);
        if let Some(existing_buffer) = existing_buffer {
            return Task::ready(Ok(existing_buffer));
        }

        let mut loading_watch = match self.loading_buffers.entry(project_path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                // Local worktrees load from disk; remote ones request the
                // buffer from the host over RPC.
                let load_buffer = if worktree.read(cx).is_local() {
                    self.open_local_buffer(&project_path.path, &worktree, cx)
                } else {
                    self.open_remote_buffer(&project_path.path, &worktree, cx)
                };

                cx.spawn(move |this, mut cx| async move {
                    let load_result = load_buffer.await;
                    *tx.borrow_mut() = Some(this.update(&mut cx, |this, _| {
                        // Record the fact that the buffer is no longer loading.
                        this.loading_buffers.remove(&project_path);
                        let buffer = load_result.map_err(Arc::new)?;
                        Ok(buffer)
                    }));
                })
                .detach();
                rx
            }
        };

        // Wait until a result (ours or an earlier opener's) is published.
        cx.foreground().spawn(async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    match result {
                        Ok(buffer) => return Ok(buffer.clone()),
                        Err(error) => return Err(anyhow!("{}", error)),
                    }
                }
                loading_watch.recv().await;
            }
        })
    }
555
556 fn open_local_buffer(
557 &mut self,
558 path: &Arc<Path>,
559 worktree: &ModelHandle<Worktree>,
560 cx: &mut ModelContext<Self>,
561 ) -> Task<Result<ModelHandle<Buffer>>> {
562 let load_buffer = worktree.update(cx, |worktree, cx| {
563 let worktree = worktree.as_local_mut().unwrap();
564 worktree.load_buffer(path, cx)
565 });
566 let worktree = worktree.downgrade();
567 cx.spawn(|this, mut cx| async move {
568 let buffer = load_buffer.await?;
569 let worktree = worktree
570 .upgrade(&cx)
571 .ok_or_else(|| anyhow!("worktree was removed"))?;
572 this.update(&mut cx, |this, cx| {
573 this.register_buffer(&buffer, Some(&worktree), cx)
574 })?;
575 Ok(buffer)
576 })
577 }
578
579 fn open_remote_buffer(
580 &mut self,
581 path: &Arc<Path>,
582 worktree: &ModelHandle<Worktree>,
583 cx: &mut ModelContext<Self>,
584 ) -> Task<Result<ModelHandle<Buffer>>> {
585 let rpc = self.client.clone();
586 let project_id = self.remote_id().unwrap();
587 let remote_worktree_id = worktree.read(cx).id();
588 let path = path.clone();
589 let path_string = path.to_string_lossy().to_string();
590 cx.spawn(|this, mut cx| async move {
591 let response = rpc
592 .request(proto::OpenBuffer {
593 project_id,
594 worktree_id: remote_worktree_id.to_proto(),
595 path: path_string,
596 })
597 .await?;
598 let buffer = response.buffer.ok_or_else(|| anyhow!("missing buffer"))?;
599 this.update(&mut cx, |this, cx| {
600 this.deserialize_remote_buffer(buffer, cx)
601 })
602 })
603 }
604
    /// Saves `buffer` under a new absolute path, creating (or reusing) a
    /// local worktree that contains that path, then re-assigns the buffer's
    /// language based on its new file name.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        abs_path: PathBuf,
        cx: &mut ModelContext<Project>,
    ) -> Task<Result<()>> {
        let worktree_task = self.find_or_create_local_worktree(&abs_path, false, cx);
        cx.spawn(|this, mut cx| async move {
            let (worktree, path) = worktree_task.await?;
            worktree
                .update(&mut cx, |worktree, cx| {
                    worktree
                        .as_local_mut()
                        .unwrap()
                        .save_buffer_as(buffer.clone(), path, cx)
                })
                .await?;
            this.update(&mut cx, |this, cx| {
                this.assign_language_to_buffer(&buffer, Some(&worktree), cx);
            });
            Ok(())
        })
    }
628
629 #[cfg(any(test, feature = "test-support"))]
630 pub fn has_open_buffer(&self, path: impl Into<ProjectPath>, cx: &AppContext) -> bool {
631 let path = path.into();
632 if let Some(worktree) = self.worktree_for_id(path.worktree_id, cx) {
633 self.open_buffers.iter().any(|(_, buffer)| {
634 if let Some(buffer) = buffer.upgrade(cx) {
635 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
636 if file.worktree == worktree && file.path() == &path.path {
637 return true;
638 }
639 }
640 }
641 false
642 })
643 } else {
644 false
645 }
646 }
647
    /// Returns the open buffer for `path`, if any. As a side effect, prunes
    /// `open_buffers` entries whose buffers have been dropped.
    fn get_open_buffer(
        &mut self,
        path: &ProjectPath,
        cx: &mut ModelContext<Self>,
    ) -> Option<ModelHandle<Buffer>> {
        let mut result = None;
        let worktree = self.worktree_for_id(path.worktree_id, cx)?;
        self.open_buffers.retain(|_, buffer| {
            if let Some(buffer) = buffer.upgrade(cx) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == worktree && file.path() == &path.path {
                        result = Some(buffer);
                    }
                }
                // Keep live buffers whether or not they matched.
                true
            } else {
                // Drop entries whose buffers no longer exist.
                false
            }
        });
        result
    }
669
670 fn register_buffer(
671 &mut self,
672 buffer: &ModelHandle<Buffer>,
673 worktree: Option<&ModelHandle<Worktree>>,
674 cx: &mut ModelContext<Self>,
675 ) -> Result<()> {
676 if self
677 .open_buffers
678 .insert(buffer.read(cx).remote_id() as usize, buffer.downgrade())
679 .is_some()
680 {
681 return Err(anyhow!("registered the same buffer twice"));
682 }
683 self.assign_language_to_buffer(&buffer, worktree, cx);
684 Ok(())
685 }
686
    /// Detects the buffer's language from its full path and, for buffers in
    /// a local worktree, starts (or reuses) the matching language server and
    /// applies any diagnostics already stored for the path.
    ///
    /// The `Option<()>` return only supports the internal `?` on a missing
    /// file; callers ignore the value.
    fn assign_language_to_buffer(
        &mut self,
        buffer: &ModelHandle<Buffer>,
        worktree: Option<&ModelHandle<Worktree>>,
        cx: &mut ModelContext<Self>,
    ) -> Option<()> {
        let (path, full_path) = {
            let file = buffer.read(cx).file()?;
            (file.path().clone(), file.full_path(cx))
        };

        // If the buffer has a language, set it and start/assign the language server
        if let Some(language) = self.languages.select_language(&full_path) {
            buffer.update(cx, |buffer, cx| {
                buffer.set_language(Some(language.clone()), cx);
            });

            // For local worktrees, start a language server if needed.
            // Also assign the language server and any previously stored diagnostics to the buffer.
            if let Some(local_worktree) = worktree.and_then(|w| w.read(cx).as_local()) {
                let worktree_id = local_worktree.id();
                let worktree_abs_path = local_worktree.abs_path().clone();

                // One language server per (worktree, language name) pair.
                let language_server = match self
                    .language_servers
                    .entry((worktree_id, language.name().to_string()))
                {
                    hash_map::Entry::Occupied(e) => Some(e.get().clone()),
                    hash_map::Entry::Vacant(e) => Self::start_language_server(
                        self.client.clone(),
                        language.clone(),
                        &worktree_abs_path,
                        cx,
                    )
                    .map(|server| e.insert(server).clone()),
                };

                buffer.update(cx, |buffer, cx| {
                    buffer.set_language_server(language_server, cx);
                });
            }
        }

        // Diagnostics may have arrived before the buffer was opened; apply
        // the stored ones now.
        if let Some(local_worktree) = worktree.and_then(|w| w.read(cx).as_local()) {
            if let Some(diagnostics) = local_worktree.diagnostics_for_path(&path) {
                buffer.update(cx, |buffer, cx| {
                    buffer.update_diagnostics(None, diagnostics, cx).log_err();
                });
            }
        }

        None
    }
740
    /// Launches a language server for `language` rooted at `worktree_path`
    /// and wires up its diagnostic notifications. Returns `None` if the
    /// server fails to start.
    ///
    /// Diagnostic events are funneled through a channel so that buffer
    /// updates happen on the app's executor rather than on the LSP callback
    /// thread, and start/finish transitions are mirrored to collaborators
    /// over RPC when the project is shared.
    fn start_language_server(
        rpc: Arc<Client>,
        language: Arc<Language>,
        worktree_path: &Path,
        cx: &mut ModelContext<Self>,
    ) -> Option<Arc<LanguageServer>> {
        // Internal event stream decoupling LSP callbacks from the app.
        enum LspEvent {
            DiagnosticsStart,
            DiagnosticsUpdate(lsp::PublishDiagnosticsParams),
            DiagnosticsFinish,
        }

        let language_server = language
            .start_server(worktree_path, cx)
            .log_err()
            .flatten()?;
        let disk_based_sources = language
            .disk_based_diagnostic_sources()
            .cloned()
            .unwrap_or_default();
        let disk_based_diagnostics_progress_token =
            language.disk_based_diagnostics_progress_token().cloned();
        let has_disk_based_diagnostic_progress_token =
            disk_based_diagnostics_progress_token.is_some();
        let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();

        // Listen for `PublishDiagnostics` notifications.
        language_server
            .on_notification::<lsp::notification::PublishDiagnostics, _>({
                let diagnostics_tx = diagnostics_tx.clone();
                move |params| {
                    // Without a progress token there is no other start/finish
                    // signal, so bracket each update with both events.
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                    }
                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsUpdate(params))).ok();
                    if !has_disk_based_diagnostic_progress_token {
                        block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                    }
                }
            })
            .detach();

        // Listen for `Progress` notifications. Send an event when the language server
        // transitions between running jobs and not running any jobs.
        let mut running_jobs_for_this_server: i32 = 0;
        language_server
            .on_notification::<lsp::notification::Progress, _>(move |params| {
                let token = match params.token {
                    lsp::NumberOrString::Number(_) => None,
                    lsp::NumberOrString::String(token) => Some(token),
                };

                if token == disk_based_diagnostics_progress_token {
                    match params.value {
                        lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                            lsp::WorkDoneProgress::Begin(_) => {
                                running_jobs_for_this_server += 1;
                                if running_jobs_for_this_server == 1 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsStart)).ok();
                                }
                            }
                            lsp::WorkDoneProgress::End(_) => {
                                running_jobs_for_this_server -= 1;
                                if running_jobs_for_this_server == 0 {
                                    block_on(diagnostics_tx.send(LspEvent::DiagnosticsFinish)).ok();
                                }
                            }
                            _ => {}
                        },
                    }
                }
            })
            .detach();

        // Process all the LSP events.
        cx.spawn_weak(|this, mut cx| async move {
            while let Ok(message) = diagnostics_rx.recv().await {
                // Stop processing once the project has been dropped.
                let this = cx.read(|cx| this.upgrade(cx))?;
                match message {
                    LspEvent::DiagnosticsStart => {
                        this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_started(cx);
                            if let Some(project_id) = this.remote_id() {
                                rpc.send(proto::DiskBasedDiagnosticsUpdating { project_id })
                                    .log_err();
                            }
                        });
                    }
                    LspEvent::DiagnosticsUpdate(mut params) => {
                        // Let the language adjust server-specific payloads
                        // before ingesting them.
                        language.process_diagnostics(&mut params);
                        this.update(&mut cx, |this, cx| {
                            this.update_diagnostics(params, &disk_based_sources, cx)
                                .log_err();
                        });
                    }
                    LspEvent::DiagnosticsFinish => {
                        this.update(&mut cx, |this, cx| {
                            this.disk_based_diagnostics_finished(cx);
                            if let Some(project_id) = this.remote_id() {
                                rpc.send(proto::DiskBasedDiagnosticsUpdated { project_id })
                                    .log_err();
                            }
                        });
                    }
                }
            }
            Some(())
        })
        .detach();

        Some(language_server)
    }
853
854 pub fn update_diagnostics(
855 &mut self,
856 params: lsp::PublishDiagnosticsParams,
857 disk_based_sources: &HashSet<String>,
858 cx: &mut ModelContext<Self>,
859 ) -> Result<()> {
860 let abs_path = params
861 .uri
862 .to_file_path()
863 .map_err(|_| anyhow!("URI is not a file"))?;
864 let mut next_group_id = 0;
865 let mut diagnostics = Vec::default();
866 let mut primary_diagnostic_group_ids = HashMap::default();
867 let mut sources_by_group_id = HashMap::default();
868 let mut supporting_diagnostic_severities = HashMap::default();
869 for diagnostic in ¶ms.diagnostics {
870 let source = diagnostic.source.as_ref();
871 let code = diagnostic.code.as_ref().map(|code| match code {
872 lsp::NumberOrString::Number(code) => code.to_string(),
873 lsp::NumberOrString::String(code) => code.clone(),
874 });
875 let range = range_from_lsp(diagnostic.range);
876 let is_supporting = diagnostic
877 .related_information
878 .as_ref()
879 .map_or(false, |infos| {
880 infos.iter().any(|info| {
881 primary_diagnostic_group_ids.contains_key(&(
882 source,
883 code.clone(),
884 range_from_lsp(info.location.range),
885 ))
886 })
887 });
888
889 if is_supporting {
890 if let Some(severity) = diagnostic.severity {
891 supporting_diagnostic_severities
892 .insert((source, code.clone(), range), severity);
893 }
894 } else {
895 let group_id = post_inc(&mut next_group_id);
896 let is_disk_based =
897 source.map_or(false, |source| disk_based_sources.contains(source));
898
899 sources_by_group_id.insert(group_id, source);
900 primary_diagnostic_group_ids
901 .insert((source, code.clone(), range.clone()), group_id);
902
903 diagnostics.push(DiagnosticEntry {
904 range,
905 diagnostic: Diagnostic {
906 code: code.clone(),
907 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
908 message: diagnostic.message.clone(),
909 group_id,
910 is_primary: true,
911 is_valid: true,
912 is_disk_based,
913 },
914 });
915 if let Some(infos) = &diagnostic.related_information {
916 for info in infos {
917 if info.location.uri == params.uri && !info.message.is_empty() {
918 let range = range_from_lsp(info.location.range);
919 diagnostics.push(DiagnosticEntry {
920 range,
921 diagnostic: Diagnostic {
922 code: code.clone(),
923 severity: DiagnosticSeverity::INFORMATION,
924 message: info.message.clone(),
925 group_id,
926 is_primary: false,
927 is_valid: true,
928 is_disk_based,
929 },
930 });
931 }
932 }
933 }
934 }
935 }
936
937 for entry in &mut diagnostics {
938 let diagnostic = &mut entry.diagnostic;
939 if !diagnostic.is_primary {
940 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
941 if let Some(&severity) = supporting_diagnostic_severities.get(&(
942 source,
943 diagnostic.code.clone(),
944 entry.range.clone(),
945 )) {
946 diagnostic.severity = severity;
947 }
948 }
949 }
950
951 self.update_diagnostic_entries(abs_path, params.version, diagnostics, cx)?;
952 Ok(())
953 }
954
    /// Stores `diagnostics` for the file at `abs_path`: pushes them into the
    /// matching open buffer (if any) and into the owning local worktree, then
    /// emits [`Event::DiagnosticsUpdated`].
    pub fn update_diagnostic_entries(
        &mut self,
        abs_path: PathBuf,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Project>,
    ) -> Result<(), anyhow::Error> {
        let (worktree, relative_path) = self
            .find_local_worktree(&abs_path, cx)
            .ok_or_else(|| anyhow!("no worktree found for diagnostics"))?;
        let project_path = ProjectPath {
            worktree_id: worktree.read(cx).id(),
            path: relative_path.into(),
        };

        // NOTE(review): buffers are matched by relative path only, without
        // comparing the buffer's worktree to `worktree` — presumably fine
        // while identical relative paths don't occur across worktrees;
        // confirm.
        for buffer in self.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == project_path.path)
                {
                    buffer.update(cx, |buffer, cx| {
                        buffer.update_diagnostics(version, diagnostics.clone(), cx)
                    })?;
                    break;
                }
            }
        }
        // Persist on the worktree so buffers opened later can pick the
        // diagnostics up (see `assign_language_to_buffer`).
        worktree.update(cx, |worktree, cx| {
            worktree
                .as_local_mut()
                .ok_or_else(|| anyhow!("not a local worktree"))?
                .update_diagnostics(project_path.path.clone(), diagnostics, cx)
        })?;
        cx.emit(Event::DiagnosticsUpdated(project_path));
        Ok(())
    }
993
994 pub fn definition<T: ToOffset>(
995 &self,
996 source_buffer_handle: &ModelHandle<Buffer>,
997 position: T,
998 cx: &mut ModelContext<Self>,
999 ) -> Task<Result<Vec<Definition>>> {
1000 let source_buffer_handle = source_buffer_handle.clone();
1001 let source_buffer = source_buffer_handle.read(cx);
1002 let worktree;
1003 let buffer_abs_path;
1004 if let Some(file) = File::from_dyn(source_buffer.file()) {
1005 worktree = file.worktree.clone();
1006 buffer_abs_path = file.as_local().map(|f| f.abs_path(cx));
1007 } else {
1008 return Task::ready(Err(anyhow!("buffer does not belong to any worktree")));
1009 };
1010
1011 if worktree.read(cx).as_local().is_some() {
1012 let point = source_buffer.offset_to_point_utf16(position.to_offset(source_buffer));
1013 let buffer_abs_path = buffer_abs_path.unwrap();
1014 let lang_name;
1015 let lang_server;
1016 if let Some(lang) = source_buffer.language() {
1017 lang_name = lang.name().to_string();
1018 if let Some(server) = self
1019 .language_servers
1020 .get(&(worktree.read(cx).id(), lang_name.clone()))
1021 {
1022 lang_server = server.clone();
1023 } else {
1024 return Task::ready(Err(anyhow!("buffer does not have a language server")));
1025 };
1026 } else {
1027 return Task::ready(Err(anyhow!("buffer does not have a language")));
1028 }
1029
1030 cx.spawn(|this, mut cx| async move {
1031 let response = lang_server
1032 .request::<lsp::request::GotoDefinition>(lsp::GotoDefinitionParams {
1033 text_document_position_params: lsp::TextDocumentPositionParams {
1034 text_document: lsp::TextDocumentIdentifier::new(
1035 lsp::Url::from_file_path(&buffer_abs_path).unwrap(),
1036 ),
1037 position: lsp::Position::new(point.row, point.column),
1038 },
1039 work_done_progress_params: Default::default(),
1040 partial_result_params: Default::default(),
1041 })
1042 .await?;
1043
1044 let mut definitions = Vec::new();
1045 if let Some(response) = response {
1046 let mut unresolved_locations = Vec::new();
1047 match response {
1048 lsp::GotoDefinitionResponse::Scalar(loc) => {
1049 unresolved_locations.push((loc.uri, loc.range));
1050 }
1051 lsp::GotoDefinitionResponse::Array(locs) => {
1052 unresolved_locations.extend(locs.into_iter().map(|l| (l.uri, l.range)));
1053 }
1054 lsp::GotoDefinitionResponse::Link(links) => {
1055 unresolved_locations.extend(
1056 links
1057 .into_iter()
1058 .map(|l| (l.target_uri, l.target_selection_range)),
1059 );
1060 }
1061 }
1062
1063 for (target_uri, target_range) in unresolved_locations {
1064 let abs_path = target_uri
1065 .to_file_path()
1066 .map_err(|_| anyhow!("invalid target path"))?;
1067
1068 let (worktree, relative_path) = if let Some(result) =
1069 this.read_with(&cx, |this, cx| this.find_local_worktree(&abs_path, cx))
1070 {
1071 result
1072 } else {
1073 let worktree = this
1074 .update(&mut cx, |this, cx| {
1075 this.create_local_worktree(&abs_path, true, cx)
1076 })
1077 .await?;
1078 this.update(&mut cx, |this, cx| {
1079 this.language_servers.insert(
1080 (worktree.read(cx).id(), lang_name.clone()),
1081 lang_server.clone(),
1082 );
1083 });
1084 (worktree, PathBuf::new())
1085 };
1086
1087 let project_path = ProjectPath {
1088 worktree_id: worktree.read_with(&cx, |worktree, _| worktree.id()),
1089 path: relative_path.into(),
1090 };
1091 let target_buffer_handle = this
1092 .update(&mut cx, |this, cx| this.open_buffer(project_path, cx))
1093 .await?;
1094 cx.read(|cx| {
1095 let target_buffer = target_buffer_handle.read(cx);
1096 let target_start = target_buffer
1097 .clip_point_utf16(target_range.start.to_point_utf16(), Bias::Left);
1098 let target_end = target_buffer
1099 .clip_point_utf16(target_range.end.to_point_utf16(), Bias::Left);
1100 definitions.push(Definition {
1101 target_buffer: target_buffer_handle,
1102 target_range: target_buffer.anchor_after(target_start)
1103 ..target_buffer.anchor_before(target_end),
1104 });
1105 });
1106 }
1107 }
1108
1109 Ok(definitions)
1110 })
1111 } else if let Some(project_id) = self.remote_id() {
1112 let client = self.client.clone();
1113 let request = proto::GetDefinition {
1114 project_id,
1115 buffer_id: source_buffer.remote_id(),
1116 position: Some(serialize_anchor(&source_buffer.anchor_before(position))),
1117 };
1118 cx.spawn(|this, mut cx| async move {
1119 let response = client.request(request).await?;
1120 this.update(&mut cx, |this, cx| {
1121 let mut definitions = Vec::new();
1122 for definition in response.definitions {
1123 let target_buffer = this.deserialize_remote_buffer(
1124 definition.buffer.ok_or_else(|| anyhow!("missing buffer"))?,
1125 cx,
1126 )?;
1127 let target_start = definition
1128 .target_start
1129 .and_then(deserialize_anchor)
1130 .ok_or_else(|| anyhow!("missing target start"))?;
1131 let target_end = definition
1132 .target_end
1133 .and_then(deserialize_anchor)
1134 .ok_or_else(|| anyhow!("missing target end"))?;
1135 definitions.push(Definition {
1136 target_buffer,
1137 target_range: target_start..target_end,
1138 })
1139 }
1140
1141 Ok(definitions)
1142 })
1143 })
1144 } else {
1145 Task::ready(Err(anyhow!("project does not have a remote id")))
1146 }
1147 }
1148
1149 pub fn find_or_create_local_worktree(
1150 &self,
1151 abs_path: impl AsRef<Path>,
1152 weak: bool,
1153 cx: &mut ModelContext<Self>,
1154 ) -> Task<Result<(ModelHandle<Worktree>, PathBuf)>> {
1155 let abs_path = abs_path.as_ref();
1156 if let Some((tree, relative_path)) = self.find_local_worktree(abs_path, cx) {
1157 Task::ready(Ok((tree.clone(), relative_path.into())))
1158 } else {
1159 let worktree = self.create_local_worktree(abs_path, weak, cx);
1160 cx.foreground()
1161 .spawn(async move { Ok((worktree.await?, PathBuf::new())) })
1162 }
1163 }
1164
1165 fn find_local_worktree(
1166 &self,
1167 abs_path: &Path,
1168 cx: &AppContext,
1169 ) -> Option<(ModelHandle<Worktree>, PathBuf)> {
1170 for tree in self.worktrees(cx) {
1171 if let Some(relative_path) = tree
1172 .read(cx)
1173 .as_local()
1174 .and_then(|t| abs_path.strip_prefix(t.abs_path()).ok())
1175 {
1176 return Some((tree.clone(), relative_path.into()));
1177 }
1178 }
1179 None
1180 }
1181
1182 pub fn is_shared(&self) -> bool {
1183 match &self.client_state {
1184 ProjectClientState::Local { is_shared, .. } => *is_shared,
1185 ProjectClientState::Remote { .. } => false,
1186 }
1187 }
1188
    /// Creates a new local worktree rooted at `abs_path` and adds it to this
    /// project.
    ///
    /// If the project has a remote id, the worktree is also registered with
    /// the server, and then shared when the project is currently shared. The
    /// registration round-trip must complete before sharing begins, which is
    /// why the two awaits are sequential.
    fn create_local_worktree(
        &self,
        abs_path: impl AsRef<Path>,
        weak: bool,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Worktree>>> {
        let fs = self.fs.clone();
        let client = self.client.clone();
        let path = Arc::from(abs_path.as_ref());
        cx.spawn(|project, mut cx| async move {
            let worktree = Worktree::local(client.clone(), path, weak, fs, &mut cx).await?;

            // Add the worktree and read the project's sharing state inside a
            // single update so the two cannot diverge in between.
            let (remote_project_id, is_shared) = project.update(&mut cx, |project, cx| {
                project.add_worktree(&worktree, cx);
                (project.remote_id(), project.is_shared())
            });

            if let Some(project_id) = remote_project_id {
                worktree
                    .update(&mut cx, |worktree, cx| {
                        worktree.as_local_mut().unwrap().register(project_id, cx)
                    })
                    .await?;
                if is_shared {
                    worktree
                        .update(&mut cx, |worktree, cx| {
                            worktree.as_local_mut().unwrap().share(project_id, cx)
                        })
                        .await?;
                }
            }

            Ok(worktree)
        })
    }
1224
1225 pub fn remove_worktree(&mut self, id: WorktreeId, cx: &mut ModelContext<Self>) {
1226 self.worktrees.retain(|worktree| {
1227 worktree
1228 .upgrade(cx)
1229 .map_or(false, |w| w.read(cx).id() != id)
1230 });
1231 cx.notify();
1232 }
1233
    /// Wires a newly-created or newly-received worktree into the project.
    ///
    /// Local worktrees are additionally subscribed to so open buffers' `File`s
    /// stay in sync with file-system changes. Weak local worktrees are held
    /// through a weak handle so they can be dropped when no longer referenced
    /// elsewhere; a release observer prunes the dead entry.
    fn add_worktree(&mut self, worktree: &ModelHandle<Worktree>, cx: &mut ModelContext<Self>) {
        cx.observe(&worktree, |_, _, cx| cx.notify()).detach();
        if worktree.read(cx).is_local() {
            cx.subscribe(&worktree, |this, worktree, _, cx| {
                this.update_local_worktree_buffers(worktree, cx);
            })
            .detach();
        }

        // Scope the read borrow so `cx` is free again for the calls below.
        let push_weak_handle = {
            let worktree = worktree.read(cx);
            worktree.is_local() && worktree.is_weak()
        };
        if push_weak_handle {
            cx.observe_release(&worktree, |this, cx| {
                // Once the worktree model is released, drop its (and any other
                // dead) weak entries.
                this.worktrees
                    .retain(|worktree| worktree.upgrade(cx).is_some());
                cx.notify();
            })
            .detach();
            self.worktrees
                .push(WorktreeHandle::Weak(worktree.downgrade()));
        } else {
            self.worktrees
                .push(WorktreeHandle::Strong(worktree.clone()));
        }
        cx.notify();
    }
1262
    /// Re-resolves the `File` of every open buffer that belongs to the given
    /// local worktree after a file-system scan, and forwards the new file
    /// metadata to collaborators when the project is connected.
    ///
    /// Buffers whose models have been dropped are pruned from `open_buffers`.
    fn update_local_worktree_buffers(
        &mut self,
        worktree_handle: ModelHandle<Worktree>,
        cx: &mut ModelContext<Self>,
    ) {
        let snapshot = worktree_handle.read(cx).snapshot();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        if old_file.worktree != worktree_handle {
                            return;
                        }

                        // Resolve the buffer's file against the new snapshot,
                        // in decreasing order of confidence:
                        // 1. same entry id (tracks renames),
                        // 2. same path (entry was recreated),
                        // 3. neither — the file is gone; keep the old path and
                        //    mtime but clear the entry id.
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| snapshot.entry_for_id(entry_id))
                        {
                            File {
                                is_local: true,
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) =
                            snapshot.entry_for_path(old_file.path().as_ref())
                        {
                            File {
                                is_local: true,
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: true,
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        // Best-effort notification to guests; errors are only
                        // logged since the local update must proceed anyway.
                        if let Some(project_id) = self.remote_id() {
                            self.client
                                .send(proto::UpdateBufferFile {
                                    project_id,
                                    buffer_id: *buffer_id as u64,
                                    file: Some(new_file.to_proto()),
                                })
                                .log_err();
                        }
                        buffer.file_updated(Box::new(new_file), cx).detach();
                    }
                });
            } else {
                // Buffer model was dropped; remember its id for cleanup after
                // the iteration (can't mutate the map while borrowing it).
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            self.open_buffers.remove(&buffer_id);
        }
    }
1330
1331 pub fn set_active_path(&mut self, entry: Option<ProjectPath>, cx: &mut ModelContext<Self>) {
1332 let new_active_entry = entry.and_then(|project_path| {
1333 let worktree = self.worktree_for_id(project_path.worktree_id, cx)?;
1334 let entry = worktree.read(cx).entry_for_path(project_path.path)?;
1335 Some(ProjectEntry {
1336 worktree_id: project_path.worktree_id,
1337 entry_id: entry.id,
1338 })
1339 });
1340 if new_active_entry != self.active_entry {
1341 self.active_entry = new_active_entry;
1342 cx.emit(Event::ActiveEntryChanged(new_active_entry));
1343 }
1344 }
1345
    /// Whether any language server is currently in a disk-based diagnostics
    /// pass (i.e. more `started` than `finished` notifications so far).
    pub fn is_running_disk_based_diagnostics(&self) -> bool {
        self.language_servers_with_diagnostics_running > 0
    }
1349
1350 pub fn diagnostic_summary(&self, cx: &AppContext) -> DiagnosticSummary {
1351 let mut summary = DiagnosticSummary::default();
1352 for (_, path_summary) in self.diagnostic_summaries(cx) {
1353 summary.error_count += path_summary.error_count;
1354 summary.warning_count += path_summary.warning_count;
1355 summary.info_count += path_summary.info_count;
1356 summary.hint_count += path_summary.hint_count;
1357 }
1358 summary
1359 }
1360
1361 pub fn diagnostic_summaries<'a>(
1362 &'a self,
1363 cx: &'a AppContext,
1364 ) -> impl Iterator<Item = (ProjectPath, DiagnosticSummary)> + 'a {
1365 self.worktrees(cx).flat_map(move |worktree| {
1366 let worktree = worktree.read(cx);
1367 let worktree_id = worktree.id();
1368 worktree
1369 .diagnostic_summaries()
1370 .map(move |(path, summary)| (ProjectPath { worktree_id, path }, summary))
1371 })
1372 }
1373
1374 pub fn disk_based_diagnostics_started(&mut self, cx: &mut ModelContext<Self>) {
1375 self.language_servers_with_diagnostics_running += 1;
1376 if self.language_servers_with_diagnostics_running == 1 {
1377 cx.emit(Event::DiskBasedDiagnosticsStarted);
1378 }
1379 }
1380
1381 pub fn disk_based_diagnostics_finished(&mut self, cx: &mut ModelContext<Self>) {
1382 cx.emit(Event::DiskBasedDiagnosticsUpdated);
1383 self.language_servers_with_diagnostics_running -= 1;
1384 if self.language_servers_with_diagnostics_running == 0 {
1385 cx.emit(Event::DiskBasedDiagnosticsFinished);
1386 }
1387 }
1388
    /// The currently active project entry, if any (see `set_active_path`).
    pub fn active_entry(&self) -> Option<ProjectEntry> {
        self.active_entry
    }
1392
1393 // RPC message handlers
1394
1395 fn handle_unshare_project(
1396 &mut self,
1397 _: TypedEnvelope<proto::UnshareProject>,
1398 _: Arc<Client>,
1399 cx: &mut ModelContext<Self>,
1400 ) -> Result<()> {
1401 if let ProjectClientState::Remote {
1402 sharing_has_stopped,
1403 ..
1404 } = &mut self.client_state
1405 {
1406 *sharing_has_stopped = true;
1407 self.collaborators.clear();
1408 cx.notify();
1409 Ok(())
1410 } else {
1411 unreachable!()
1412 }
1413 }
1414
    /// Handles a new collaborator joining the project.
    ///
    /// Resolving the collaborator requires fetching its `User`, which is
    /// asynchronous, so the insertion happens in a detached task; failures are
    /// only logged.
    fn handle_add_collaborator(
        &mut self,
        mut envelope: TypedEnvelope<proto::AddProjectCollaborator>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let user_store = self.user_store.clone();
        let collaborator = envelope
            .payload
            .collaborator
            .take()
            .ok_or_else(|| anyhow!("empty collaborator"))?;

        cx.spawn(|this, mut cx| {
            async move {
                let collaborator =
                    Collaborator::from_proto(collaborator, &user_store, &mut cx).await?;
                this.update(&mut cx, |this, cx| {
                    this.collaborators
                        .insert(collaborator.peer_id, collaborator);
                    cx.notify();
                });
                Ok(())
            }
            .log_err()
        })
        .detach();

        Ok(())
    }
1445
1446 fn handle_remove_collaborator(
1447 &mut self,
1448 envelope: TypedEnvelope<proto::RemoveProjectCollaborator>,
1449 _: Arc<Client>,
1450 cx: &mut ModelContext<Self>,
1451 ) -> Result<()> {
1452 let peer_id = PeerId(envelope.payload.peer_id);
1453 let replica_id = self
1454 .collaborators
1455 .remove(&peer_id)
1456 .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?
1457 .replica_id;
1458 self.shared_buffers.remove(&peer_id);
1459 for (_, buffer) in &self.open_buffers {
1460 if let Some(buffer) = buffer.upgrade(cx) {
1461 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1462 }
1463 }
1464 cx.notify();
1465 Ok(())
1466 }
1467
1468 fn handle_share_worktree(
1469 &mut self,
1470 envelope: TypedEnvelope<proto::ShareWorktree>,
1471 client: Arc<Client>,
1472 cx: &mut ModelContext<Self>,
1473 ) -> Result<()> {
1474 let remote_id = self.remote_id().ok_or_else(|| anyhow!("invalid project"))?;
1475 let replica_id = self.replica_id();
1476 let worktree = envelope
1477 .payload
1478 .worktree
1479 .ok_or_else(|| anyhow!("invalid worktree"))?;
1480 let (worktree, load_task) = Worktree::remote(remote_id, replica_id, worktree, client, cx);
1481 self.add_worktree(&worktree, cx);
1482 load_task.detach();
1483 Ok(())
1484 }
1485
1486 fn handle_unregister_worktree(
1487 &mut self,
1488 envelope: TypedEnvelope<proto::UnregisterWorktree>,
1489 _: Arc<Client>,
1490 cx: &mut ModelContext<Self>,
1491 ) -> Result<()> {
1492 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1493 self.remove_worktree(worktree_id, cx);
1494 Ok(())
1495 }
1496
1497 fn handle_update_worktree(
1498 &mut self,
1499 envelope: TypedEnvelope<proto::UpdateWorktree>,
1500 _: Arc<Client>,
1501 cx: &mut ModelContext<Self>,
1502 ) -> Result<()> {
1503 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1504 if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
1505 worktree.update(cx, |worktree, cx| {
1506 let worktree = worktree.as_remote_mut().unwrap();
1507 worktree.update_from_remote(envelope, cx)
1508 })?;
1509 }
1510 Ok(())
1511 }
1512
1513 fn handle_update_diagnostic_summary(
1514 &mut self,
1515 envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
1516 _: Arc<Client>,
1517 cx: &mut ModelContext<Self>,
1518 ) -> Result<()> {
1519 let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
1520 if let Some(worktree) = self.worktree_for_id(worktree_id, cx) {
1521 if let Some(summary) = envelope.payload.summary {
1522 let project_path = ProjectPath {
1523 worktree_id,
1524 path: Path::new(&summary.path).into(),
1525 };
1526 worktree.update(cx, |worktree, _| {
1527 worktree
1528 .as_remote_mut()
1529 .unwrap()
1530 .update_diagnostic_summary(project_path.path.clone(), &summary);
1531 });
1532 cx.emit(Event::DiagnosticsUpdated(project_path));
1533 }
1534 }
1535 Ok(())
1536 }
1537
1538 fn handle_disk_based_diagnostics_updating(
1539 &mut self,
1540 _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdating>,
1541 _: Arc<Client>,
1542 cx: &mut ModelContext<Self>,
1543 ) -> Result<()> {
1544 self.disk_based_diagnostics_started(cx);
1545 Ok(())
1546 }
1547
1548 fn handle_disk_based_diagnostics_updated(
1549 &mut self,
1550 _: TypedEnvelope<proto::DiskBasedDiagnosticsUpdated>,
1551 _: Arc<Client>,
1552 cx: &mut ModelContext<Self>,
1553 ) -> Result<()> {
1554 self.disk_based_diagnostics_finished(cx);
1555 Ok(())
1556 }
1557
1558 pub fn handle_update_buffer(
1559 &mut self,
1560 envelope: TypedEnvelope<proto::UpdateBuffer>,
1561 _: Arc<Client>,
1562 cx: &mut ModelContext<Self>,
1563 ) -> Result<()> {
1564 let payload = envelope.payload.clone();
1565 let buffer_id = payload.buffer_id as usize;
1566 let ops = payload
1567 .operations
1568 .into_iter()
1569 .map(|op| language::proto::deserialize_operation(op))
1570 .collect::<Result<Vec<_>, _>>()?;
1571 if let Some(buffer) = self.open_buffers.get_mut(&buffer_id) {
1572 if let Some(buffer) = buffer.upgrade(cx) {
1573 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
1574 }
1575 }
1576 Ok(())
1577 }
1578
1579 pub fn handle_update_buffer_file(
1580 &mut self,
1581 envelope: TypedEnvelope<proto::UpdateBufferFile>,
1582 _: Arc<Client>,
1583 cx: &mut ModelContext<Self>,
1584 ) -> Result<()> {
1585 let payload = envelope.payload.clone();
1586 let buffer_id = payload.buffer_id as usize;
1587 let file = payload.file.ok_or_else(|| anyhow!("invalid file"))?;
1588 let worktree = self
1589 .worktree_for_id(WorktreeId::from_proto(file.worktree_id), cx)
1590 .ok_or_else(|| anyhow!("no such worktree"))?;
1591 let file = File::from_proto(file, worktree.clone(), cx)?;
1592 let buffer = self
1593 .open_buffers
1594 .get_mut(&buffer_id)
1595 .and_then(|b| b.upgrade(cx))
1596 .ok_or_else(|| anyhow!("no such buffer"))?;
1597 buffer.update(cx, |buffer, cx| {
1598 buffer.file_updated(Box::new(file), cx).detach();
1599 });
1600
1601 Ok(())
1602 }
1603
    /// Saves a buffer on behalf of a guest and responds with the saved
    /// version and mtime.
    ///
    /// The buffer must have previously been shared with the requesting peer.
    /// The save runs in a detached task; RPC errors are only logged.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let project_id = self.remote_id().ok_or_else(|| anyhow!("not connected"))?;
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let receipt = envelope.receipt();
        let buffer_id = envelope.payload.buffer_id;
        // The save itself must run on the foreground (model) context…
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
        });

        // …while awaiting its result and answering the RPC can happen on the
        // background executor.
        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();
        Ok(())
    }
1645
    /// Formats a shared buffer on behalf of a guest, acknowledging success or
    /// reporting the error back over RPC.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {})?,
                    Err(error) => rpc.respond_with_error(
                        receipt,
                        proto::Error {
                            message: error.to_string(),
                        },
                    )?,
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();
        Ok(())
    }
1681
    /// Computes completions at a position in a shared buffer on behalf of a
    /// guest and sends them back over RPC (or an error response on failure).
    fn handle_get_completions(
        &mut self,
        envelope: TypedEnvelope<proto::GetCompletions>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let position = envelope
            .payload
            .position
            .and_then(language::proto::deserialize_anchor)
            .ok_or_else(|| anyhow!("invalid position"))?;
        cx.spawn(|_, mut cx| async move {
            match buffer
                .update(&mut cx, |buffer, cx| buffer.completions(position, cx))
                .await
            {
                Ok(completions) => rpc.respond(
                    receipt,
                    proto::GetCompletionsResponse {
                        completions: completions
                            .iter()
                            .map(language::proto::serialize_completion)
                            .collect(),
                    },
                ),
                Err(error) => rpc.respond_with_error(
                    receipt,
                    proto::Error {
                        message: error.to_string(),
                    },
                ),
            }
        })
        .detach_and_log_err(cx);
        Ok(())
    }
1725
    /// Applies a completion's additional text edits (e.g. auto-imports) to a
    /// shared buffer on behalf of a guest, responding with the ids of the
    /// resulting edit operations so the guest can await their arrival.
    fn handle_apply_additional_edits_for_completion(
        &mut self,
        envelope: TypedEnvelope<proto::ApplyCompletionAdditionalEdits>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        let buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let language = buffer.read(cx).language();
        let completion = language::proto::deserialize_completion(
            envelope
                .payload
                .completion
                .ok_or_else(|| anyhow!("invalid position"))?,
            language,
        )?;
        cx.spawn(|_, mut cx| async move {
            match buffer
                .update(&mut cx, |buffer, cx| {
                    // `false` — don't push these edits onto the undo stack as
                    // a separate user action on the host.
                    buffer.apply_additional_edits_for_completion(completion, false, cx)
                })
                .await
            {
                Ok(edit_ids) => rpc.respond(
                    receipt,
                    proto::ApplyCompletionAdditionalEditsResponse {
                        additional_edits: edit_ids
                            .into_iter()
                            .map(|edit_id| proto::AdditionalEdit {
                                replica_id: edit_id.replica_id as u32,
                                local_timestamp: edit_id.value,
                            })
                            .collect(),
                    },
                ),
                Err(error) => rpc.respond_with_error(
                    receipt,
                    proto::Error {
                        message: error.to_string(),
                    },
                ),
            }
        })
        .detach_and_log_err(cx);
        Ok(())
    }
1777
    /// Resolves go-to-definition for a guest: runs the local definition
    /// lookup, serializes each target buffer for the requesting peer (sharing
    /// it on first use), and responds with the anchored target ranges.
    pub fn handle_get_definition(
        &mut self,
        envelope: TypedEnvelope<proto::GetDefinition>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let receipt = envelope.receipt();
        let sender_id = envelope.original_sender_id()?;
        let source_buffer = self
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
        let position = envelope
            .payload
            .position
            .and_then(deserialize_anchor)
            .ok_or_else(|| anyhow!("invalid position"))?;
        // Guard against anchors referencing buffer state this replica hasn't
        // observed yet.
        if !source_buffer.read(cx).can_resolve(&position) {
            return Err(anyhow!("cannot resolve position"));
        }

        let definitions = self.definition(&source_buffer, position, cx);
        cx.spawn(|this, mut cx| async move {
            let definitions = definitions.await?;
            let mut response = proto::GetDefinitionResponse {
                definitions: Default::default(),
            };
            this.update(&mut cx, |this, cx| {
                for definition in definitions {
                    let buffer =
                        this.serialize_buffer_for_peer(&definition.target_buffer, sender_id, cx);
                    response.definitions.push(proto::Definition {
                        target_start: Some(serialize_anchor(&definition.target_range.start)),
                        target_end: Some(serialize_anchor(&definition.target_range.end)),
                        buffer: Some(buffer),
                    });
                }
            });
            rpc.respond(receipt, response)?;
            Ok::<_, anyhow::Error>(())
        })
        .detach_and_log_err(cx);

        Ok(())
    }
1824
    /// Opens a buffer on behalf of a guest, shares it with that peer, and
    /// responds with either the full buffer state or just its id if it was
    /// already shared (see `serialize_buffer_for_peer`).
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();
        let peer_id = envelope.original_sender_id()?;
        let worktree_id = WorktreeId::from_proto(envelope.payload.worktree_id);
        let open_buffer = self.open_buffer(
            ProjectPath {
                worktree_id,
                path: PathBuf::from(envelope.payload.path).into(),
            },
            cx,
        );
        cx.spawn(|this, mut cx| {
            async move {
                let buffer = open_buffer.await?;
                let buffer = this.update(&mut cx, |this, cx| {
                    this.serialize_buffer_for_peer(&buffer, peer_id, cx)
                });
                rpc.respond(
                    receipt,
                    proto::OpenBufferResponse {
                        buffer: Some(buffer),
                    },
                )
            }
            .log_err()
        })
        .detach();
        Ok(())
    }
1859
1860 fn serialize_buffer_for_peer(
1861 &mut self,
1862 buffer: &ModelHandle<Buffer>,
1863 peer_id: PeerId,
1864 cx: &AppContext,
1865 ) -> proto::Buffer {
1866 let buffer_id = buffer.read(cx).remote_id();
1867 let shared_buffers = self.shared_buffers.entry(peer_id).or_default();
1868 match shared_buffers.entry(buffer_id) {
1869 hash_map::Entry::Occupied(_) => proto::Buffer {
1870 variant: Some(proto::buffer::Variant::Id(buffer_id)),
1871 },
1872 hash_map::Entry::Vacant(entry) => {
1873 entry.insert(buffer.clone());
1874 proto::Buffer {
1875 variant: Some(proto::buffer::Variant::State(buffer.read(cx).to_proto())),
1876 }
1877 }
1878 }
1879 }
1880
    /// Reconstructs a buffer received from the host.
    ///
    /// An `Id` variant refers to a buffer this replica already holds open; a
    /// `State` variant carries the full buffer contents, from which a new
    /// local buffer model is built and registered with the project.
    fn deserialize_remote_buffer(
        &mut self,
        buffer: proto::Buffer,
        cx: &mut ModelContext<Self>,
    ) -> Result<ModelHandle<Buffer>> {
        match buffer.variant.ok_or_else(|| anyhow!("missing buffer"))? {
            proto::buffer::Variant::Id(id) => self
                .open_buffers
                .get(&(id as usize))
                .and_then(|buffer| buffer.upgrade(cx))
                .ok_or_else(|| anyhow!("no buffer exists for id {}", id)),
            proto::buffer::Variant::State(mut buffer) => {
                let mut buffer_worktree = None;
                let mut buffer_file = None;
                // Resolve the buffer's file against a local worktree, keeping
                // the worktree handle around for registration below.
                if let Some(file) = buffer.file.take() {
                    let worktree_id = WorktreeId::from_proto(file.worktree_id);
                    let worktree = self
                        .worktree_for_id(worktree_id, cx)
                        .ok_or_else(|| anyhow!("no worktree found for id {}", file.worktree_id))?;
                    buffer_file = Some(Box::new(File::from_proto(file, worktree.clone(), cx)?)
                        as Box<dyn language::File>);
                    buffer_worktree = Some(worktree);
                }

                let buffer = cx.add_model(|cx| {
                    Buffer::from_proto(self.replica_id(), buffer, buffer_file, cx).unwrap()
                });
                self.register_buffer(&buffer, buffer_worktree.as_ref(), cx)?;
                Ok(buffer)
            }
        }
    }
1913
1914 pub fn handle_close_buffer(
1915 &mut self,
1916 envelope: TypedEnvelope<proto::CloseBuffer>,
1917 _: Arc<Client>,
1918 cx: &mut ModelContext<Self>,
1919 ) -> anyhow::Result<()> {
1920 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1921 shared_buffers.remove(&envelope.payload.buffer_id);
1922 cx.notify();
1923 }
1924 Ok(())
1925 }
1926
1927 pub fn handle_buffer_saved(
1928 &mut self,
1929 envelope: TypedEnvelope<proto::BufferSaved>,
1930 _: Arc<Client>,
1931 cx: &mut ModelContext<Self>,
1932 ) -> Result<()> {
1933 let payload = envelope.payload.clone();
1934 let buffer = self
1935 .open_buffers
1936 .get(&(payload.buffer_id as usize))
1937 .and_then(|buffer| buffer.upgrade(cx));
1938 if let Some(buffer) = buffer {
1939 buffer.update(cx, |buffer, cx| {
1940 let version = payload.version.try_into()?;
1941 let mtime = payload
1942 .mtime
1943 .ok_or_else(|| anyhow!("missing mtime"))?
1944 .into();
1945 buffer.did_save(version, mtime, None, cx);
1946 Result::<_, anyhow::Error>::Ok(())
1947 })?;
1948 }
1949 Ok(())
1950 }
1951
1952 pub fn handle_buffer_reloaded(
1953 &mut self,
1954 envelope: TypedEnvelope<proto::BufferReloaded>,
1955 _: Arc<Client>,
1956 cx: &mut ModelContext<Self>,
1957 ) -> Result<()> {
1958 let payload = envelope.payload.clone();
1959 let buffer = self
1960 .open_buffers
1961 .get(&(payload.buffer_id as usize))
1962 .and_then(|buffer| buffer.upgrade(cx));
1963 if let Some(buffer) = buffer {
1964 buffer.update(cx, |buffer, cx| {
1965 let version = payload.version.try_into()?;
1966 let mtime = payload
1967 .mtime
1968 .ok_or_else(|| anyhow!("missing mtime"))?
1969 .into();
1970 buffer.did_reload(version, mtime, cx);
1971 Result::<_, anyhow::Error>::Ok(())
1972 })?;
1973 }
1974 Ok(())
1975 }
1976
    /// Fuzzy-matches `query` against the paths of all non-weak worktrees,
    /// returning up to `max_results` matches.
    ///
    /// Worktree snapshots are captured up front so the matching itself can run
    /// on the background executor without borrowing the app state. Root names
    /// are included in the matched text only when the project has more than
    /// one worktree (so results are unambiguous).
    pub fn match_paths<'a>(
        &self,
        query: &'a str,
        include_ignored: bool,
        smart_case: bool,
        max_results: usize,
        cancel_flag: &'a AtomicBool,
        cx: &AppContext,
    ) -> impl 'a + Future<Output = Vec<PathMatch>> {
        let worktrees = self
            .worktrees(cx)
            .filter(|worktree| !worktree.read(cx).is_weak())
            .collect::<Vec<_>>();
        let include_root_name = worktrees.len() > 1;
        let candidate_sets = worktrees
            .into_iter()
            .map(|worktree| CandidateSet {
                snapshot: worktree.read(cx).snapshot(),
                include_ignored,
                include_root_name,
            })
            .collect::<Vec<_>>();

        let background = cx.background().clone();
        async move {
            fuzzy::match_paths(
                candidate_sets.as_slice(),
                query,
                smart_case,
                max_results,
                cancel_flag,
                background,
            )
            .await
        }
    }
2013}
2014
2015impl WorktreeHandle {
2016 pub fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Worktree>> {
2017 match self {
2018 WorktreeHandle::Strong(handle) => Some(handle.clone()),
2019 WorktreeHandle::Weak(handle) => handle.upgrade(cx),
2020 }
2021 }
2022}
2023
/// Adapts a worktree snapshot to the fuzzy matcher's candidate-set interface.
struct CandidateSet {
    // Immutable view of the worktree's entries at match time.
    snapshot: Snapshot,
    // Whether ignored (e.g. gitignored) files are offered as candidates.
    include_ignored: bool,
    // Whether candidate paths are prefixed with the worktree root name
    // (enabled when the project has more than one worktree).
    include_root_name: bool,
}
2029
impl<'a> PathMatchCandidateSet<'a> for CandidateSet {
    type Candidates = CandidateSetIter<'a>;

    /// Stable identifier for this candidate set — the worktree's id.
    fn id(&self) -> usize {
        self.snapshot.id().to_usize()
    }

    /// Number of candidate files, which depends on whether ignored files are
    /// being offered.
    fn len(&self) -> usize {
        if self.include_ignored {
            self.snapshot.file_count()
        } else {
            self.snapshot.visible_file_count()
        }
    }

    /// Text prepended to every candidate path before matching.
    ///
    /// A single-file worktree contributes its root file name; a multi-worktree
    /// project prefixes the root directory name (with a trailing slash);
    /// otherwise no prefix is used.
    fn prefix(&self) -> Arc<str> {
        if self.snapshot.root_entry().map_or(false, |e| e.is_file()) {
            self.snapshot.root_name().into()
        } else if self.include_root_name {
            format!("{}/", self.snapshot.root_name()).into()
        } else {
            "".into()
        }
    }

    /// Iterator over candidates beginning at index `start` (lets the matcher
    /// partition work across background threads).
    fn candidates(&'a self, start: usize) -> Self::Candidates {
        CandidateSetIter {
            traversal: self.snapshot.files(self.include_ignored, start),
        }
    }
}
2061
/// Iterator adapter turning a worktree file traversal into fuzzy-match
/// candidates (see `CandidateSet::candidates`).
struct CandidateSetIter<'a> {
    traversal: Traversal<'a>,
}
2065
2066impl<'a> Iterator for CandidateSetIter<'a> {
2067 type Item = PathMatchCandidate<'a>;
2068
2069 fn next(&mut self) -> Option<Self::Item> {
2070 self.traversal.next().map(|entry| {
2071 if let EntryKind::File(char_bag) = entry.kind {
2072 PathMatchCandidate {
2073 path: &entry.path,
2074 char_bag,
2075 }
2076 } else {
2077 unreachable!()
2078 }
2079 })
2080 }
2081}
2082
impl Entity for Project {
    type Event = Event;

    /// Notifies the server when the project model is dropped: local projects
    /// unregister themselves (if they ever obtained a remote id), guests
    /// leave the project. Failures are only logged — the model is going away
    /// regardless.
    fn release(&mut self, _: &mut gpui::MutableAppContext) {
        match &self.client_state {
            ProjectClientState::Local { remote_id_rx, .. } => {
                if let Some(project_id) = *remote_id_rx.borrow() {
                    self.client
                        .send(proto::UnregisterProject { project_id })
                        .log_err();
                }
            }
            ProjectClientState::Remote { remote_id, .. } => {
                self.client
                    .send(proto::LeaveProject {
                        project_id: *remote_id,
                    })
                    .log_err();
            }
        }
    }

    /// Gives every language server a chance to shut down cleanly before the
    /// application exits, waiting on all shutdown futures concurrently.
    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        let shutdown_futures = self
            .language_servers
            .drain()
            .filter_map(|(_, server)| server.shutdown())
            .collect::<Vec<_>>();
        Some(
            async move {
                futures::future::join_all(shutdown_futures).await;
            }
            .boxed(),
        )
    }
}
2124
2125impl Collaborator {
2126 fn from_proto(
2127 message: proto::Collaborator,
2128 user_store: &ModelHandle<UserStore>,
2129 cx: &mut AsyncAppContext,
2130 ) -> impl Future<Output = Result<Self>> {
2131 let user = user_store.update(cx, |user_store, cx| {
2132 user_store.fetch_user(message.user_id, cx)
2133 });
2134
2135 async move {
2136 Ok(Self {
2137 peer_id: PeerId(message.peer_id),
2138 user: user.await?,
2139 replica_id: message.replica_id as ReplicaId,
2140 })
2141 }
2142 }
2143}
2144
2145impl<P: AsRef<Path>> From<(WorktreeId, P)> for ProjectPath {
2146 fn from((worktree_id, path): (WorktreeId, P)) -> Self {
2147 Self {
2148 worktree_id,
2149 path: path.as_ref().into(),
2150 }
2151 }
2152}
2153
2154#[cfg(test)]
2155mod tests {
2156 use super::{Event, *};
2157 use client::test::FakeHttpClient;
2158 use fs::RealFs;
2159 use futures::StreamExt;
2160 use gpui::{test::subscribe, TestAppContext};
2161 use language::{
2162 tree_sitter_rust, AnchorRangeExt, Diagnostic, LanguageConfig, LanguageRegistry,
2163 LanguageServerConfig, Point,
2164 };
2165 use lsp::Url;
2166 use serde_json::json;
2167 use std::{cell::RefCell, os::unix, path::PathBuf, rc::Rc};
2168 use unindent::Unindent as _;
2169 use util::test::temp_tree;
2170 use worktree::WorktreeHandle as _;
2171
    /// Builds a small directory tree (including symlinks), opens it as a
    /// worktree through a symlinked root, and verifies that fuzzy path
    /// matching sees the expected files.
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        // Open the tree through a symlink to the root, and add a directory
        // symlink inside it, to exercise symlink traversal.
        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let project = build_project(Arc::new(RealFs), &mut cx);

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(&root_link_path, false, cx)
            })
            .await
            .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            // 3 real files + 2 seen again through the `finnochio` symlink.
            assert_eq!(tree.file_count(), 5);
            // The symlinked directory's files share inodes with the originals.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );
        });

        let cancel_flag = Default::default();
        let results = project
            .read_with(&cx, |project, cx| {
                project.match_paths("bna", false, false, 10, &cancel_flag, cx)
            })
            .await;
        // "bna" should fuzzy-match only the files under `banana/`.
        assert_eq!(
            results
                .into_iter()
                .map(|result| result.path)
                .collect::<Vec<Arc<Path>>>(),
            vec![
                PathBuf::from("banana/carrot/date").into(),
                PathBuf::from("banana/carrot/endive").into(),
            ]
        );
    }
2234
    // End-to-end check of LSP diagnostic handling: server progress
    // notifications map to DiskBasedDiagnostics* events, published
    // diagnostics produce DiagnosticsUpdated events, and the diagnostic
    // content lands in the corresponding buffer.
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();

        // Register a Rust language whose server is the fake one above.
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        path: Path::new("b.rs").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();

        let mut events = subscribe(&project, &mut cx);

        // The first progress start should emit DiskBasedDiagnosticsStarted.
        fake_server.start_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsStarted
        );

        // Additional start/end pairs while progress is already underway — the
        // test asserts below that no extra Started event is observed for them.
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        // Publish one diagnostic for a.rs while progress is still in flight.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiagnosticsUpdated(ProjectPath {
                worktree_id,
                path: Arc::from(Path::new("a.rs"))
            })
        );

        // Ending the outstanding progress tokens finishes the diagnostics pass.
        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdated
        );
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsFinished
        );

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        // The published diagnostic should now be present in the buffer.
        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
2367
    // A worktree containing only directories has nothing to match: path
    // search over it must return no results rather than directory entries.
    #[gpui::test]
    async fn test_search_worktree_without_files(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "dir1": {},
                "dir2": {
                    "dir3": {}
                }
            }
        }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(&dir.path(), false, cx)
            })
            .await
            .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // "dir" matches every directory name, but only files are candidates.
        let cancel_flag = Default::default();
        let results = project
            .read_with(&cx, |project, cx| {
                project.match_paths("dir", false, false, 10, &cancel_flag, cx)
            })
            .await;

        assert!(results.is_empty());
    }
2399
    // Go-to-definition across files: a definition response pointing at a file
    // outside the current worktree opens that file in a new weak worktree,
    // which is dropped again once the definition result is released.
    #[gpui::test]
    async fn test_definition(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;

        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "const fn a() { A }",
            "b.rs": "const y: i32 = crate::a()",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
        let project = cx.update(|cx| {
            Project::local(
                client,
                user_store,
                Arc::new(languages),
                Arc::new(RealFs),
                cx,
            )
        });

        // The worktree is rooted at the single file b.rs, so a.rs is outside it.
        let (tree, _) = project
            .update(&mut cx, |project, cx| {
                project.find_or_create_local_worktree(dir.path().join("b.rs"), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let buffer = project
            .update(&mut cx, |project, cx| {
                project.open_buffer(
                    ProjectPath {
                        worktree_id,
                        path: Path::new("").into(),
                    },
                    cx,
                )
            })
            .await
            .unwrap();
        let definitions =
            project.update(&mut cx, |project, cx| project.definition(&buffer, 22, cx));
        // The project should translate the request into a GotoDefinition call
        // at the equivalent LSP position.
        let (request_id, request) = fake_server
            .receive_request::<lsp::request::GotoDefinition>()
            .await;
        let request_params = request.text_document_position_params;
        assert_eq!(
            request_params.text_document.uri.to_file_path().unwrap(),
            dir.path().join("b.rs")
        );
        assert_eq!(request_params.position, lsp::Position::new(0, 22));

        // Respond with a location in a.rs, which is not part of any worktree yet.
        fake_server
            .respond(
                request_id,
                Some(lsp::GotoDefinitionResponse::Scalar(lsp::Location::new(
                    lsp::Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                    lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                ))),
            )
            .await;
        let mut definitions = definitions.await.unwrap();
        assert_eq!(definitions.len(), 1);
        let definition = definitions.pop().unwrap();
        cx.update(|cx| {
            let target_buffer = definition.target_buffer.read(cx);
            assert_eq!(
                target_buffer
                    .file()
                    .unwrap()
                    .as_local()
                    .unwrap()
                    .abs_path(cx),
                dir.path().join("a.rs")
            );
            assert_eq!(definition.target_range.to_offset(target_buffer), 9..10);
            // a.rs was opened in a second, weak worktree.
            assert_eq!(
                list_worktrees(&project, cx),
                [
                    (dir.path().join("b.rs"), false),
                    (dir.path().join("a.rs"), true)
                ]
            );

            drop(definition);
        });
        // Dropping the definition releases the weak worktree for a.rs.
        cx.read(|cx| {
            assert_eq!(
                list_worktrees(&project, cx),
                [(dir.path().join("b.rs"), false)]
            );
        });

        // Helper: all worktrees in the project as (absolute path, is_weak).
        fn list_worktrees(project: &ModelHandle<Project>, cx: &AppContext) -> Vec<(PathBuf, bool)> {
            project
                .read(cx)
                .worktrees(cx)
                .map(|worktree| {
                    let worktree = worktree.read(cx);
                    (
                        worktree.as_local().unwrap().abs_path().to_path_buf(),
                        worktree.is_weak(),
                    )
                })
                .collect::<Vec<_>>()
        }
    }
2524
    // Saving an edited buffer writes its full contents back to the
    // (fake) filesystem.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new(cx.background()));
        fs.insert_tree(
            "/dir",
            json!({
                "file1": "the old contents",
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let worktree_id = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/dir", false, cx)
            })
            .await
            .unwrap()
            .0
            .read_with(&cx, |tree, _| tree.id());

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
            .await
            .unwrap();
        // Prepend a large amount of text so the save is non-trivial in size.
        buffer
            .update(&mut cx, |buffer, cx| {
                assert_eq!(buffer.text(), "the old contents");
                buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
                buffer.save(cx)
            })
            .await
            .unwrap();

        // The file on disk must now match the buffer exactly.
        let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
2562
    // Same as test_save_file, but the worktree is rooted at the file itself,
    // so the buffer is opened with an empty relative path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new(cx.background()));
        fs.insert_tree(
            "/dir",
            json!({
                "file1": "the old contents",
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let worktree_id = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/dir/file1", false, cx)
            })
            .await
            .unwrap()
            .0
            .read_with(&cx, |tree, _| tree.id());

        // An empty path addresses the root of a single-file worktree.
        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, ""), cx))
            .await
            .unwrap();
        buffer
            .update(&mut cx, |buffer, cx| {
                buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
                buffer.save(cx)
            })
            .await
            .unwrap();

        let new_text = fs.load(Path::new("/dir/file1")).await.unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
2599
    // After files are renamed/moved/deleted on disk, the worktree rescan must
    // preserve entry identity across renames, update open buffers' paths, and
    // a remote replica applying the resulting update must converge to the
    // same set of paths. Retries because it depends on real FS event timing.
    #[gpui::test(retries = 5)]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let rpc = project.read_with(&cx, |p, _| p.client.clone());

        let (tree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = tree.read_with(&cx, |tree, _| tree.id());

        // Helper: open a buffer for a worktree-relative path.
        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = project.update(cx, |p, cx| p.open_buffer((worktree_id, path), cx));
            async move { buffer.await.unwrap() }
        };
        // Helper: look up a path's stable entry id.
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let (remote, load_task) = cx.update(|cx| {
            Worktree::remote(
                1,
                1,
                initial_snapshot.to_proto(&Default::default(), Default::default()),
                rpc.clone(),
                cx,
            )
        });
        load_task.await;

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Entry ids survive renames (including renames of ancestors).
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            // Open buffers track their files' new locations.
            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            // Only the removed file's buffer is marked deleted.
            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message =
                tree.read(cx)
                    .snapshot()
                    .build_update(&initial_snapshot, 1, 1, true);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_remote_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }
2749
    // Opening the same path multiple times — concurrently or sequentially —
    // must always yield the same buffer instance.
    #[gpui::test]
    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new(cx.background()));
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.txt": "a-contents",
                "b.txt": "b-contents",
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let worktree_id = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/the-dir", false, cx)
            })
            .await
            .unwrap()
            .0
            .read_with(&cx, |tree, _| tree.id());

        // Spawn multiple tasks to open paths, repeating some paths.
        let (buffer_a_1, buffer_b, buffer_a_2) = project.update(&mut cx, |p, cx| {
            (
                p.open_buffer((worktree_id, "a.txt"), cx),
                p.open_buffer((worktree_id, "b.txt"), cx),
                p.open_buffer((worktree_id, "a.txt"), cx),
            )
        });

        let buffer_a_1 = buffer_a_1.await.unwrap();
        let buffer_a_2 = buffer_a_2.await.unwrap();
        let buffer_b = buffer_b.await.unwrap();
        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");

        // There is only one buffer per path.
        let buffer_a_id = buffer_a_1.id();
        assert_eq!(buffer_a_2.id(), buffer_a_id);

        // Open the same path again while it is still open.
        drop(buffer_a_1);
        let buffer_a_3 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.txt"), cx))
            .await
            .unwrap();

        // There's still only one buffer per path.
        assert_eq!(buffer_a_3.id(), buffer_a_id);
    }
2801
    // Exercises the buffer dirty-state machine: edits dirty a buffer and emit
    // Dirtied, saving cleans it and emits Saved, and deleting the underlying
    // file dirties a clean buffer (but emits no Dirtied for an already-dirty
    // one).
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |worktree, _| worktree.id());

        worktree.flush_fs_events(&cx).await;
        worktree
            .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
            .await;

        let buffer1 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file1"), cx))
            .await
            .unwrap();
        // Collect buffer events so each phase can assert on what was emitted.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file2"), cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "file3"), cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        worktree.flush_fs_events(&cx).await;
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
2938
    // Disk changes vs. buffer state: a clean buffer reloads silently from
    // disk, while a dirty buffer keeps its edits and is flagged as being in
    // conflict with the file.
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));

        let project = build_project(Arc::new(RealFs), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree(dir.path(), false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        worktree
            .read_with(&cx, |t, _| t.as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = project
            .update(&mut cx, |p, cx| {
                p.open_buffer((worktree_id, "the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3038
    // Diagnostics with related-information links must be grouped: each primary
    // diagnostic and its hints share a group_id, with exactly one entry per
    // group marked is_primary, and diagnostic_group() returns each group.
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new(cx.background()));
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let project = build_project(fs.clone(), &mut cx);
        let (worktree, _) = project
            .update(&mut cx, |p, cx| {
                p.find_or_create_local_worktree("/the-dir", false, cx)
            })
            .await
            .unwrap();
        let worktree_id = worktree.read_with(&cx, |tree, _| tree.id());

        let buffer = project
            .update(&mut cx, |p, cx| p.open_buffer((worktree_id, "a.rs"), cx))
            .await
            .unwrap();

        // Two logical errors, each accompanied by HINT diagnostics whose
        // related_information points back at the primary diagnostic.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        project
            .update(&mut cx, |p, cx| {
                p.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All entries, ordered by position: group 0 is error 1 + its hint,
        // group 1 is error 2 + its two hints.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be fetched individually by its id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
3297
3298 fn build_project(fs: Arc<dyn Fs>, cx: &mut TestAppContext) -> ModelHandle<Project> {
3299 let languages = Arc::new(LanguageRegistry::new());
3300 let http_client = FakeHttpClient::with_404_response();
3301 let client = client::Client::new(http_client.clone());
3302 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3303 cx.update(|cx| Project::local(client, user_store, languages, fs, cx))
3304 }
3305}