1mod store;
2
3use super::{
4 auth,
5 db::{ChannelId, MessageId, UserId},
6 AppState,
7};
8use anyhow::anyhow;
9use async_std::{sync::RwLock, task};
10use async_tungstenite::{tungstenite::protocol::Role, WebSocketStream};
11use futures::{future::BoxFuture, FutureExt};
12use postage::{mpsc, prelude::Sink as _, prelude::Stream as _};
13use sha1::{Digest as _, Sha1};
14use std::{
15 any::TypeId,
16 collections::{HashMap, HashSet},
17 future::Future,
18 mem,
19 sync::Arc,
20 time::Instant,
21};
22use store::{Store, Worktree};
23use surf::StatusCode;
24use tide::log;
25use tide::{
26 http::headers::{HeaderName, CONNECTION, UPGRADE},
27 Request, Response,
28};
29use time::OffsetDateTime;
30use zrpc::{
31 proto::{self, AnyTypedEnvelope, EnvelopedMessage},
32 Connection, ConnectionId, Peer, TypedEnvelope,
33};
34
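/// A type-erased message handler: it receives the server and a boxed message envelope and
/// returns a boxed future, which lets handlers for different `proto` message types share one
/// dispatch table keyed by `TypeId`.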
35type MessageHandler = Box<
36 dyn Send
37 + Sync
38 + Fn(Arc<Server>, Box<dyn AnyTypedEnvelope>) -> BoxFuture<'static, tide::Result<()>>,
39>;
40
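/// The collaboration server. It owns the RPC `Peer`, an in-memory `Store` of connections,
/// worktrees, and channels, the shared application state (`AppState`), and the table of
/// registered message handlers. `notifications` is an optional channel that tests use to
/// observe when a message has finished being handled.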
41pub struct Server {
42 peer: Arc<Peer>,
43 store: RwLock<Store>,
44 app_state: Arc<AppState>,
45 handlers: HashMap<TypeId, MessageHandler>,
46 notifications: Option<mpsc::Sender<()>>,
47}
48
/// Number of channel messages returned per page when loading channel history.
const MESSAGE_COUNT_PER_PAGE: usize = 100;
/// Maximum accepted length of a chat message body, in bytes.
const MAX_MESSAGE_LEN: usize = 1024;
51
52impl Server {
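    /// Creates the server and registers a handler for every RPC message type it services.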
53 pub fn new(
54 app_state: Arc<AppState>,
55 peer: Arc<Peer>,
56 notifications: Option<mpsc::Sender<()>>,
57 ) -> Arc<Self> {
58 let mut server = Self {
59 peer,
60 app_state,
61 store: Default::default(),
62 handlers: Default::default(),
63 notifications,
64 };
65
66 server
67 .add_handler(Server::ping)
68 .add_handler(Server::open_worktree)
69 .add_handler(Server::close_worktree)
70 .add_handler(Server::share_worktree)
71 .add_handler(Server::unshare_worktree)
72 .add_handler(Server::join_worktree)
73 .add_handler(Server::leave_worktree)
74 .add_handler(Server::update_worktree)
75 .add_handler(Server::open_buffer)
76 .add_handler(Server::close_buffer)
77 .add_handler(Server::update_buffer)
78 .add_handler(Server::buffer_saved)
79 .add_handler(Server::save_buffer)
80 .add_handler(Server::get_channels)
81 .add_handler(Server::get_users)
82 .add_handler(Server::join_channel)
83 .add_handler(Server::leave_channel)
84 .add_handler(Server::send_channel_message)
85 .add_handler(Server::get_channel_messages);
86
87 Arc::new(server)
88 }
89
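    /// Registers `handler` for incoming messages of type `M`, panicking if a handler for that
    /// message type was already registered. A minimal sketch of a call site (registration
    /// actually happens in `Server::new` above):
    ///
    /// ```ignore
    /// server.add_handler(Server::ping);
    /// ```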
90 fn add_handler<F, Fut, M>(&mut self, handler: F) -> &mut Self
91 where
92 F: 'static + Send + Sync + Fn(Arc<Self>, TypedEnvelope<M>) -> Fut,
93 Fut: 'static + Send + Future<Output = tide::Result<()>>,
94 M: EnvelopedMessage,
95 {
96 let prev_handler = self.handlers.insert(
97 TypeId::of::<M>(),
98 Box::new(move |server, envelope| {
99 let envelope = envelope.into_any().downcast::<TypedEnvelope<M>>().unwrap();
100 (handler)(server, *envelope).boxed()
101 }),
102 );
103 if prev_handler.is_some() {
104 panic!("registered a handler for the same message twice");
105 }
106 self
107 }
108
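    /// Drives a single client connection: registers it with the store, notifies the user's
    /// collaborators, then dispatches incoming messages to the registered handlers until the
    /// message stream ends or the connection's I/O task finishes, and finally signs the
    /// connection out.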
109 pub fn handle_connection(
110 self: &Arc<Self>,
111 connection: Connection,
112 addr: String,
113 user_id: UserId,
114 ) -> impl Future<Output = ()> {
115 let this = self.clone();
116 async move {
117 let (connection_id, handle_io, mut incoming_rx) =
118 this.peer.add_connection(connection).await;
119 this.store
120 .write()
121 .await
122 .add_connection(connection_id, user_id);
123 if let Err(err) = this.update_collaborators_for_users(&[user_id]).await {
124 log::error!("error updating collaborators for {:?}: {}", user_id, err);
125 }
126
127 let handle_io = handle_io.fuse();
128 futures::pin_mut!(handle_io);
129 loop {
130 let next_message = incoming_rx.recv().fuse();
131 futures::pin_mut!(next_message);
132 futures::select_biased! {
133 message = next_message => {
134 if let Some(message) = message {
135 let start_time = Instant::now();
136 log::info!("RPC message received: {}", message.payload_type_name());
137 if let Some(handler) = this.handlers.get(&message.payload_type_id()) {
138 if let Err(err) = (handler)(this.clone(), message).await {
139 log::error!("error handling message: {:?}", err);
140 } else {
141 log::info!("RPC message handled. duration:{:?}", start_time.elapsed());
142 }
143
144 if let Some(mut notifications) = this.notifications.clone() {
145 let _ = notifications.send(()).await;
146 }
147 } else {
148 log::warn!("unhandled message: {}", message.payload_type_name());
149 }
150 } else {
151 log::info!("rpc connection closed {:?}", addr);
152 break;
153 }
154 }
155 handle_io = handle_io => {
156 if let Err(err) = handle_io {
157 log::error!("error handling rpc connection {:?} - {:?}", addr, err);
158 }
159 break;
160 }
161 }
162 }
163
164 if let Err(err) = this.sign_out(connection_id).await {
165 log::error!("error signing out connection {:?} - {:?}", addr, err);
166 }
167 }
168 }
169
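    /// Tears down a connection's state: disconnects the peer, removes it from the store,
    /// notifies guests that any worktrees it hosted are no longer shared, removes it as a peer
    /// from worktrees it was a guest of, and refreshes collaborator lists for affected users.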
170 async fn sign_out(self: &Arc<Self>, connection_id: ConnectionId) -> tide::Result<()> {
171 self.peer.disconnect(connection_id).await;
172 let removed_connection = self.store.write().await.remove_connection(connection_id)?;
173
174 for (worktree_id, worktree) in removed_connection.hosted_worktrees {
175 if let Some(share) = worktree.share {
176 broadcast(
177 connection_id,
178 share.guest_connection_ids.keys().copied().collect(),
179 |conn_id| {
180 self.peer
181 .send(conn_id, proto::UnshareWorktree { worktree_id })
182 },
183 )
184 .await?;
185 }
186 }
187
188 for (worktree_id, peer_ids) in removed_connection.guest_worktree_ids {
189 broadcast(connection_id, peer_ids, |conn_id| {
190 self.peer.send(
191 conn_id,
192 proto::RemovePeer {
193 worktree_id,
194 peer_id: connection_id.0,
195 },
196 )
197 })
198 .await?;
199 }
200
201 self.update_collaborators_for_users(removed_connection.collaborator_ids.iter())
202 .await?;
203
204 Ok(())
205 }
206
207 async fn ping(self: Arc<Server>, request: TypedEnvelope<proto::Ping>) -> tide::Result<()> {
208 self.peer.respond(request.receipt(), proto::Ack {}).await?;
209 Ok(())
210 }
211
212 async fn open_worktree(
213 self: Arc<Server>,
214 request: TypedEnvelope<proto::OpenWorktree>,
215 ) -> tide::Result<()> {
216 let receipt = request.receipt();
217 let host_user_id = self
218 .store
219 .read()
220 .await
221 .user_id_for_connection(request.sender_id)?;
222
223 let mut collaborator_user_ids = HashSet::new();
224 collaborator_user_ids.insert(host_user_id);
225 for github_login in request.payload.collaborator_logins {
226 match self.app_state.db.create_user(&github_login, false).await {
227 Ok(collaborator_user_id) => {
228 collaborator_user_ids.insert(collaborator_user_id);
229 }
230 Err(err) => {
231 let message = err.to_string();
232 self.peer
233 .respond_with_error(receipt, proto::Error { message })
234 .await?;
235 return Ok(());
236 }
237 }
238 }
239
240 let collaborator_user_ids = collaborator_user_ids.into_iter().collect::<Vec<_>>();
241 let worktree_id = self.store.write().await.add_worktree(Worktree {
242 host_connection_id: request.sender_id,
243 collaborator_user_ids: collaborator_user_ids.clone(),
244 root_name: request.payload.root_name,
245 share: None,
246 });
247
248 self.peer
249 .respond(receipt, proto::OpenWorktreeResponse { worktree_id })
250 .await?;
251 self.update_collaborators_for_users(&collaborator_user_ids)
252 .await?;
253
254 Ok(())
255 }
256
257 async fn close_worktree(
258 self: Arc<Server>,
259 request: TypedEnvelope<proto::CloseWorktree>,
260 ) -> tide::Result<()> {
261 let worktree_id = request.payload.worktree_id;
262 let worktree = self
263 .store
264 .write()
265 .await
266 .remove_worktree(worktree_id, request.sender_id)?;
267
268 if let Some(share) = worktree.share {
269 broadcast(
270 request.sender_id,
271 share.guest_connection_ids.keys().copied().collect(),
272 |conn_id| {
273 self.peer
274 .send(conn_id, proto::UnshareWorktree { worktree_id })
275 },
276 )
277 .await?;
278 }
279 self.update_collaborators_for_users(&worktree.collaborator_user_ids)
280 .await?;
281 Ok(())
282 }
283
284 async fn share_worktree(
285 self: Arc<Server>,
286 mut request: TypedEnvelope<proto::ShareWorktree>,
287 ) -> tide::Result<()> {
288 let worktree = request
289 .payload
290 .worktree
291 .as_mut()
292 .ok_or_else(|| anyhow!("missing worktree"))?;
293 let entries = mem::take(&mut worktree.entries)
294 .into_iter()
295 .map(|entry| (entry.id, entry))
296 .collect();
297
298 if let Some(collaborator_user_ids) =
299 self.store
300 .write()
301 .await
302 .share_worktree(worktree.id, request.sender_id, entries)
303 {
304 self.peer
305 .respond(request.receipt(), proto::ShareWorktreeResponse {})
306 .await?;
307 self.update_collaborators_for_users(&collaborator_user_ids)
308 .await?;
309 } else {
310 self.peer
311 .respond_with_error(
312 request.receipt(),
313 proto::Error {
314 message: "no such worktree".to_string(),
315 },
316 )
317 .await?;
318 }
319 Ok(())
320 }
321
322 async fn unshare_worktree(
323 self: Arc<Server>,
324 request: TypedEnvelope<proto::UnshareWorktree>,
325 ) -> tide::Result<()> {
326 let worktree_id = request.payload.worktree_id;
327 let (connection_ids, collaborator_user_ids) = self
328 .store
329 .write()
330 .await
331 .unshare_worktree(worktree_id, request.sender_id)?;
332
333 broadcast(request.sender_id, connection_ids, |conn_id| {
334 self.peer
335 .send(conn_id, proto::UnshareWorktree { worktree_id })
336 })
337 .await?;
338 self.update_collaborators_for_users(&collaborator_user_ids)
339 .await?;
340
341 Ok(())
342 }
343
344 async fn join_worktree(
345 self: Arc<Server>,
346 request: TypedEnvelope<proto::JoinWorktree>,
347 ) -> tide::Result<()> {
348 let worktree_id = request.payload.worktree_id;
349 let user_id = self
350 .store
351 .read()
352 .await
353 .user_id_for_connection(request.sender_id)?;
354
355 let response;
356 let connection_ids;
357 let collaborator_user_ids;
358 let mut state = self.store.write().await;
359 match state.join_worktree(request.sender_id, user_id, worktree_id) {
360 Ok((peer_replica_id, worktree)) => {
361 let share = worktree.share()?;
362 let peer_count = share.guest_connection_ids.len();
363 let mut peers = Vec::with_capacity(peer_count);
364 peers.push(proto::Peer {
365 peer_id: worktree.host_connection_id.0,
366 replica_id: 0,
367 });
368 for (peer_conn_id, peer_replica_id) in &share.guest_connection_ids {
369 if *peer_conn_id != request.sender_id {
370 peers.push(proto::Peer {
371 peer_id: peer_conn_id.0,
372 replica_id: *peer_replica_id as u32,
373 });
374 }
375 }
376 response = proto::JoinWorktreeResponse {
377 worktree: Some(proto::Worktree {
378 id: worktree_id,
379 root_name: worktree.root_name.clone(),
380 entries: share.entries.values().cloned().collect(),
381 }),
382 replica_id: peer_replica_id as u32,
383 peers,
384 };
385 connection_ids = worktree.connection_ids();
386 collaborator_user_ids = worktree.collaborator_user_ids.clone();
387 }
388 Err(error) => {
389 self.peer
390 .respond_with_error(
391 request.receipt(),
392 proto::Error {
393 message: error.to_string(),
394 },
395 )
396 .await?;
397 return Ok(());
398 }
399 }
400
401 drop(state);
402 broadcast(request.sender_id, connection_ids, |conn_id| {
403 self.peer.send(
404 conn_id,
405 proto::AddPeer {
406 worktree_id,
407 peer: Some(proto::Peer {
408 peer_id: request.sender_id.0,
409 replica_id: response.replica_id,
410 }),
411 },
412 )
413 })
414 .await?;
415 self.peer.respond(request.receipt(), response).await?;
416 self.update_collaborators_for_users(&collaborator_user_ids)
417 .await?;
418
419 Ok(())
420 }
421
422 async fn leave_worktree(
423 self: Arc<Server>,
424 request: TypedEnvelope<proto::LeaveWorktree>,
425 ) -> tide::Result<()> {
426 let sender_id = request.sender_id;
427 let worktree_id = request.payload.worktree_id;
428
429 if let Some((connection_ids, collaborator_ids)) = self
430 .store
431 .write()
432 .await
433 .leave_worktree(sender_id, worktree_id)
434 {
435 broadcast(sender_id, connection_ids, |conn_id| {
436 self.peer.send(
437 conn_id,
438 proto::RemovePeer {
439 worktree_id,
440 peer_id: sender_id.0,
441 },
442 )
443 })
444 .await?;
445 self.update_collaborators_for_users(&collaborator_ids)
446 .await?;
447 }
448 Ok(())
449 }
450
451 async fn update_worktree(
452 self: Arc<Server>,
453 request: TypedEnvelope<proto::UpdateWorktree>,
454 ) -> tide::Result<()> {
455 let connection_ids = self.store.write().await.update_worktree(
456 request.sender_id,
457 request.payload.worktree_id,
458 &request.payload.removed_entries,
459 &request.payload.updated_entries,
460 )?;
461
462 broadcast(request.sender_id, connection_ids, |connection_id| {
463 self.peer
464 .forward_send(request.sender_id, connection_id, request.payload.clone())
465 })
466 .await?;
467
468 Ok(())
469 }
470
471 async fn open_buffer(
472 self: Arc<Server>,
473 request: TypedEnvelope<proto::OpenBuffer>,
474 ) -> tide::Result<()> {
475 let receipt = request.receipt();
476 let host_connection_id = self
477 .store
478 .read()
479 .await
480 .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?;
481 let response = self
482 .peer
483 .forward_request(request.sender_id, host_connection_id, request.payload)
484 .await?;
485 self.peer.respond(receipt, response).await?;
486 Ok(())
487 }
488
489 async fn close_buffer(
490 self: Arc<Server>,
491 request: TypedEnvelope<proto::CloseBuffer>,
492 ) -> tide::Result<()> {
493 let host_connection_id = self
494 .store
495 .read()
496 .await
497 .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?;
498 self.peer
499 .forward_send(request.sender_id, host_connection_id, request.payload)
500 .await?;
501 Ok(())
502 }
503
504 async fn save_buffer(
505 self: Arc<Server>,
506 request: TypedEnvelope<proto::SaveBuffer>,
507 ) -> tide::Result<()> {
508 let host;
509 let guests;
510 {
511 let state = self.store.read().await;
512 host = state
513 .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?;
514 guests = state
515 .worktree_guest_connection_ids(request.sender_id, request.payload.worktree_id)?;
516 }
517
518 let sender = request.sender_id;
519 let receipt = request.receipt();
520 let response = self
521 .peer
522 .forward_request(sender, host, request.payload.clone())
523 .await?;
524
525 broadcast(host, guests, |conn_id| {
526 let response = response.clone();
527 let peer = &self.peer;
528 async move {
529 if conn_id == sender {
530 peer.respond(receipt, response).await
531 } else {
532 peer.forward_send(host, conn_id, response).await
533 }
534 }
535 })
536 .await?;
537
538 Ok(())
539 }
540
541 async fn update_buffer(
542 self: Arc<Server>,
543 request: TypedEnvelope<proto::UpdateBuffer>,
544 ) -> tide::Result<()> {
545 broadcast(
546 request.sender_id,
547 self.store
548 .read()
549 .await
550 .worktree_connection_ids(request.sender_id, request.payload.worktree_id)?,
551 |connection_id| {
552 self.peer
553 .forward_send(request.sender_id, connection_id, request.payload.clone())
554 },
555 )
556 .await?;
557 self.peer.respond(request.receipt(), proto::Ack {}).await?;
558 Ok(())
559 }
560
561 async fn buffer_saved(
562 self: Arc<Server>,
563 request: TypedEnvelope<proto::BufferSaved>,
564 ) -> tide::Result<()> {
565 broadcast(
566 request.sender_id,
567 self.store
568 .read()
569 .await
570 .worktree_connection_ids(request.sender_id, request.payload.worktree_id)?,
571 |connection_id| {
572 self.peer
573 .forward_send(request.sender_id, connection_id, request.payload.clone())
574 },
575 )
576 .await?;
577 Ok(())
578 }
579
580 async fn get_channels(
581 self: Arc<Server>,
582 request: TypedEnvelope<proto::GetChannels>,
583 ) -> tide::Result<()> {
584 let user_id = self
585 .store
586 .read()
587 .await
588 .user_id_for_connection(request.sender_id)?;
589 let channels = self.app_state.db.get_accessible_channels(user_id).await?;
590 self.peer
591 .respond(
592 request.receipt(),
593 proto::GetChannelsResponse {
594 channels: channels
595 .into_iter()
596 .map(|chan| proto::Channel {
597 id: chan.id.to_proto(),
598 name: chan.name,
599 })
600 .collect(),
601 },
602 )
603 .await?;
604 Ok(())
605 }
606
607 async fn get_users(
608 self: Arc<Server>,
609 request: TypedEnvelope<proto::GetUsers>,
610 ) -> tide::Result<()> {
611 let receipt = request.receipt();
612 let user_ids = request.payload.user_ids.into_iter().map(UserId::from_proto);
613 let users = self
614 .app_state
615 .db
616 .get_users_by_ids(user_ids)
617 .await?
618 .into_iter()
619 .map(|user| proto::User {
620 id: user.id.to_proto(),
621 avatar_url: format!("https://github.com/{}.png?size=128", user.github_login),
622 github_login: user.github_login,
623 })
624 .collect();
625 self.peer
626 .respond(receipt, proto::GetUsersResponse { users })
627 .await?;
628 Ok(())
629 }
630
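    /// Sends an `UpdateCollaborators` message, reflecting the current contents of the store,
    /// to every connection belonging to each of the given users.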
631 async fn update_collaborators_for_users<'a>(
632 self: &Arc<Server>,
633 user_ids: impl IntoIterator<Item = &'a UserId>,
634 ) -> tide::Result<()> {
635 let mut send_futures = Vec::new();
636
637 let state = self.store.read().await;
638 for user_id in user_ids {
639 let collaborators = state.collaborators_for_user(*user_id);
640 for connection_id in state.connection_ids_for_user(*user_id) {
641 send_futures.push(self.peer.send(
642 connection_id,
643 proto::UpdateCollaborators {
644 collaborators: collaborators.clone(),
645 },
646 ));
647 }
648 }
649
650 drop(state);
651 futures::future::try_join_all(send_futures).await?;
652
653 Ok(())
654 }
655
656 async fn join_channel(
657 self: Arc<Self>,
658 request: TypedEnvelope<proto::JoinChannel>,
659 ) -> tide::Result<()> {
660 let user_id = self
661 .store
662 .read()
663 .await
664 .user_id_for_connection(request.sender_id)?;
665 let channel_id = ChannelId::from_proto(request.payload.channel_id);
666 if !self
667 .app_state
668 .db
669 .can_user_access_channel(user_id, channel_id)
670 .await?
671 {
672 Err(anyhow!("access denied"))?;
673 }
674
675 self.store
676 .write()
677 .await
678 .join_channel(request.sender_id, channel_id);
679 let messages = self
680 .app_state
681 .db
682 .get_channel_messages(channel_id, MESSAGE_COUNT_PER_PAGE, None)
683 .await?
684 .into_iter()
685 .map(|msg| proto::ChannelMessage {
686 id: msg.id.to_proto(),
687 body: msg.body,
688 timestamp: msg.sent_at.unix_timestamp() as u64,
689 sender_id: msg.sender_id.to_proto(),
690 nonce: Some(msg.nonce.as_u128().into()),
691 })
692 .collect::<Vec<_>>();
693 self.peer
694 .respond(
695 request.receipt(),
696 proto::JoinChannelResponse {
697 done: messages.len() < MESSAGE_COUNT_PER_PAGE,
698 messages,
699 },
700 )
701 .await?;
702 Ok(())
703 }
704
705 async fn leave_channel(
706 self: Arc<Self>,
707 request: TypedEnvelope<proto::LeaveChannel>,
708 ) -> tide::Result<()> {
709 let user_id = self
710 .store
711 .read()
712 .await
713 .user_id_for_connection(request.sender_id)?;
714 let channel_id = ChannelId::from_proto(request.payload.channel_id);
715 if !self
716 .app_state
717 .db
718 .can_user_access_channel(user_id, channel_id)
719 .await?
720 {
721 Err(anyhow!("access denied"))?;
722 }
723
724 self.store
725 .write()
726 .await
727 .leave_channel(request.sender_id, channel_id);
728
729 Ok(())
730 }
731
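    /// Validates and persists a chat message, broadcasts it to the channel's other members, and
    /// acknowledges it to the sender. The body is trimmed and must be non-empty and at most
    /// `MAX_MESSAGE_LEN` bytes, and the client must supply a nonce.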
732 async fn send_channel_message(
733 self: Arc<Self>,
734 request: TypedEnvelope<proto::SendChannelMessage>,
735 ) -> tide::Result<()> {
736 let receipt = request.receipt();
737 let channel_id = ChannelId::from_proto(request.payload.channel_id);
738 let user_id;
739 let connection_ids;
740 {
741 let state = self.store.read().await;
742 user_id = state.user_id_for_connection(request.sender_id)?;
743 if let Some(ids) = state.channel_connection_ids(channel_id) {
744 connection_ids = ids;
745 } else {
746 return Ok(());
747 }
748 }
749
750 // Validate the message body.
751 let body = request.payload.body.trim().to_string();
752 if body.len() > MAX_MESSAGE_LEN {
753 self.peer
754 .respond_with_error(
755 receipt,
756 proto::Error {
757 message: "message is too long".to_string(),
758 },
759 )
760 .await?;
761 return Ok(());
762 }
763 if body.is_empty() {
764 self.peer
765 .respond_with_error(
766 receipt,
767 proto::Error {
768 message: "message can't be blank".to_string(),
769 },
770 )
771 .await?;
772 return Ok(());
773 }
774
775 let timestamp = OffsetDateTime::now_utc();
776 let nonce = if let Some(nonce) = request.payload.nonce {
777 nonce
778 } else {
779 self.peer
780 .respond_with_error(
781 receipt,
782 proto::Error {
783 message: "nonce can't be blank".to_string(),
784 },
785 )
786 .await?;
787 return Ok(());
788 };
789
790 let message_id = self
791 .app_state
792 .db
793 .create_channel_message(channel_id, user_id, &body, timestamp, nonce.clone().into())
794 .await?
795 .to_proto();
796 let message = proto::ChannelMessage {
797 sender_id: user_id.to_proto(),
798 id: message_id,
799 body,
800 timestamp: timestamp.unix_timestamp() as u64,
801 nonce: Some(nonce),
802 };
803 broadcast(request.sender_id, connection_ids, |conn_id| {
804 self.peer.send(
805 conn_id,
806 proto::ChannelMessageSent {
807 channel_id: channel_id.to_proto(),
808 message: Some(message.clone()),
809 },
810 )
811 })
812 .await?;
813 self.peer
814 .respond(
815 receipt,
816 proto::SendChannelMessageResponse {
817 message: Some(message),
818 },
819 )
820 .await?;
821 Ok(())
822 }
823
824 async fn get_channel_messages(
825 self: Arc<Self>,
826 request: TypedEnvelope<proto::GetChannelMessages>,
827 ) -> tide::Result<()> {
828 let user_id = self
829 .store
830 .read()
831 .await
832 .user_id_for_connection(request.sender_id)?;
833 let channel_id = ChannelId::from_proto(request.payload.channel_id);
834 if !self
835 .app_state
836 .db
837 .can_user_access_channel(user_id, channel_id)
838 .await?
839 {
840 Err(anyhow!("access denied"))?;
841 }
842
843 let messages = self
844 .app_state
845 .db
846 .get_channel_messages(
847 channel_id,
848 MESSAGE_COUNT_PER_PAGE,
849 Some(MessageId::from_proto(request.payload.before_message_id)),
850 )
851 .await?
852 .into_iter()
853 .map(|msg| proto::ChannelMessage {
854 id: msg.id.to_proto(),
855 body: msg.body,
856 timestamp: msg.sent_at.unix_timestamp() as u64,
857 sender_id: msg.sender_id.to_proto(),
858 nonce: Some(msg.nonce.as_u128().into()),
859 })
860 .collect::<Vec<_>>();
861 self.peer
862 .respond(
863 request.receipt(),
864 proto::GetChannelMessagesResponse {
865 done: messages.len() < MESSAGE_COUNT_PER_PAGE,
866 messages,
867 },
868 )
869 .await?;
870 Ok(())
871 }
872}
873
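/// Applies `f` to every connection in `receiver_ids` except `sender_id` and awaits the
/// resulting futures concurrently, failing if any of them fails. A minimal sketch of how the
/// handlers above use it (mirroring `update_buffer`):
///
/// ```ignore
/// broadcast(request.sender_id, connection_ids, |conn_id| {
///     self.peer
///         .forward_send(request.sender_id, conn_id, request.payload.clone())
/// })
/// .await?;
/// ```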
874pub async fn broadcast<F, T>(
875 sender_id: ConnectionId,
876 receiver_ids: Vec<ConnectionId>,
877 mut f: F,
878) -> anyhow::Result<()>
879where
880 F: FnMut(ConnectionId) -> T,
881 T: Future<Output = anyhow::Result<()>>,
882{
883 let futures = receiver_ids
884 .into_iter()
885 .filter(|id| *id != sender_id)
886 .map(|id| f(id));
887 futures::future::try_join_all(futures).await?;
888 Ok(())
889}
890
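/// Registers the `/rpc` endpoint: requests are authenticated by the `auth::VerifyToken`
/// middleware, upgraded to a WebSocket by computing `Sec-Websocket-Accept` from the client's
/// key and the RFC 6455 GUID, and then handed to `Server::handle_connection` on a background
/// task.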
891pub fn add_routes(app: &mut tide::Server<Arc<AppState>>, rpc: &Arc<Peer>) {
892 let server = Server::new(app.state().clone(), rpc.clone(), None);
893 app.at("/rpc").with(auth::VerifyToken).get(move |request: Request<Arc<AppState>>| {
894 let user_id = request.ext::<UserId>().copied();
895 let server = server.clone();
896 async move {
897 const WEBSOCKET_GUID: &str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
898
899 let connection_upgrade = header_contains_ignore_case(&request, CONNECTION, "upgrade");
900 let upgrade_to_websocket = header_contains_ignore_case(&request, UPGRADE, "websocket");
901 let upgrade_requested = connection_upgrade && upgrade_to_websocket;
902
903 if !upgrade_requested {
904 return Ok(Response::new(StatusCode::UpgradeRequired));
905 }
906
907 let header = match request.header("Sec-Websocket-Key") {
908 Some(h) => h.as_str(),
909 None => return Err(anyhow!("expected sec-websocket-key"))?,
910 };
911
912 let mut response = Response::new(StatusCode::SwitchingProtocols);
913 response.insert_header(UPGRADE, "websocket");
914 response.insert_header(CONNECTION, "Upgrade");
915 let hash = Sha1::new().chain(header).chain(WEBSOCKET_GUID).finalize();
916 response.insert_header("Sec-Websocket-Accept", base64::encode(&hash[..]));
917 response.insert_header("Sec-Websocket-Version", "13");
918
919 let http_res: &mut tide::http::Response = response.as_mut();
920 let upgrade_receiver = http_res.recv_upgrade().await;
921 let addr = request.remote().unwrap_or("unknown").to_string();
922 let user_id = user_id.ok_or_else(|| anyhow!("user_id is not present on request. ensure auth::VerifyToken middleware is present"))?;
923 task::spawn(async move {
924 if let Some(stream) = upgrade_receiver.await {
925 server.handle_connection(Connection::new(WebSocketStream::from_raw_socket(stream, Role::Server, None).await), addr, user_id).await;
926 }
927 });
928
929 Ok(response)
930 }
931 });
932}
933
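/// Returns whether the request has the given header and any of its comma-separated values
/// matches `value`, ignoring case and surrounding whitespace.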
934fn header_contains_ignore_case<T>(
935 request: &tide::Request<T>,
936 header_name: HeaderName,
937 value: &str,
938) -> bool {
939 request
940 .header(header_name)
941 .map(|h| {
942 h.as_str()
943 .split(',')
944 .any(|s| s.trim().eq_ignore_ascii_case(value.trim()))
945 })
946 .unwrap_or(false)
947}
948
949#[cfg(test)]
950mod tests {
951 use super::*;
952 use crate::{
953 auth,
954 db::{tests::TestDb, UserId},
955 github, AppState, Config,
956 };
957 use async_std::{sync::RwLockReadGuard, task};
958 use gpui::{ModelHandle, TestAppContext};
959 use parking_lot::Mutex;
960 use postage::{mpsc, watch};
961 use serde_json::json;
962 use sqlx::types::time::OffsetDateTime;
963 use std::{
964 path::Path,
965 sync::{
966 atomic::{AtomicBool, Ordering::SeqCst},
967 Arc,
968 },
969 time::Duration,
970 };
971 use zed::{
972 channel::{Channel, ChannelDetails, ChannelList},
973 editor::{Editor, Insert},
974 fs::{FakeFs, Fs as _},
975 language::LanguageRegistry,
976 rpc::{self, Client, Credentials, EstablishConnectionError},
977 settings,
978 test::FakeHttpClient,
979 user::UserStore,
980 worktree::Worktree,
981 };
982 use zrpc::Peer;
983
984 #[gpui::test]
985 async fn test_share_worktree(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
986 let (window_b, _) = cx_b.add_window(|_| EmptyView);
987 let settings = cx_b.read(settings::test).1;
988 let lang_registry = Arc::new(LanguageRegistry::new());
989
990 // Connect to a server as 2 clients.
991 let mut server = TestServer::start().await;
992 let (client_a, _) = server.create_client(&mut cx_a, "user_a").await;
993 let (client_b, _) = server.create_client(&mut cx_b, "user_b").await;
994
995 cx_a.foreground().forbid_parking();
996
997 // Share a local worktree as client A
998 let fs = Arc::new(FakeFs::new());
999 fs.insert_tree(
1000 "/a",
1001 json!({
1002 ".zed.toml": r#"collaborators = ["user_b"]"#,
1003 "a.txt": "a-contents",
1004 "b.txt": "b-contents",
1005 }),
1006 )
1007 .await;
1008 let worktree_a = Worktree::open_local(
1009 client_a.clone(),
1010 "/a".as_ref(),
1011 fs,
1012 lang_registry.clone(),
1013 &mut cx_a.to_async(),
1014 )
1015 .await
1016 .unwrap();
1017 worktree_a
1018 .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
1019 .await;
1020 let worktree_id = worktree_a
1021 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1022 .await
1023 .unwrap();
1024
        // Join that worktree as client B, and verify that client A sees the guest join.
1026 let worktree_b = Worktree::open_remote(
1027 client_b.clone(),
1028 worktree_id,
1029 lang_registry.clone(),
1030 &mut cx_b.to_async(),
1031 )
1032 .await
1033 .unwrap();
1034 let replica_id_b = worktree_b.read_with(&cx_b, |tree, _| tree.replica_id());
1035 worktree_a
1036 .condition(&cx_a, |tree, _| {
1037 tree.peers()
1038 .values()
1039 .any(|replica_id| *replica_id == replica_id_b)
1040 })
1041 .await;
1042
1043 // Open the same file as client B and client A.
1044 let buffer_b = worktree_b
1045 .update(&mut cx_b, |worktree, cx| worktree.open_buffer("b.txt", cx))
1046 .await
1047 .unwrap();
1048 buffer_b.read_with(&cx_b, |buf, _| assert_eq!(buf.text(), "b-contents"));
1049 worktree_a.read_with(&cx_a, |tree, cx| assert!(tree.has_open_buffer("b.txt", cx)));
1050 let buffer_a = worktree_a
1051 .update(&mut cx_a, |tree, cx| tree.open_buffer("b.txt", cx))
1052 .await
1053 .unwrap();
1054
1055 // Create a selection set as client B and see that selection set as client A.
1056 let editor_b = cx_b.add_view(window_b, |cx| Editor::for_buffer(buffer_b, settings, cx));
1057 buffer_a
1058 .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 1)
1059 .await;
1060
1061 // Edit the buffer as client B and see that edit as client A.
1062 editor_b.update(&mut cx_b, |editor, cx| {
1063 editor.insert(&Insert("ok, ".into()), cx)
1064 });
1065 buffer_a
1066 .condition(&cx_a, |buffer, _| buffer.text() == "ok, b-contents")
1067 .await;
1068
1069 // Remove the selection set as client B, see those selections disappear as client A.
1070 cx_b.update(move |_| drop(editor_b));
1071 buffer_a
1072 .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 0)
1073 .await;
1074
1075 // Close the buffer as client A, see that the buffer is closed.
1076 cx_a.update(move |_| drop(buffer_a));
1077 worktree_a
1078 .condition(&cx_a, |tree, cx| !tree.has_open_buffer("b.txt", cx))
1079 .await;
1080
1081 // Dropping the worktree removes client B from client A's peers.
1082 cx_b.update(move |_| drop(worktree_b));
1083 worktree_a
1084 .condition(&cx_a, |tree, _| tree.peers().is_empty())
1085 .await;
1086 }
1087
1088 #[gpui::test]
1089 async fn test_propagate_saves_and_fs_changes_in_shared_worktree(
1090 mut cx_a: TestAppContext,
1091 mut cx_b: TestAppContext,
1092 mut cx_c: TestAppContext,
1093 ) {
1094 cx_a.foreground().forbid_parking();
1095 let lang_registry = Arc::new(LanguageRegistry::new());
1096
1097 // Connect to a server as 3 clients.
1098 let mut server = TestServer::start().await;
1099 let (client_a, _) = server.create_client(&mut cx_a, "user_a").await;
1100 let (client_b, _) = server.create_client(&mut cx_b, "user_b").await;
1101 let (client_c, _) = server.create_client(&mut cx_c, "user_c").await;
1102
1103 let fs = Arc::new(FakeFs::new());
1104
1105 // Share a worktree as client A.
1106 fs.insert_tree(
1107 "/a",
1108 json!({
1109 ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#,
1110 "file1": "",
1111 "file2": ""
1112 }),
1113 )
1114 .await;
1115
1116 let worktree_a = Worktree::open_local(
1117 client_a.clone(),
1118 "/a".as_ref(),
1119 fs.clone(),
1120 lang_registry.clone(),
1121 &mut cx_a.to_async(),
1122 )
1123 .await
1124 .unwrap();
1125 worktree_a
1126 .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
1127 .await;
1128 let worktree_id = worktree_a
1129 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1130 .await
1131 .unwrap();
1132
1133 // Join that worktree as clients B and C.
1134 let worktree_b = Worktree::open_remote(
1135 client_b.clone(),
1136 worktree_id,
1137 lang_registry.clone(),
1138 &mut cx_b.to_async(),
1139 )
1140 .await
1141 .unwrap();
1142 let worktree_c = Worktree::open_remote(
1143 client_c.clone(),
1144 worktree_id,
1145 lang_registry.clone(),
1146 &mut cx_c.to_async(),
1147 )
1148 .await
1149 .unwrap();
1150
1151 // Open and edit a buffer as both guests B and C.
1152 let buffer_b = worktree_b
1153 .update(&mut cx_b, |tree, cx| tree.open_buffer("file1", cx))
1154 .await
1155 .unwrap();
1156 let buffer_c = worktree_c
1157 .update(&mut cx_c, |tree, cx| tree.open_buffer("file1", cx))
1158 .await
1159 .unwrap();
1160 buffer_b.update(&mut cx_b, |buf, cx| buf.edit([0..0], "i-am-b, ", cx));
1161 buffer_c.update(&mut cx_c, |buf, cx| buf.edit([0..0], "i-am-c, ", cx));
1162
1163 // Open and edit that buffer as the host.
1164 let buffer_a = worktree_a
1165 .update(&mut cx_a, |tree, cx| tree.open_buffer("file1", cx))
1166 .await
1167 .unwrap();
1168
1169 buffer_a
1170 .condition(&mut cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, ")
1171 .await;
1172 buffer_a.update(&mut cx_a, |buf, cx| {
1173 buf.edit([buf.len()..buf.len()], "i-am-a", cx)
1174 });
1175
1176 // Wait for edits to propagate
1177 buffer_a
1178 .condition(&mut cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a")
1179 .await;
1180 buffer_b
1181 .condition(&mut cx_b, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a")
1182 .await;
1183 buffer_c
1184 .condition(&mut cx_c, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a")
1185 .await;
1186
1187 // Edit the buffer as the host and concurrently save as guest B.
1188 let save_b = buffer_b.update(&mut cx_b, |buf, cx| buf.save(cx).unwrap());
1189 buffer_a.update(&mut cx_a, |buf, cx| buf.edit([0..0], "hi-a, ", cx));
1190 save_b.await.unwrap();
1191 assert_eq!(
1192 fs.load("/a/file1".as_ref()).await.unwrap(),
1193 "hi-a, i-am-c, i-am-b, i-am-a"
1194 );
1195 buffer_a.read_with(&cx_a, |buf, _| assert!(!buf.is_dirty()));
1196 buffer_b.read_with(&cx_b, |buf, _| assert!(!buf.is_dirty()));
1197 buffer_c.condition(&cx_c, |buf, _| !buf.is_dirty()).await;
1198
        // Make changes on the host's file system and see those changes on the guests.
1200 fs.rename("/a/file2".as_ref(), "/a/file3".as_ref())
1201 .await
1202 .unwrap();
1203 fs.insert_file(Path::new("/a/file4"), "4".into())
1204 .await
1205 .unwrap();
1206
1207 worktree_b
1208 .condition(&cx_b, |tree, _| tree.file_count() == 4)
1209 .await;
1210 worktree_c
1211 .condition(&cx_c, |tree, _| tree.file_count() == 4)
1212 .await;
1213 worktree_b.read_with(&cx_b, |tree, _| {
1214 assert_eq!(
1215 tree.paths()
1216 .map(|p| p.to_string_lossy())
1217 .collect::<Vec<_>>(),
1218 &[".zed.toml", "file1", "file3", "file4"]
1219 )
1220 });
1221 worktree_c.read_with(&cx_c, |tree, _| {
1222 assert_eq!(
1223 tree.paths()
1224 .map(|p| p.to_string_lossy())
1225 .collect::<Vec<_>>(),
1226 &[".zed.toml", "file1", "file3", "file4"]
1227 )
1228 });
1229 }
1230
1231 #[gpui::test]
1232 async fn test_buffer_conflict_after_save(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
1233 cx_a.foreground().forbid_parking();
1234 let lang_registry = Arc::new(LanguageRegistry::new());
1235
1236 // Connect to a server as 2 clients.
1237 let mut server = TestServer::start().await;
1238 let (client_a, _) = server.create_client(&mut cx_a, "user_a").await;
1239 let (client_b, _) = server.create_client(&mut cx_b, "user_b").await;
1240
1241 // Share a local worktree as client A
1242 let fs = Arc::new(FakeFs::new());
1243 fs.insert_tree(
1244 "/dir",
1245 json!({
1246 ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#,
1247 "a.txt": "a-contents",
1248 }),
1249 )
1250 .await;
1251
1252 let worktree_a = Worktree::open_local(
1253 client_a.clone(),
1254 "/dir".as_ref(),
1255 fs,
1256 lang_registry.clone(),
1257 &mut cx_a.to_async(),
1258 )
1259 .await
1260 .unwrap();
1261 worktree_a
1262 .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
1263 .await;
1264 let worktree_id = worktree_a
1265 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1266 .await
1267 .unwrap();
1268
        // Join that worktree as client B.
1270 let worktree_b = Worktree::open_remote(
1271 client_b.clone(),
1272 worktree_id,
1273 lang_registry.clone(),
1274 &mut cx_b.to_async(),
1275 )
1276 .await
1277 .unwrap();
1278
1279 let buffer_b = worktree_b
1280 .update(&mut cx_b, |worktree, cx| worktree.open_buffer("a.txt", cx))
1281 .await
1282 .unwrap();
1283 let mtime = buffer_b.read_with(&cx_b, |buf, _| buf.file().unwrap().mtime);
1284
1285 buffer_b.update(&mut cx_b, |buf, cx| buf.edit([0..0], "world ", cx));
1286 buffer_b.read_with(&cx_b, |buf, _| {
1287 assert!(buf.is_dirty());
1288 assert!(!buf.has_conflict());
1289 });
1290
1291 buffer_b
1292 .update(&mut cx_b, |buf, cx| buf.save(cx))
1293 .unwrap()
1294 .await
1295 .unwrap();
1296 worktree_b
1297 .condition(&cx_b, |_, cx| {
1298 buffer_b.read(cx).file().unwrap().mtime != mtime
1299 })
1300 .await;
1301 buffer_b.read_with(&cx_b, |buf, _| {
1302 assert!(!buf.is_dirty());
1303 assert!(!buf.has_conflict());
1304 });
1305
1306 buffer_b.update(&mut cx_b, |buf, cx| buf.edit([0..0], "hello ", cx));
1307 buffer_b.read_with(&cx_b, |buf, _| {
1308 assert!(buf.is_dirty());
1309 assert!(!buf.has_conflict());
1310 });
1311 }
1312
1313 #[gpui::test]
1314 async fn test_editing_while_guest_opens_buffer(
1315 mut cx_a: TestAppContext,
1316 mut cx_b: TestAppContext,
1317 ) {
1318 cx_a.foreground().forbid_parking();
1319 let lang_registry = Arc::new(LanguageRegistry::new());
1320
1321 // Connect to a server as 2 clients.
1322 let mut server = TestServer::start().await;
1323 let (client_a, _) = server.create_client(&mut cx_a, "user_a").await;
1324 let (client_b, _) = server.create_client(&mut cx_b, "user_b").await;
1325
1326 // Share a local worktree as client A
1327 let fs = Arc::new(FakeFs::new());
1328 fs.insert_tree(
1329 "/dir",
1330 json!({
1331 ".zed.toml": r#"collaborators = ["user_b"]"#,
1332 "a.txt": "a-contents",
1333 }),
1334 )
1335 .await;
1336 let worktree_a = Worktree::open_local(
1337 client_a.clone(),
1338 "/dir".as_ref(),
1339 fs,
1340 lang_registry.clone(),
1341 &mut cx_a.to_async(),
1342 )
1343 .await
1344 .unwrap();
1345 worktree_a
1346 .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
1347 .await;
1348 let worktree_id = worktree_a
1349 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1350 .await
1351 .unwrap();
1352
        // Join that worktree as client B.
1354 let worktree_b = Worktree::open_remote(
1355 client_b.clone(),
1356 worktree_id,
1357 lang_registry.clone(),
1358 &mut cx_b.to_async(),
1359 )
1360 .await
1361 .unwrap();
1362
1363 let buffer_a = worktree_a
1364 .update(&mut cx_a, |tree, cx| tree.open_buffer("a.txt", cx))
1365 .await
1366 .unwrap();
1367 let buffer_b = cx_b
1368 .background()
1369 .spawn(worktree_b.update(&mut cx_b, |worktree, cx| worktree.open_buffer("a.txt", cx)));
1370
1371 task::yield_now().await;
1372 buffer_a.update(&mut cx_a, |buf, cx| buf.edit([0..0], "z", cx));
1373
1374 let text = buffer_a.read_with(&cx_a, |buf, _| buf.text());
1375 let buffer_b = buffer_b.await.unwrap();
1376 buffer_b.condition(&cx_b, |buf, _| buf.text() == text).await;
1377 }
1378
1379 #[gpui::test]
    async fn test_peer_disconnection(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
1381 cx_a.foreground().forbid_parking();
1382 let lang_registry = Arc::new(LanguageRegistry::new());
1383
1384 // Connect to a server as 2 clients.
1385 let mut server = TestServer::start().await;
1386 let (client_a, _) = server.create_client(&mut cx_a, "user_a").await;
        let (client_b, _) = server.create_client(&mut cx_b, "user_b").await;
1388
1389 // Share a local worktree as client A
1390 let fs = Arc::new(FakeFs::new());
1391 fs.insert_tree(
1392 "/a",
1393 json!({
1394 ".zed.toml": r#"collaborators = ["user_b"]"#,
1395 "a.txt": "a-contents",
1396 "b.txt": "b-contents",
1397 }),
1398 )
1399 .await;
1400 let worktree_a = Worktree::open_local(
1401 client_a.clone(),
1402 "/a".as_ref(),
1403 fs,
1404 lang_registry.clone(),
1405 &mut cx_a.to_async(),
1406 )
1407 .await
1408 .unwrap();
1409 worktree_a
1410 .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
1411 .await;
1412 let worktree_id = worktree_a
1413 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1414 .await
1415 .unwrap();
1416
        // Join that worktree as client B, and verify that client A sees the guest join.
1418 let _worktree_b = Worktree::open_remote(
1419 client_b.clone(),
1420 worktree_id,
1421 lang_registry.clone(),
1422 &mut cx_b.to_async(),
1423 )
1424 .await
1425 .unwrap();
1426 worktree_a
1427 .condition(&cx_a, |tree, _| tree.peers().len() == 1)
1428 .await;
1429
1430 // Drop client B's connection and ensure client A observes client B leaving the worktree.
1431 client_b.disconnect(&cx_b.to_async()).await.unwrap();
1432 worktree_a
1433 .condition(&cx_a, |tree, _| tree.peers().len() == 0)
1434 .await;
1435 }
1436
1437 #[gpui::test]
1438 async fn test_basic_chat(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
1439 cx_a.foreground().forbid_parking();
1440
1441 // Connect to a server as 2 clients.
1442 let mut server = TestServer::start().await;
1443 let (client_a, user_store_a) = server.create_client(&mut cx_a, "user_a").await;
1444 let (client_b, user_store_b) = server.create_client(&mut cx_b, "user_b").await;
1445
1446 // Create an org that includes these 2 users.
1447 let db = &server.app_state.db;
1448 let org_id = db.create_org("Test Org", "test-org").await.unwrap();
1449 db.add_org_member(org_id, current_user_id(&user_store_a, &cx_a), false)
1450 .await
1451 .unwrap();
1452 db.add_org_member(org_id, current_user_id(&user_store_b, &cx_b), false)
1453 .await
1454 .unwrap();
1455
1456 // Create a channel that includes all the users.
1457 let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap();
1458 db.add_channel_member(channel_id, current_user_id(&user_store_a, &cx_a), false)
1459 .await
1460 .unwrap();
1461 db.add_channel_member(channel_id, current_user_id(&user_store_b, &cx_b), false)
1462 .await
1463 .unwrap();
1464 db.create_channel_message(
1465 channel_id,
1466 current_user_id(&user_store_b, &cx_b),
1467 "hello A, it's B.",
1468 OffsetDateTime::now_utc(),
1469 1,
1470 )
1471 .await
1472 .unwrap();
1473
1474 let channels_a = cx_a.add_model(|cx| ChannelList::new(user_store_a, client_a, cx));
1475 channels_a
1476 .condition(&mut cx_a, |list, _| list.available_channels().is_some())
1477 .await;
1478 channels_a.read_with(&cx_a, |list, _| {
1479 assert_eq!(
1480 list.available_channels().unwrap(),
1481 &[ChannelDetails {
1482 id: channel_id.to_proto(),
1483 name: "test-channel".to_string()
1484 }]
1485 )
1486 });
1487 let channel_a = channels_a.update(&mut cx_a, |this, cx| {
1488 this.get_channel(channel_id.to_proto(), cx).unwrap()
1489 });
1490 channel_a.read_with(&cx_a, |channel, _| assert!(channel.messages().is_empty()));
1491 channel_a
1492 .condition(&cx_a, |channel, _| {
1493 channel_messages(channel)
1494 == [("user_b".to_string(), "hello A, it's B.".to_string(), false)]
1495 })
1496 .await;
1497
1498 let channels_b = cx_b.add_model(|cx| ChannelList::new(user_store_b, client_b, cx));
1499 channels_b
1500 .condition(&mut cx_b, |list, _| list.available_channels().is_some())
1501 .await;
1502 channels_b.read_with(&cx_b, |list, _| {
1503 assert_eq!(
1504 list.available_channels().unwrap(),
1505 &[ChannelDetails {
1506 id: channel_id.to_proto(),
1507 name: "test-channel".to_string()
1508 }]
1509 )
1510 });
1511
1512 let channel_b = channels_b.update(&mut cx_b, |this, cx| {
1513 this.get_channel(channel_id.to_proto(), cx).unwrap()
1514 });
1515 channel_b.read_with(&cx_b, |channel, _| assert!(channel.messages().is_empty()));
1516 channel_b
1517 .condition(&cx_b, |channel, _| {
1518 channel_messages(channel)
1519 == [("user_b".to_string(), "hello A, it's B.".to_string(), false)]
1520 })
1521 .await;
1522
1523 channel_a
1524 .update(&mut cx_a, |channel, cx| {
1525 channel
1526 .send_message("oh, hi B.".to_string(), cx)
1527 .unwrap()
1528 .detach();
1529 let task = channel.send_message("sup".to_string(), cx).unwrap();
1530 assert_eq!(
1531 channel_messages(channel),
1532 &[
1533 ("user_b".to_string(), "hello A, it's B.".to_string(), false),
1534 ("user_a".to_string(), "oh, hi B.".to_string(), true),
1535 ("user_a".to_string(), "sup".to_string(), true)
1536 ]
1537 );
1538 task
1539 })
1540 .await
1541 .unwrap();
1542
1543 channel_b
1544 .condition(&cx_b, |channel, _| {
1545 channel_messages(channel)
1546 == [
1547 ("user_b".to_string(), "hello A, it's B.".to_string(), false),
1548 ("user_a".to_string(), "oh, hi B.".to_string(), false),
1549 ("user_a".to_string(), "sup".to_string(), false),
1550 ]
1551 })
1552 .await;
1553
1554 assert_eq!(
1555 server
1556 .state()
1557 .await
1558 .channel(channel_id)
1559 .unwrap()
1560 .connection_ids
1561 .len(),
1562 2
1563 );
1564 cx_b.update(|_| drop(channel_b));
1565 server
1566 .condition(|state| state.channel(channel_id).unwrap().connection_ids.len() == 1)
1567 .await;
1568
1569 cx_a.update(|_| drop(channel_a));
1570 server
1571 .condition(|state| state.channel(channel_id).is_none())
1572 .await;
1573 }
1574
1575 #[gpui::test]
1576 async fn test_chat_message_validation(mut cx_a: TestAppContext) {
1577 cx_a.foreground().forbid_parking();
1578
1579 let mut server = TestServer::start().await;
1580 let (client_a, user_store_a) = server.create_client(&mut cx_a, "user_a").await;
1581
1582 let db = &server.app_state.db;
1583 let org_id = db.create_org("Test Org", "test-org").await.unwrap();
1584 let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap();
1585 db.add_org_member(org_id, current_user_id(&user_store_a, &cx_a), false)
1586 .await
1587 .unwrap();
1588 db.add_channel_member(channel_id, current_user_id(&user_store_a, &cx_a), false)
1589 .await
1590 .unwrap();
1591
1592 let channels_a = cx_a.add_model(|cx| ChannelList::new(user_store_a, client_a, cx));
1593 channels_a
1594 .condition(&mut cx_a, |list, _| list.available_channels().is_some())
1595 .await;
1596 let channel_a = channels_a.update(&mut cx_a, |this, cx| {
1597 this.get_channel(channel_id.to_proto(), cx).unwrap()
1598 });
1599
1600 // Messages aren't allowed to be too long.
1601 channel_a
1602 .update(&mut cx_a, |channel, cx| {
1603 let long_body = "this is long.\n".repeat(1024);
1604 channel.send_message(long_body, cx).unwrap()
1605 })
1606 .await
1607 .unwrap_err();
1608
1609 // Messages aren't allowed to be blank.
1610 channel_a.update(&mut cx_a, |channel, cx| {
1611 channel.send_message(String::new(), cx).unwrap_err()
1612 });
1613
1614 // Leading and trailing whitespace are trimmed.
1615 channel_a
1616 .update(&mut cx_a, |channel, cx| {
1617 channel
1618 .send_message("\n surrounded by whitespace \n".to_string(), cx)
1619 .unwrap()
1620 })
1621 .await
1622 .unwrap();
1623 assert_eq!(
1624 db.get_channel_messages(channel_id, 10, None)
1625 .await
1626 .unwrap()
1627 .iter()
1628 .map(|m| &m.body)
1629 .collect::<Vec<_>>(),
1630 &["surrounded by whitespace"]
1631 );
1632 }
1633
1634 #[gpui::test]
1635 async fn test_chat_reconnection(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
1636 cx_a.foreground().forbid_parking();
1637
1638 // Connect to a server as 2 clients.
1639 let mut server = TestServer::start().await;
1640 let (client_a, user_store_a) = server.create_client(&mut cx_a, "user_a").await;
1641 let (client_b, user_store_b) = server.create_client(&mut cx_b, "user_b").await;
1642 let mut status_b = client_b.status();
1643
1644 // Create an org that includes these 2 users.
1645 let db = &server.app_state.db;
1646 let org_id = db.create_org("Test Org", "test-org").await.unwrap();
1647 db.add_org_member(org_id, current_user_id(&user_store_a, &cx_a), false)
1648 .await
1649 .unwrap();
1650 db.add_org_member(org_id, current_user_id(&user_store_b, &cx_b), false)
1651 .await
1652 .unwrap();
1653
1654 // Create a channel that includes all the users.
1655 let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap();
1656 db.add_channel_member(channel_id, current_user_id(&user_store_a, &cx_a), false)
1657 .await
1658 .unwrap();
1659 db.add_channel_member(channel_id, current_user_id(&user_store_b, &cx_b), false)
1660 .await
1661 .unwrap();
1662 db.create_channel_message(
1663 channel_id,
1664 current_user_id(&user_store_b, &cx_b),
1665 "hello A, it's B.",
1666 OffsetDateTime::now_utc(),
1667 2,
1668 )
1669 .await
1670 .unwrap();
1671
1672 let channels_a = cx_a.add_model(|cx| ChannelList::new(user_store_a, client_a, cx));
1673 channels_a
1674 .condition(&mut cx_a, |list, _| list.available_channels().is_some())
1675 .await;
1676
1677 channels_a.read_with(&cx_a, |list, _| {
1678 assert_eq!(
1679 list.available_channels().unwrap(),
1680 &[ChannelDetails {
1681 id: channel_id.to_proto(),
1682 name: "test-channel".to_string()
1683 }]
1684 )
1685 });
1686 let channel_a = channels_a.update(&mut cx_a, |this, cx| {
1687 this.get_channel(channel_id.to_proto(), cx).unwrap()
1688 });
1689 channel_a.read_with(&cx_a, |channel, _| assert!(channel.messages().is_empty()));
1690 channel_a
1691 .condition(&cx_a, |channel, _| {
1692 channel_messages(channel)
1693 == [("user_b".to_string(), "hello A, it's B.".to_string(), false)]
1694 })
1695 .await;
1696
1697 let channels_b = cx_b.add_model(|cx| ChannelList::new(user_store_b.clone(), client_b, cx));
1698 channels_b
1699 .condition(&mut cx_b, |list, _| list.available_channels().is_some())
1700 .await;
1701 channels_b.read_with(&cx_b, |list, _| {
1702 assert_eq!(
1703 list.available_channels().unwrap(),
1704 &[ChannelDetails {
1705 id: channel_id.to_proto(),
1706 name: "test-channel".to_string()
1707 }]
1708 )
1709 });
1710
1711 let channel_b = channels_b.update(&mut cx_b, |this, cx| {
1712 this.get_channel(channel_id.to_proto(), cx).unwrap()
1713 });
1714 channel_b.read_with(&cx_b, |channel, _| assert!(channel.messages().is_empty()));
1715 channel_b
1716 .condition(&cx_b, |channel, _| {
1717 channel_messages(channel)
1718 == [("user_b".to_string(), "hello A, it's B.".to_string(), false)]
1719 })
1720 .await;
1721
1722 // Disconnect client B, ensuring we can still access its cached channel data.
1723 server.forbid_connections();
1724 server.disconnect_client(current_user_id(&user_store_b, &cx_b));
1725 while !matches!(
1726 status_b.recv().await,
1727 Some(rpc::Status::ReconnectionError { .. })
1728 ) {}
1729
1730 channels_b.read_with(&cx_b, |channels, _| {
1731 assert_eq!(
1732 channels.available_channels().unwrap(),
1733 [ChannelDetails {
1734 id: channel_id.to_proto(),
1735 name: "test-channel".to_string()
1736 }]
1737 )
1738 });
1739 channel_b.read_with(&cx_b, |channel, _| {
1740 assert_eq!(
1741 channel_messages(channel),
1742 [("user_b".to_string(), "hello A, it's B.".to_string(), false)]
1743 )
1744 });
1745
1746 // Send a message from client B while it is disconnected.
1747 channel_b
1748 .update(&mut cx_b, |channel, cx| {
1749 let task = channel
1750 .send_message("can you see this?".to_string(), cx)
1751 .unwrap();
1752 assert_eq!(
1753 channel_messages(channel),
1754 &[
1755 ("user_b".to_string(), "hello A, it's B.".to_string(), false),
1756 ("user_b".to_string(), "can you see this?".to_string(), true)
1757 ]
1758 );
1759 task
1760 })
1761 .await
1762 .unwrap_err();
1763
1764 // Send a message from client A while B is disconnected.
1765 channel_a
1766 .update(&mut cx_a, |channel, cx| {
1767 channel
1768 .send_message("oh, hi B.".to_string(), cx)
1769 .unwrap()
1770 .detach();
1771 let task = channel.send_message("sup".to_string(), cx).unwrap();
1772 assert_eq!(
1773 channel_messages(channel),
1774 &[
1775 ("user_b".to_string(), "hello A, it's B.".to_string(), false),
1776 ("user_a".to_string(), "oh, hi B.".to_string(), true),
1777 ("user_a".to_string(), "sup".to_string(), true)
1778 ]
1779 );
1780 task
1781 })
1782 .await
1783 .unwrap();
1784
1785 // Give client B a chance to reconnect.
1786 server.allow_connections();
1787 cx_b.foreground().advance_clock(Duration::from_secs(10));
1788
1789 // Verify that B sees the new messages upon reconnection, as well as the message client B
1790 // sent while offline.
1791 channel_b
1792 .condition(&cx_b, |channel, _| {
1793 channel_messages(channel)
1794 == [
1795 ("user_b".to_string(), "hello A, it's B.".to_string(), false),
1796 ("user_a".to_string(), "oh, hi B.".to_string(), false),
1797 ("user_a".to_string(), "sup".to_string(), false),
1798 ("user_b".to_string(), "can you see this?".to_string(), false),
1799 ]
1800 })
1801 .await;
1802
1803 // Ensure client A and B can communicate normally after reconnection.
1804 channel_a
1805 .update(&mut cx_a, |channel, cx| {
1806 channel.send_message("you online?".to_string(), cx).unwrap()
1807 })
1808 .await
1809 .unwrap();
1810 channel_b
1811 .condition(&cx_b, |channel, _| {
1812 channel_messages(channel)
1813 == [
1814 ("user_b".to_string(), "hello A, it's B.".to_string(), false),
1815 ("user_a".to_string(), "oh, hi B.".to_string(), false),
1816 ("user_a".to_string(), "sup".to_string(), false),
1817 ("user_b".to_string(), "can you see this?".to_string(), false),
1818 ("user_a".to_string(), "you online?".to_string(), false),
1819 ]
1820 })
1821 .await;
1822
1823 channel_b
1824 .update(&mut cx_b, |channel, cx| {
1825 channel.send_message("yep".to_string(), cx).unwrap()
1826 })
1827 .await
1828 .unwrap();
1829 channel_a
1830 .condition(&cx_a, |channel, _| {
1831 channel_messages(channel)
1832 == [
1833 ("user_b".to_string(), "hello A, it's B.".to_string(), false),
1834 ("user_a".to_string(), "oh, hi B.".to_string(), false),
1835 ("user_a".to_string(), "sup".to_string(), false),
1836 ("user_b".to_string(), "can you see this?".to_string(), false),
1837 ("user_a".to_string(), "you online?".to_string(), false),
1838 ("user_b".to_string(), "yep".to_string(), false),
1839 ]
1840 })
1841 .await;
1842 }
1843
1844 #[gpui::test]
1845 async fn test_collaborators(
1846 mut cx_a: TestAppContext,
1847 mut cx_b: TestAppContext,
1848 mut cx_c: TestAppContext,
1849 ) {
1850 cx_a.foreground().forbid_parking();
1851 let lang_registry = Arc::new(LanguageRegistry::new());
1852
1853 // Connect to a server as 3 clients.
1854 let mut server = TestServer::start().await;
1855 let (client_a, user_store_a) = server.create_client(&mut cx_a, "user_a").await;
1856 let (client_b, user_store_b) = server.create_client(&mut cx_b, "user_b").await;
1857 let (_client_c, user_store_c) = server.create_client(&mut cx_c, "user_c").await;
1858
1859 let fs = Arc::new(FakeFs::new());
1860
1861 // Share a worktree as client A.
1862 fs.insert_tree(
1863 "/a",
1864 json!({
1865 ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#,
1866 }),
1867 )
1868 .await;
1869
1870 let worktree_a = Worktree::open_local(
1871 client_a.clone(),
1872 "/a".as_ref(),
1873 fs.clone(),
1874 lang_registry.clone(),
1875 &mut cx_a.to_async(),
1876 )
1877 .await
1878 .unwrap();
1879
1880 user_store_a
1881 .condition(&cx_a, |user_store, _| {
1882 collaborators(user_store) == vec![("user_a", vec![("a", vec![])])]
1883 })
1884 .await;
1885 user_store_b
1886 .condition(&cx_b, |user_store, _| {
1887 collaborators(user_store) == vec![("user_a", vec![("a", vec![])])]
1888 })
1889 .await;
1890 user_store_c
1891 .condition(&cx_c, |user_store, _| {
1892 collaborators(user_store) == vec![("user_a", vec![("a", vec![])])]
1893 })
1894 .await;
1895
1896 let worktree_id = worktree_a
1897 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1898 .await
1899 .unwrap();
1900
1901 let _worktree_b = Worktree::open_remote(
1902 client_b.clone(),
1903 worktree_id,
1904 lang_registry.clone(),
1905 &mut cx_b.to_async(),
1906 )
1907 .await
1908 .unwrap();
1909
1910 user_store_a
1911 .condition(&cx_a, |user_store, _| {
1912 collaborators(user_store) == vec![("user_a", vec![("a", vec!["user_b"])])]
1913 })
1914 .await;
1915 user_store_b
1916 .condition(&cx_b, |user_store, _| {
1917 collaborators(user_store) == vec![("user_a", vec![("a", vec!["user_b"])])]
1918 })
1919 .await;
1920 user_store_c
1921 .condition(&cx_c, |user_store, _| {
1922 collaborators(user_store) == vec![("user_a", vec![("a", vec!["user_b"])])]
1923 })
1924 .await;
1925
1926 cx_a.update(move |_| drop(worktree_a));
1927 user_store_a
1928 .condition(&cx_a, |user_store, _| collaborators(user_store) == vec![])
1929 .await;
1930 user_store_b
1931 .condition(&cx_b, |user_store, _| collaborators(user_store) == vec![])
1932 .await;
1933 user_store_c
1934 .condition(&cx_c, |user_store, _| collaborators(user_store) == vec![])
1935 .await;
1936
1937 fn collaborators(user_store: &UserStore) -> Vec<(&str, Vec<(&str, Vec<&str>)>)> {
1938 user_store
1939 .collaborators()
1940 .iter()
1941 .map(|collaborator| {
1942 let worktrees = collaborator
1943 .worktrees
1944 .iter()
1945 .map(|w| {
1946 (
1947 w.root_name.as_str(),
1948 w.participants
1949 .iter()
1950 .map(|p| p.github_login.as_str())
1951 .collect(),
1952 )
1953 })
1954 .collect();
1955 (collaborator.user.github_login.as_str(), worktrees)
1956 })
1957 .collect()
1958 }
1959 }
1960
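    /// In-process test harness: wires a `Server` to in-memory connections so tests can create
    /// clients, kill or forbid their connections, and wait for the server's store to reach a
    /// given state.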
1961 struct TestServer {
1962 peer: Arc<Peer>,
1963 app_state: Arc<AppState>,
1964 server: Arc<Server>,
1965 notifications: mpsc::Receiver<()>,
1966 connection_killers: Arc<Mutex<HashMap<UserId, watch::Sender<Option<()>>>>>,
1967 forbid_connections: Arc<AtomicBool>,
1968 _test_db: TestDb,
1969 }
1970
1971 impl TestServer {
1972 async fn start() -> Self {
1973 let test_db = TestDb::new();
1974 let app_state = Self::build_app_state(&test_db).await;
1975 let peer = Peer::new();
1976 let notifications = mpsc::channel(128);
1977 let server = Server::new(app_state.clone(), peer.clone(), Some(notifications.0));
1978 Self {
1979 peer,
1980 app_state,
1981 server,
1982 notifications: notifications.1,
1983 connection_killers: Default::default(),
1984 forbid_connections: Default::default(),
1985 _test_db: test_db,
1986 }
1987 }
1988
1989 async fn create_client(
1990 &mut self,
1991 cx: &mut TestAppContext,
1992 name: &str,
1993 ) -> (Arc<Client>, ModelHandle<UserStore>) {
1994 let user_id = self.app_state.db.create_user(name, false).await.unwrap();
1995 let client_name = name.to_string();
1996 let mut client = Client::new();
1997 let server = self.server.clone();
1998 let connection_killers = self.connection_killers.clone();
1999 let forbid_connections = self.forbid_connections.clone();
2000 Arc::get_mut(&mut client)
2001 .unwrap()
2002 .override_authenticate(move |cx| {
2003 cx.spawn(|_| async move {
2004 let access_token = "the-token".to_string();
2005 Ok(Credentials {
2006 user_id: user_id.0 as u64,
2007 access_token,
2008 })
2009 })
2010 })
2011 .override_establish_connection(move |credentials, cx| {
2012 assert_eq!(credentials.user_id, user_id.0 as u64);
2013 assert_eq!(credentials.access_token, "the-token");
2014
2015 let server = server.clone();
2016 let connection_killers = connection_killers.clone();
2017 let forbid_connections = forbid_connections.clone();
2018 let client_name = client_name.clone();
2019 cx.spawn(move |cx| async move {
2020 if forbid_connections.load(SeqCst) {
2021 Err(EstablishConnectionError::other(anyhow!(
2022 "server is forbidding connections"
2023 )))
2024 } else {
2025 let (client_conn, server_conn, kill_conn) = Connection::in_memory();
2026 connection_killers.lock().insert(user_id, kill_conn);
2027 cx.background()
2028 .spawn(server.handle_connection(server_conn, client_name, user_id))
2029 .detach();
2030 Ok(client_conn)
2031 }
2032 })
2033 });
2034
2035 let http = FakeHttpClient::new(|_| async move { Ok(surf::http::Response::new(404)) });
2036 client
2037 .authenticate_and_connect(&cx.to_async())
2038 .await
2039 .unwrap();
2040
2041 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx));
2042 let mut authed_user =
2043 user_store.read_with(cx, |user_store, _| user_store.watch_current_user());
2044 while authed_user.recv().await.unwrap().is_none() {}
2045
2046 (client, user_store)
2047 }
2048
2049 fn disconnect_client(&self, user_id: UserId) {
2050 if let Some(mut kill_conn) = self.connection_killers.lock().remove(&user_id) {
2051 let _ = kill_conn.try_send(Some(()));
2052 }
2053 }
2054
2055 fn forbid_connections(&self) {
2056 self.forbid_connections.store(true, SeqCst);
2057 }
2058
2059 fn allow_connections(&self) {
2060 self.forbid_connections.store(false, SeqCst);
2061 }
2062
2063 async fn build_app_state(test_db: &TestDb) -> Arc<AppState> {
2064 let mut config = Config::default();
2065 config.session_secret = "a".repeat(32);
2066 config.database_url = test_db.url.clone();
2067 let github_client = github::AppClient::test();
2068 Arc::new(AppState {
2069 db: test_db.db().clone(),
2070 handlebars: Default::default(),
2071 auth_client: auth::build_client("", ""),
2072 repo_client: github::RepoClient::test(&github_client),
2073 github_client,
2074 config,
2075 })
2076 }
2077
2078 async fn state<'a>(&'a self) -> RwLockReadGuard<'a, Store> {
2079 self.server.store.read().await
2080 }
2081
2082 async fn condition<F>(&mut self, mut predicate: F)
2083 where
2084 F: FnMut(&Store) -> bool,
2085 {
2086 async_std::future::timeout(Duration::from_millis(500), async {
2087 while !(predicate)(&*self.server.store.read().await) {
2088 self.notifications.recv().await;
2089 }
2090 })
2091 .await
2092 .expect("condition timed out");
2093 }
2094 }
2095
2096 impl Drop for TestServer {
2097 fn drop(&mut self) {
2098 task::block_on(self.peer.reset());
2099 }
2100 }
2101
2102 fn current_user_id(user_store: &ModelHandle<UserStore>, cx: &TestAppContext) -> UserId {
2103 UserId::from_proto(
2104 user_store.read_with(cx, |user_store, _| user_store.current_user().unwrap().id),
2105 )
2106 }
2107
2108 fn channel_messages(channel: &Channel) -> Vec<(String, String, bool)> {
2109 channel
2110 .messages()
2111 .cursor::<(), ()>()
2112 .map(|m| {
2113 (
2114 m.sender.github_login.clone(),
2115 m.body.clone(),
2116 m.is_pending(),
2117 )
2118 })
2119 .collect()
2120 }
2121
2122 struct EmptyView;
2123
2124 impl gpui::Entity for EmptyView {
2125 type Event = ();
2126 }
2127
2128 impl gpui::View for EmptyView {
2129 fn ui_name() -> &'static str {
2130 "empty view"
2131 }
2132
2133 fn render(&mut self, _: &mut gpui::RenderContext<Self>) -> gpui::ElementBox {
2134 gpui::Element::boxed(gpui::elements::Empty)
2135 }
2136 }
2137}