1mod store;
2
3use super::{
4 auth::process_auth_header,
5 db::{ChannelId, MessageId, UserId},
6 AppState,
7};
8use anyhow::anyhow;
9use async_std::{sync::RwLock, task};
10use async_tungstenite::{tungstenite::protocol::Role, WebSocketStream};
11use futures::{future::BoxFuture, FutureExt};
12use postage::{mpsc, prelude::Sink as _, prelude::Stream as _};
13use sha1::{Digest as _, Sha1};
14use std::{
15 any::TypeId,
16 collections::{HashMap, HashSet},
17 future::Future,
18 mem,
19 sync::Arc,
20 time::Instant,
21};
22use store::{JoinedWorktree, Store, Worktree};
23use surf::StatusCode;
24use tide::log;
25use tide::{
26 http::headers::{HeaderName, CONNECTION, UPGRADE},
27 Request, Response,
28};
29use time::OffsetDateTime;
30use zrpc::{
31 proto::{self, AnyTypedEnvelope, EnvelopedMessage},
32 Connection, ConnectionId, Peer, TypedEnvelope,
33};
34
35type MessageHandler = Box<
36 dyn Send
37 + Sync
38 + Fn(Arc<Server>, Box<dyn AnyTypedEnvelope>) -> BoxFuture<'static, tide::Result<()>>,
39>;
40
41pub struct Server {
42 peer: Arc<Peer>,
43 store: RwLock<Store>,
44 app_state: Arc<AppState>,
45 handlers: HashMap<TypeId, MessageHandler>,
46 notifications: Option<mpsc::Sender<()>>,
47}
48
49const MESSAGE_COUNT_PER_PAGE: usize = 100;
50const MAX_MESSAGE_LEN: usize = 1024;
51
52impl Server {
53 pub fn new(
54 app_state: Arc<AppState>,
55 peer: Arc<Peer>,
56 notifications: Option<mpsc::Sender<()>>,
57 ) -> Arc<Self> {
58 let mut server = Self {
59 peer,
60 app_state,
61 store: Default::default(),
62 handlers: Default::default(),
63 notifications,
64 };
65
66 server
67 .add_handler(Server::ping)
68 .add_handler(Server::open_worktree)
69 .add_handler(Server::close_worktree)
70 .add_handler(Server::share_worktree)
71 .add_handler(Server::unshare_worktree)
72 .add_handler(Server::join_worktree)
73 .add_handler(Server::leave_worktree)
74 .add_handler(Server::update_worktree)
75 .add_handler(Server::open_buffer)
76 .add_handler(Server::close_buffer)
77 .add_handler(Server::update_buffer)
78 .add_handler(Server::buffer_saved)
79 .add_handler(Server::save_buffer)
80 .add_handler(Server::get_channels)
81 .add_handler(Server::get_users)
82 .add_handler(Server::join_channel)
83 .add_handler(Server::leave_channel)
84 .add_handler(Server::send_channel_message)
85 .add_handler(Server::get_channel_messages);
86
87 Arc::new(server)
88 }
89
90 fn add_handler<F, Fut, M>(&mut self, handler: F) -> &mut Self
91 where
92 F: 'static + Send + Sync + Fn(Arc<Self>, TypedEnvelope<M>) -> Fut,
93 Fut: 'static + Send + Future<Output = tide::Result<()>>,
94 M: EnvelopedMessage,
95 {
96 let prev_handler = self.handlers.insert(
97 TypeId::of::<M>(),
98 Box::new(move |server, envelope| {
99 let envelope = envelope.into_any().downcast::<TypedEnvelope<M>>().unwrap();
100 (handler)(server, *envelope).boxed()
101 }),
102 );
103 if prev_handler.is_some() {
104 panic!("registered a handler for the same message twice");
105 }
106 self
107 }
108
109 pub fn handle_connection(
110 self: &Arc<Self>,
111 connection: Connection,
112 addr: String,
113 user_id: UserId,
114 ) -> impl Future<Output = ()> {
115 let mut this = self.clone();
116 async move {
117 let (connection_id, handle_io, mut incoming_rx) =
118 this.peer.add_connection(connection).await;
119 this.state_mut()
120 .await
121 .add_connection(connection_id, user_id);
122 if let Err(err) = this.update_collaborators_for_users(&[user_id]).await {
123 log::error!("error updating collaborators for {:?}: {}", user_id, err);
124 }
125
126 let handle_io = handle_io.fuse();
127 futures::pin_mut!(handle_io);
128 loop {
129 let next_message = incoming_rx.recv().fuse();
130 futures::pin_mut!(next_message);
131 futures::select_biased! {
132 message = next_message => {
133 if let Some(message) = message {
134 let start_time = Instant::now();
135 log::info!("RPC message received: {}", message.payload_type_name());
136 if let Some(handler) = this.handlers.get(&message.payload_type_id()) {
137 if let Err(err) = (handler)(this.clone(), message).await {
138 log::error!("error handling message: {:?}", err);
139 } else {
140 log::info!("RPC message handled. duration:{:?}", start_time.elapsed());
141 }
142
143 if let Some(mut notifications) = this.notifications.clone() {
144 let _ = notifications.send(()).await;
145 }
146 } else {
147 log::warn!("unhandled message: {}", message.payload_type_name());
148 }
149 } else {
150 log::info!("rpc connection closed {:?}", addr);
151 break;
152 }
153 }
154 handle_io = handle_io => {
155 if let Err(err) = handle_io {
156 log::error!("error handling rpc connection {:?} - {:?}", addr, err);
157 }
158 break;
159 }
160 }
161 }
162
163 if let Err(err) = this.sign_out(connection_id).await {
164 log::error!("error signing out connection {:?} - {:?}", addr, err);
165 }
166 }
167 }
168
169 async fn sign_out(self: &mut Arc<Self>, connection_id: ConnectionId) -> tide::Result<()> {
170 self.peer.disconnect(connection_id).await;
171 let removed_connection = self.state_mut().await.remove_connection(connection_id)?;
172
173 for (worktree_id, worktree) in removed_connection.hosted_worktrees {
174 if let Some(share) = worktree.share {
175 broadcast(
176 connection_id,
177 share.guest_connection_ids.keys().copied().collect(),
178 |conn_id| {
179 self.peer
180 .send(conn_id, proto::UnshareWorktree { worktree_id })
181 },
182 )
183 .await?;
184 }
185 }
186
187 for (worktree_id, peer_ids) in removed_connection.guest_worktree_ids {
188 broadcast(connection_id, peer_ids, |conn_id| {
189 self.peer.send(
190 conn_id,
191 proto::RemovePeer {
192 worktree_id,
193 peer_id: connection_id.0,
194 },
195 )
196 })
197 .await?;
198 }
199
200 self.update_collaborators_for_users(removed_connection.collaborator_ids.iter())
201 .await?;
202
203 Ok(())
204 }
205
206 async fn ping(self: Arc<Server>, request: TypedEnvelope<proto::Ping>) -> tide::Result<()> {
207 self.peer.respond(request.receipt(), proto::Ack {}).await?;
208 Ok(())
209 }
210
211 async fn open_worktree(
212 mut self: Arc<Server>,
213 request: TypedEnvelope<proto::OpenWorktree>,
214 ) -> tide::Result<()> {
215 let receipt = request.receipt();
216 let host_user_id = self
217 .state()
218 .await
219 .user_id_for_connection(request.sender_id)?;
220
221 let mut collaborator_user_ids = HashSet::new();
222 collaborator_user_ids.insert(host_user_id);
223 for github_login in request.payload.collaborator_logins {
224 match self.app_state.db.create_user(&github_login, false).await {
225 Ok(collaborator_user_id) => {
226 collaborator_user_ids.insert(collaborator_user_id);
227 }
228 Err(err) => {
229 let message = err.to_string();
230 self.peer
231 .respond_with_error(receipt, proto::Error { message })
232 .await?;
233 return Ok(());
234 }
235 }
236 }
237
238 let collaborator_user_ids = collaborator_user_ids.into_iter().collect::<Vec<_>>();
239 let worktree_id = self.state_mut().await.add_worktree(Worktree {
240 host_connection_id: request.sender_id,
241 collaborator_user_ids: collaborator_user_ids.clone(),
242 root_name: request.payload.root_name,
243 share: None,
244 });
245
246 self.peer
247 .respond(receipt, proto::OpenWorktreeResponse { worktree_id })
248 .await?;
249 self.update_collaborators_for_users(&collaborator_user_ids)
250 .await?;
251
252 Ok(())
253 }
254
255 async fn close_worktree(
256 mut self: Arc<Server>,
257 request: TypedEnvelope<proto::CloseWorktree>,
258 ) -> tide::Result<()> {
259 let worktree_id = request.payload.worktree_id;
260 let worktree = self
261 .state_mut()
262 .await
263 .remove_worktree(worktree_id, request.sender_id)?;
264
265 if let Some(share) = worktree.share {
266 broadcast(
267 request.sender_id,
268 share.guest_connection_ids.keys().copied().collect(),
269 |conn_id| {
270 self.peer
271 .send(conn_id, proto::UnshareWorktree { worktree_id })
272 },
273 )
274 .await?;
275 }
276 self.update_collaborators_for_users(&worktree.collaborator_user_ids)
277 .await?;
278 Ok(())
279 }
280
281 async fn share_worktree(
282 mut self: Arc<Server>,
283 mut request: TypedEnvelope<proto::ShareWorktree>,
284 ) -> tide::Result<()> {
285 let worktree = request
286 .payload
287 .worktree
288 .as_mut()
289 .ok_or_else(|| anyhow!("missing worktree"))?;
290 let entries = mem::take(&mut worktree.entries)
291 .into_iter()
292 .map(|entry| (entry.id, entry))
293 .collect();
294
295 let collaborator_user_ids =
296 self.state_mut()
297 .await
298 .share_worktree(worktree.id, request.sender_id, entries);
299 if let Some(collaborator_user_ids) = collaborator_user_ids {
300 self.peer
301 .respond(request.receipt(), proto::ShareWorktreeResponse {})
302 .await?;
303 self.update_collaborators_for_users(&collaborator_user_ids)
304 .await?;
305 } else {
306 self.peer
307 .respond_with_error(
308 request.receipt(),
309 proto::Error {
310 message: "no such worktree".to_string(),
311 },
312 )
313 .await?;
314 }
315 Ok(())
316 }
317
318 async fn unshare_worktree(
319 mut self: Arc<Server>,
320 request: TypedEnvelope<proto::UnshareWorktree>,
321 ) -> tide::Result<()> {
322 let worktree_id = request.payload.worktree_id;
323 let worktree = self
324 .state_mut()
325 .await
326 .unshare_worktree(worktree_id, request.sender_id)?;
327
328 broadcast(request.sender_id, worktree.connection_ids, |conn_id| {
329 self.peer
330 .send(conn_id, proto::UnshareWorktree { worktree_id })
331 })
332 .await?;
333 self.update_collaborators_for_users(&worktree.collaborator_ids)
334 .await?;
335
336 Ok(())
337 }
338
339 async fn join_worktree(
340 mut self: Arc<Server>,
341 request: TypedEnvelope<proto::JoinWorktree>,
342 ) -> tide::Result<()> {
343 let worktree_id = request.payload.worktree_id;
344 let user_id = self
345 .state()
346 .await
347 .user_id_for_connection(request.sender_id)?;
348
349 let mut state = self.state_mut().await;
350 match state.join_worktree(request.sender_id, user_id, worktree_id) {
351 Ok(JoinedWorktree {
352 replica_id,
353 worktree,
354 }) => {
355 let share = worktree.share()?;
356 let peer_count = share.guest_connection_ids.len();
357 let mut peers = Vec::with_capacity(peer_count);
358 peers.push(proto::Peer {
359 peer_id: worktree.host_connection_id.0,
360 replica_id: 0,
361 });
362 for (peer_conn_id, peer_replica_id) in &share.guest_connection_ids {
363 if *peer_conn_id != request.sender_id {
364 peers.push(proto::Peer {
365 peer_id: peer_conn_id.0,
366 replica_id: *peer_replica_id as u32,
367 });
368 }
369 }
370 let response = proto::JoinWorktreeResponse {
371 worktree: Some(proto::Worktree {
372 id: worktree_id,
373 root_name: worktree.root_name.clone(),
374 entries: share.entries.values().cloned().collect(),
375 }),
376 replica_id: replica_id as u32,
377 peers,
378 };
379 let connection_ids = worktree.connection_ids();
380 let collaborator_user_ids = worktree.collaborator_user_ids.clone();
381 drop(state);
382
383 broadcast(request.sender_id, connection_ids, |conn_id| {
384 self.peer.send(
385 conn_id,
386 proto::AddPeer {
387 worktree_id,
388 peer: Some(proto::Peer {
389 peer_id: request.sender_id.0,
390 replica_id: response.replica_id,
391 }),
392 },
393 )
394 })
395 .await?;
396 self.peer.respond(request.receipt(), response).await?;
397 self.update_collaborators_for_users(&collaborator_user_ids)
398 .await?;
399 }
400 Err(error) => {
401 drop(state);
402 self.peer
403 .respond_with_error(
404 request.receipt(),
405 proto::Error {
406 message: error.to_string(),
407 },
408 )
409 .await?;
410 }
411 }
412
413 Ok(())
414 }
415
416 async fn leave_worktree(
417 mut self: Arc<Server>,
418 request: TypedEnvelope<proto::LeaveWorktree>,
419 ) -> tide::Result<()> {
420 let sender_id = request.sender_id;
421 let worktree_id = request.payload.worktree_id;
422 let worktree = self
423 .state_mut()
424 .await
425 .leave_worktree(sender_id, worktree_id);
426 if let Some(worktree) = worktree {
427 broadcast(sender_id, worktree.connection_ids, |conn_id| {
428 self.peer.send(
429 conn_id,
430 proto::RemovePeer {
431 worktree_id,
432 peer_id: sender_id.0,
433 },
434 )
435 })
436 .await?;
437 self.update_collaborators_for_users(&worktree.collaborator_ids)
438 .await?;
439 }
440 Ok(())
441 }
442
443 async fn update_worktree(
444 mut self: Arc<Server>,
445 request: TypedEnvelope<proto::UpdateWorktree>,
446 ) -> tide::Result<()> {
447 let connection_ids = self.state_mut().await.update_worktree(
448 request.sender_id,
449 request.payload.worktree_id,
450 &request.payload.removed_entries,
451 &request.payload.updated_entries,
452 )?;
453
454 broadcast(request.sender_id, connection_ids, |connection_id| {
455 self.peer
456 .forward_send(request.sender_id, connection_id, request.payload.clone())
457 })
458 .await?;
459
460 Ok(())
461 }
462
463 async fn open_buffer(
464 self: Arc<Server>,
465 request: TypedEnvelope<proto::OpenBuffer>,
466 ) -> tide::Result<()> {
467 let receipt = request.receipt();
468 let host_connection_id = self
469 .state()
470 .await
471 .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?;
472 let response = self
473 .peer
474 .forward_request(request.sender_id, host_connection_id, request.payload)
475 .await?;
476 self.peer.respond(receipt, response).await?;
477 Ok(())
478 }
479
480 async fn close_buffer(
481 self: Arc<Server>,
482 request: TypedEnvelope<proto::CloseBuffer>,
483 ) -> tide::Result<()> {
484 let host_connection_id = self
485 .state()
486 .await
487 .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?;
488 self.peer
489 .forward_send(request.sender_id, host_connection_id, request.payload)
490 .await?;
491 Ok(())
492 }
493
494 async fn save_buffer(
495 self: Arc<Server>,
496 request: TypedEnvelope<proto::SaveBuffer>,
497 ) -> tide::Result<()> {
498 let host;
499 let guests;
500 {
501 let state = self.state().await;
502 host = state
503 .worktree_host_connection_id(request.sender_id, request.payload.worktree_id)?;
504 guests = state
505 .worktree_guest_connection_ids(request.sender_id, request.payload.worktree_id)?;
506 }
507
508 let sender = request.sender_id;
509 let receipt = request.receipt();
510 let response = self
511 .peer
512 .forward_request(sender, host, request.payload.clone())
513 .await?;
514
515 broadcast(host, guests, |conn_id| {
516 let response = response.clone();
517 let peer = &self.peer;
518 async move {
519 if conn_id == sender {
520 peer.respond(receipt, response).await
521 } else {
522 peer.forward_send(host, conn_id, response).await
523 }
524 }
525 })
526 .await?;
527
528 Ok(())
529 }
530
531 async fn update_buffer(
532 self: Arc<Server>,
533 request: TypedEnvelope<proto::UpdateBuffer>,
534 ) -> tide::Result<()> {
535 broadcast(
536 request.sender_id,
537 self.state()
538 .await
539 .worktree_connection_ids(request.sender_id, request.payload.worktree_id)?,
540 |connection_id| {
541 self.peer
542 .forward_send(request.sender_id, connection_id, request.payload.clone())
543 },
544 )
545 .await?;
546 self.peer.respond(request.receipt(), proto::Ack {}).await?;
547 Ok(())
548 }
549
550 async fn buffer_saved(
551 self: Arc<Server>,
552 request: TypedEnvelope<proto::BufferSaved>,
553 ) -> tide::Result<()> {
554 broadcast(
555 request.sender_id,
556 self.store
557 .read()
558 .await
559 .worktree_connection_ids(request.sender_id, request.payload.worktree_id)?,
560 |connection_id| {
561 self.peer
562 .forward_send(request.sender_id, connection_id, request.payload.clone())
563 },
564 )
565 .await?;
566 Ok(())
567 }
568
569 async fn get_channels(
570 self: Arc<Server>,
571 request: TypedEnvelope<proto::GetChannels>,
572 ) -> tide::Result<()> {
573 let user_id = self
574 .state()
575 .await
576 .user_id_for_connection(request.sender_id)?;
577 let channels = self.app_state.db.get_accessible_channels(user_id).await?;
578 self.peer
579 .respond(
580 request.receipt(),
581 proto::GetChannelsResponse {
582 channels: channels
583 .into_iter()
584 .map(|chan| proto::Channel {
585 id: chan.id.to_proto(),
586 name: chan.name,
587 })
588 .collect(),
589 },
590 )
591 .await?;
592 Ok(())
593 }
594
595 async fn get_users(
596 self: Arc<Server>,
597 request: TypedEnvelope<proto::GetUsers>,
598 ) -> tide::Result<()> {
599 let receipt = request.receipt();
600 let user_ids = request.payload.user_ids.into_iter().map(UserId::from_proto);
601 let users = self
602 .app_state
603 .db
604 .get_users_by_ids(user_ids)
605 .await?
606 .into_iter()
607 .map(|user| proto::User {
608 id: user.id.to_proto(),
609 avatar_url: format!("https://github.com/{}.png?size=128", user.github_login),
610 github_login: user.github_login,
611 })
612 .collect();
613 self.peer
614 .respond(receipt, proto::GetUsersResponse { users })
615 .await?;
616 Ok(())
617 }
618
619 async fn update_collaborators_for_users<'a>(
620 self: &Arc<Server>,
621 user_ids: impl IntoIterator<Item = &'a UserId>,
622 ) -> tide::Result<()> {
623 let mut send_futures = Vec::new();
624
625 let state = self.state().await;
626 for user_id in user_ids {
627 let collaborators = state.collaborators_for_user(*user_id);
628 for connection_id in state.connection_ids_for_user(*user_id) {
629 send_futures.push(self.peer.send(
630 connection_id,
631 proto::UpdateCollaborators {
632 collaborators: collaborators.clone(),
633 },
634 ));
635 }
636 }
637
638 drop(state);
639 futures::future::try_join_all(send_futures).await?;
640
641 Ok(())
642 }
643
644 async fn join_channel(
645 mut self: Arc<Self>,
646 request: TypedEnvelope<proto::JoinChannel>,
647 ) -> tide::Result<()> {
648 let user_id = self
649 .state()
650 .await
651 .user_id_for_connection(request.sender_id)?;
652 let channel_id = ChannelId::from_proto(request.payload.channel_id);
653 if !self
654 .app_state
655 .db
656 .can_user_access_channel(user_id, channel_id)
657 .await?
658 {
659 Err(anyhow!("access denied"))?;
660 }
661
662 self.state_mut()
663 .await
664 .join_channel(request.sender_id, channel_id);
665 let messages = self
666 .app_state
667 .db
668 .get_channel_messages(channel_id, MESSAGE_COUNT_PER_PAGE, None)
669 .await?
670 .into_iter()
671 .map(|msg| proto::ChannelMessage {
672 id: msg.id.to_proto(),
673 body: msg.body,
674 timestamp: msg.sent_at.unix_timestamp() as u64,
675 sender_id: msg.sender_id.to_proto(),
676 nonce: Some(msg.nonce.as_u128().into()),
677 })
678 .collect::<Vec<_>>();
679 self.peer
680 .respond(
681 request.receipt(),
682 proto::JoinChannelResponse {
683 done: messages.len() < MESSAGE_COUNT_PER_PAGE,
684 messages,
685 },
686 )
687 .await?;
688 Ok(())
689 }
690
691 async fn leave_channel(
692 mut self: Arc<Self>,
693 request: TypedEnvelope<proto::LeaveChannel>,
694 ) -> tide::Result<()> {
695 let user_id = self
696 .state()
697 .await
698 .user_id_for_connection(request.sender_id)?;
699 let channel_id = ChannelId::from_proto(request.payload.channel_id);
700 if !self
701 .app_state
702 .db
703 .can_user_access_channel(user_id, channel_id)
704 .await?
705 {
706 Err(anyhow!("access denied"))?;
707 }
708
709 self.state_mut()
710 .await
711 .leave_channel(request.sender_id, channel_id);
712
713 Ok(())
714 }
715
716 async fn send_channel_message(
717 self: Arc<Self>,
718 request: TypedEnvelope<proto::SendChannelMessage>,
719 ) -> tide::Result<()> {
720 let receipt = request.receipt();
721 let channel_id = ChannelId::from_proto(request.payload.channel_id);
722 let user_id;
723 let connection_ids;
724 {
725 let state = self.state().await;
726 user_id = state.user_id_for_connection(request.sender_id)?;
727 if let Some(ids) = state.channel_connection_ids(channel_id) {
728 connection_ids = ids;
729 } else {
730 return Ok(());
731 }
732 }
733
734 // Validate the message body.
735 let body = request.payload.body.trim().to_string();
736 if body.len() > MAX_MESSAGE_LEN {
737 self.peer
738 .respond_with_error(
739 receipt,
740 proto::Error {
741 message: "message is too long".to_string(),
742 },
743 )
744 .await?;
745 return Ok(());
746 }
747 if body.is_empty() {
748 self.peer
749 .respond_with_error(
750 receipt,
751 proto::Error {
752 message: "message can't be blank".to_string(),
753 },
754 )
755 .await?;
756 return Ok(());
757 }
758
759 let timestamp = OffsetDateTime::now_utc();
760 let nonce = if let Some(nonce) = request.payload.nonce {
761 nonce
762 } else {
763 self.peer
764 .respond_with_error(
765 receipt,
766 proto::Error {
767 message: "nonce can't be blank".to_string(),
768 },
769 )
770 .await?;
771 return Ok(());
772 };
773
774 let message_id = self
775 .app_state
776 .db
777 .create_channel_message(channel_id, user_id, &body, timestamp, nonce.clone().into())
778 .await?
779 .to_proto();
780 let message = proto::ChannelMessage {
781 sender_id: user_id.to_proto(),
782 id: message_id,
783 body,
784 timestamp: timestamp.unix_timestamp() as u64,
785 nonce: Some(nonce),
786 };
787 broadcast(request.sender_id, connection_ids, |conn_id| {
788 self.peer.send(
789 conn_id,
790 proto::ChannelMessageSent {
791 channel_id: channel_id.to_proto(),
792 message: Some(message.clone()),
793 },
794 )
795 })
796 .await?;
797 self.peer
798 .respond(
799 receipt,
800 proto::SendChannelMessageResponse {
801 message: Some(message),
802 },
803 )
804 .await?;
805 Ok(())
806 }
807
808 async fn get_channel_messages(
809 self: Arc<Self>,
810 request: TypedEnvelope<proto::GetChannelMessages>,
811 ) -> tide::Result<()> {
812 let user_id = self
813 .state()
814 .await
815 .user_id_for_connection(request.sender_id)?;
816 let channel_id = ChannelId::from_proto(request.payload.channel_id);
817 if !self
818 .app_state
819 .db
820 .can_user_access_channel(user_id, channel_id)
821 .await?
822 {
823 Err(anyhow!("access denied"))?;
824 }
825
826 let messages = self
827 .app_state
828 .db
829 .get_channel_messages(
830 channel_id,
831 MESSAGE_COUNT_PER_PAGE,
832 Some(MessageId::from_proto(request.payload.before_message_id)),
833 )
834 .await?
835 .into_iter()
836 .map(|msg| proto::ChannelMessage {
837 id: msg.id.to_proto(),
838 body: msg.body,
839 timestamp: msg.sent_at.unix_timestamp() as u64,
840 sender_id: msg.sender_id.to_proto(),
841 nonce: Some(msg.nonce.as_u128().into()),
842 })
843 .collect::<Vec<_>>();
844 self.peer
845 .respond(
846 request.receipt(),
847 proto::GetChannelMessagesResponse {
848 done: messages.len() < MESSAGE_COUNT_PER_PAGE,
849 messages,
850 },
851 )
852 .await?;
853 Ok(())
854 }
855
856 fn state<'a>(
857 self: &'a Arc<Self>,
858 ) -> impl Future<Output = async_std::sync::RwLockReadGuard<'a, Store>> {
859 self.store.read()
860 }
861
862 fn state_mut<'a>(
863 self: &'a mut Arc<Self>,
864 ) -> impl Future<Output = async_std::sync::RwLockWriteGuard<'a, Store>> {
865 self.store.write()
866 }
867}
868
869pub async fn broadcast<F, T>(
870 sender_id: ConnectionId,
871 receiver_ids: Vec<ConnectionId>,
872 mut f: F,
873) -> anyhow::Result<()>
874where
875 F: FnMut(ConnectionId) -> T,
876 T: Future<Output = anyhow::Result<()>>,
877{
878 let futures = receiver_ids
879 .into_iter()
880 .filter(|id| *id != sender_id)
881 .map(|id| f(id));
882 futures::future::try_join_all(futures).await?;
883 Ok(())
884}
885
886pub fn add_routes(app: &mut tide::Server<Arc<AppState>>, rpc: &Arc<Peer>) {
887 let server = Server::new(app.state().clone(), rpc.clone(), None);
888 app.at("/rpc").get(move |request: Request<Arc<AppState>>| {
889 let server = server.clone();
890 async move {
891 const WEBSOCKET_GUID: &str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
892
893 let connection_upgrade = header_contains_ignore_case(&request, CONNECTION, "upgrade");
894 let upgrade_to_websocket = header_contains_ignore_case(&request, UPGRADE, "websocket");
895 let upgrade_requested = connection_upgrade && upgrade_to_websocket;
896 let client_protocol_version: Option<u32> = request
897 .header("X-Zed-Protocol-Version")
898 .and_then(|v| v.as_str().parse().ok());
899
900 if !upgrade_requested || client_protocol_version != Some(zrpc::PROTOCOL_VERSION) {
901 return Ok(Response::new(StatusCode::UpgradeRequired));
902 }
903
904 let header = match request.header("Sec-Websocket-Key") {
905 Some(h) => h.as_str(),
906 None => return Err(anyhow!("expected sec-websocket-key"))?,
907 };
908
909 let user_id = process_auth_header(&request).await?;
910
911 let mut response = Response::new(StatusCode::SwitchingProtocols);
912 response.insert_header(UPGRADE, "websocket");
913 response.insert_header(CONNECTION, "Upgrade");
914 let hash = Sha1::new().chain(header).chain(WEBSOCKET_GUID).finalize();
915 response.insert_header("Sec-Websocket-Accept", base64::encode(&hash[..]));
916 response.insert_header("Sec-Websocket-Version", "13");
917
918 let http_res: &mut tide::http::Response = response.as_mut();
919 let upgrade_receiver = http_res.recv_upgrade().await;
920 let addr = request.remote().unwrap_or("unknown").to_string();
921 task::spawn(async move {
922 if let Some(stream) = upgrade_receiver.await {
923 server
924 .handle_connection(
925 Connection::new(
926 WebSocketStream::from_raw_socket(stream, Role::Server, None).await,
927 ),
928 addr,
929 user_id,
930 )
931 .await;
932 }
933 });
934
935 Ok(response)
936 }
937 });
938}
939
940fn header_contains_ignore_case<T>(
941 request: &tide::Request<T>,
942 header_name: HeaderName,
943 value: &str,
944) -> bool {
945 request
946 .header(header_name)
947 .map(|h| {
948 h.as_str()
949 .split(',')
950 .any(|s| s.trim().eq_ignore_ascii_case(value.trim()))
951 })
952 .unwrap_or(false)
953}
954
955#[cfg(test)]
956mod tests {
957 use super::*;
958 use crate::{
959 auth,
960 db::{tests::TestDb, UserId},
961 github, AppState, Config,
962 };
963 use async_std::{sync::RwLockReadGuard, task};
964 use gpui::{ModelHandle, TestAppContext};
965 use parking_lot::Mutex;
966 use postage::{mpsc, watch};
967 use serde_json::json;
968 use sqlx::types::time::OffsetDateTime;
969 use std::{
970 path::Path,
971 sync::{
972 atomic::{AtomicBool, Ordering::SeqCst},
973 Arc,
974 },
975 time::Duration,
976 };
977 use zed::{
978 channel::{Channel, ChannelDetails, ChannelList},
979 editor::{Editor, EditorStyle, Insert},
980 fs::{FakeFs, Fs as _},
981 language::LanguageRegistry,
982 people_panel::JoinWorktree,
983 project::ProjectPath,
984 rpc::{self, Client, Credentials, EstablishConnectionError},
985 settings,
986 test::FakeHttpClient,
987 user::UserStore,
988 workspace::Workspace,
989 worktree::Worktree,
990 };
991 use zrpc::Peer;
992
993 #[gpui::test]
994 async fn test_share_worktree(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
995 let (window_b, _) = cx_b.add_window(|_| EmptyView);
996 let settings = cx_b.read(settings::test).1;
997 let lang_registry = Arc::new(LanguageRegistry::new());
998
999 // Connect to a server as 2 clients.
1000 let mut server = TestServer::start().await;
1001 let (client_a, _) = server.create_client(&mut cx_a, "user_a").await;
1002 let (client_b, _) = server.create_client(&mut cx_b, "user_b").await;
1003
1004 cx_a.foreground().forbid_parking();
1005
1006 // Share a local worktree as client A
1007 let fs = Arc::new(FakeFs::new());
1008 fs.insert_tree(
1009 "/a",
1010 json!({
1011 ".zed.toml": r#"collaborators = ["user_b"]"#,
1012 "a.txt": "a-contents",
1013 "b.txt": "b-contents",
1014 }),
1015 )
1016 .await;
1017 let worktree_a = Worktree::open_local(
1018 client_a.clone(),
1019 "/a".as_ref(),
1020 fs,
1021 lang_registry.clone(),
1022 &mut cx_a.to_async(),
1023 )
1024 .await
1025 .unwrap();
1026 worktree_a
1027 .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
1028 .await;
1029 let worktree_id = worktree_a
1030 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1031 .await
1032 .unwrap();
1033
1034 // Join that worktree as client B, and see that a guest has joined as client A.
1035 let worktree_b = Worktree::open_remote(
1036 client_b.clone(),
1037 worktree_id,
1038 lang_registry.clone(),
1039 &mut cx_b.to_async(),
1040 )
1041 .await
1042 .unwrap();
1043 let replica_id_b = worktree_b.read_with(&cx_b, |tree, _| tree.replica_id());
1044 worktree_a
1045 .condition(&cx_a, |tree, _| {
1046 tree.peers()
1047 .values()
1048 .any(|replica_id| *replica_id == replica_id_b)
1049 })
1050 .await;
1051
1052 // Open the same file as client B and client A.
1053 let buffer_b = worktree_b
1054 .update(&mut cx_b, |worktree, cx| worktree.open_buffer("b.txt", cx))
1055 .await
1056 .unwrap();
1057 buffer_b.read_with(&cx_b, |buf, _| assert_eq!(buf.text(), "b-contents"));
1058 worktree_a.read_with(&cx_a, |tree, cx| assert!(tree.has_open_buffer("b.txt", cx)));
1059 let buffer_a = worktree_a
1060 .update(&mut cx_a, |tree, cx| tree.open_buffer("b.txt", cx))
1061 .await
1062 .unwrap();
1063
1064 // Create a selection set as client B and see that selection set as client A.
1065 let editor_b = cx_b.add_view(window_b, |cx| {
1066 Editor::for_buffer(
1067 buffer_b,
1068 settings,
1069 |cx| EditorStyle::test(cx.font_cache()),
1070 cx,
1071 )
1072 });
1073 buffer_a
1074 .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 1)
1075 .await;
1076
1077 // Edit the buffer as client B and see that edit as client A.
1078 editor_b.update(&mut cx_b, |editor, cx| {
1079 editor.insert(&Insert("ok, ".into()), cx)
1080 });
1081 buffer_a
1082 .condition(&cx_a, |buffer, _| buffer.text() == "ok, b-contents")
1083 .await;
1084
1085 // Remove the selection set as client B, see those selections disappear as client A.
1086 cx_b.update(move |_| drop(editor_b));
1087 buffer_a
1088 .condition(&cx_a, |buffer, _| buffer.selection_sets().count() == 0)
1089 .await;
1090
1091 // Close the buffer as client A, see that the buffer is closed.
1092 cx_a.update(move |_| drop(buffer_a));
1093 worktree_a
1094 .condition(&cx_a, |tree, cx| !tree.has_open_buffer("b.txt", cx))
1095 .await;
1096
1097 // Dropping the worktree removes client B from client A's peers.
1098 cx_b.update(move |_| drop(worktree_b));
1099 worktree_a
1100 .condition(&cx_a, |tree, _| tree.peers().is_empty())
1101 .await;
1102 }
1103
1104 #[gpui::test]
1105 async fn test_unshare_worktree(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
1106 cx_b.update(zed::workspace::init);
1107 let lang_registry = Arc::new(LanguageRegistry::new());
1108
1109 // Connect to a server as 2 clients.
1110 let mut server = TestServer::start().await;
1111 let (client_a, _) = server.create_client(&mut cx_a, "user_a").await;
1112 let (client_b, user_store_b) = server.create_client(&mut cx_b, "user_b").await;
1113 let app_state_b = zed::AppState {
1114 rpc: client_b,
1115 user_store: user_store_b,
1116 ..Arc::try_unwrap(cx_b.update(zed::test::test_app_state))
1117 .ok()
1118 .unwrap()
1119 };
1120
1121 cx_a.foreground().forbid_parking();
1122
1123 // Share a local worktree as client A
1124 let fs = Arc::new(FakeFs::new());
1125 fs.insert_tree(
1126 "/a",
1127 json!({
1128 ".zed.toml": r#"collaborators = ["user_b"]"#,
1129 "a.txt": "a-contents",
1130 "b.txt": "b-contents",
1131 }),
1132 )
1133 .await;
1134 let worktree_a = Worktree::open_local(
1135 client_a.clone(),
1136 "/a".as_ref(),
1137 fs,
1138 lang_registry.clone(),
1139 &mut cx_a.to_async(),
1140 )
1141 .await
1142 .unwrap();
1143 worktree_a
1144 .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
1145 .await;
1146
1147 let remote_worktree_id = worktree_a
1148 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1149 .await
1150 .unwrap();
1151
1152 let (window_b, workspace_b) = cx_b.add_window(|cx| Workspace::new(&app_state_b, cx));
1153 cx_b.update(|cx| {
1154 cx.dispatch_action(
1155 window_b,
1156 vec![workspace_b.id()],
1157 &JoinWorktree(remote_worktree_id),
1158 );
1159 });
1160 workspace_b
1161 .condition(&cx_b, |workspace, cx| workspace.worktrees(cx).len() == 1)
1162 .await;
1163
1164 let local_worktree_id_b = workspace_b.read_with(&cx_b, |workspace, cx| {
1165 let active_pane = workspace.active_pane().read(cx);
1166 assert!(active_pane.active_item().is_none());
1167 workspace.worktrees(cx).first().unwrap().id()
1168 });
1169 workspace_b
1170 .update(&mut cx_b, |workspace, cx| {
1171 workspace.open_entry(
1172 ProjectPath {
1173 worktree_id: local_worktree_id_b,
1174 path: Path::new("a.txt").into(),
1175 },
1176 cx,
1177 )
1178 })
1179 .unwrap()
1180 .await;
1181 workspace_b.read_with(&cx_b, |workspace, cx| {
1182 let active_pane = workspace.active_pane().read(cx);
1183 assert!(active_pane.active_item().is_some());
1184 });
1185
1186 worktree_a.update(&mut cx_a, |tree, cx| {
1187 tree.as_local_mut().unwrap().unshare(cx);
1188 });
1189 workspace_b
1190 .condition(&cx_b, |workspace, cx| workspace.worktrees(cx).len() == 0)
1191 .await;
1192 workspace_b.read_with(&cx_b, |workspace, cx| {
1193 let active_pane = workspace.active_pane().read(cx);
1194 assert!(active_pane.active_item().is_none());
1195 });
1196 }
1197
1198 #[gpui::test]
1199 async fn test_propagate_saves_and_fs_changes_in_shared_worktree(
1200 mut cx_a: TestAppContext,
1201 mut cx_b: TestAppContext,
1202 mut cx_c: TestAppContext,
1203 ) {
1204 cx_a.foreground().forbid_parking();
1205 let lang_registry = Arc::new(LanguageRegistry::new());
1206
1207 // Connect to a server as 3 clients.
1208 let mut server = TestServer::start().await;
1209 let (client_a, _) = server.create_client(&mut cx_a, "user_a").await;
1210 let (client_b, _) = server.create_client(&mut cx_b, "user_b").await;
1211 let (client_c, _) = server.create_client(&mut cx_c, "user_c").await;
1212
1213 let fs = Arc::new(FakeFs::new());
1214
1215 // Share a worktree as client A.
1216 fs.insert_tree(
1217 "/a",
1218 json!({
1219 ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#,
1220 "file1": "",
1221 "file2": ""
1222 }),
1223 )
1224 .await;
1225
1226 let worktree_a = Worktree::open_local(
1227 client_a.clone(),
1228 "/a".as_ref(),
1229 fs.clone(),
1230 lang_registry.clone(),
1231 &mut cx_a.to_async(),
1232 )
1233 .await
1234 .unwrap();
1235 worktree_a
1236 .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
1237 .await;
1238 let worktree_id = worktree_a
1239 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1240 .await
1241 .unwrap();
1242
1243 // Join that worktree as clients B and C.
1244 let worktree_b = Worktree::open_remote(
1245 client_b.clone(),
1246 worktree_id,
1247 lang_registry.clone(),
1248 &mut cx_b.to_async(),
1249 )
1250 .await
1251 .unwrap();
1252 let worktree_c = Worktree::open_remote(
1253 client_c.clone(),
1254 worktree_id,
1255 lang_registry.clone(),
1256 &mut cx_c.to_async(),
1257 )
1258 .await
1259 .unwrap();
1260
1261 // Open and edit a buffer as both guests B and C.
1262 let buffer_b = worktree_b
1263 .update(&mut cx_b, |tree, cx| tree.open_buffer("file1", cx))
1264 .await
1265 .unwrap();
1266 let buffer_c = worktree_c
1267 .update(&mut cx_c, |tree, cx| tree.open_buffer("file1", cx))
1268 .await
1269 .unwrap();
1270 buffer_b.update(&mut cx_b, |buf, cx| buf.edit([0..0], "i-am-b, ", cx));
1271 buffer_c.update(&mut cx_c, |buf, cx| buf.edit([0..0], "i-am-c, ", cx));
1272
1273 // Open and edit that buffer as the host.
1274 let buffer_a = worktree_a
1275 .update(&mut cx_a, |tree, cx| tree.open_buffer("file1", cx))
1276 .await
1277 .unwrap();
1278
1279 buffer_a
1280 .condition(&mut cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, ")
1281 .await;
1282 buffer_a.update(&mut cx_a, |buf, cx| {
1283 buf.edit([buf.len()..buf.len()], "i-am-a", cx)
1284 });
1285
1286 // Wait for edits to propagate
1287 buffer_a
1288 .condition(&mut cx_a, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a")
1289 .await;
1290 buffer_b
1291 .condition(&mut cx_b, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a")
1292 .await;
1293 buffer_c
1294 .condition(&mut cx_c, |buf, _| buf.text() == "i-am-c, i-am-b, i-am-a")
1295 .await;
1296
1297 // Edit the buffer as the host and concurrently save as guest B.
1298 let save_b = buffer_b.update(&mut cx_b, |buf, cx| buf.save(cx).unwrap());
1299 buffer_a.update(&mut cx_a, |buf, cx| buf.edit([0..0], "hi-a, ", cx));
1300 save_b.await.unwrap();
1301 assert_eq!(
1302 fs.load("/a/file1".as_ref()).await.unwrap(),
1303 "hi-a, i-am-c, i-am-b, i-am-a"
1304 );
1305 buffer_a.read_with(&cx_a, |buf, _| assert!(!buf.is_dirty()));
1306 buffer_b.read_with(&cx_b, |buf, _| assert!(!buf.is_dirty()));
1307 buffer_c.condition(&cx_c, |buf, _| !buf.is_dirty()).await;
1308
1309 // Make changes on host's file system, see those changes on the guests.
1310 fs.rename("/a/file2".as_ref(), "/a/file3".as_ref())
1311 .await
1312 .unwrap();
1313 fs.insert_file(Path::new("/a/file4"), "4".into())
1314 .await
1315 .unwrap();
1316
1317 worktree_b
1318 .condition(&cx_b, |tree, _| tree.file_count() == 4)
1319 .await;
1320 worktree_c
1321 .condition(&cx_c, |tree, _| tree.file_count() == 4)
1322 .await;
1323 worktree_b.read_with(&cx_b, |tree, _| {
1324 assert_eq!(
1325 tree.paths()
1326 .map(|p| p.to_string_lossy())
1327 .collect::<Vec<_>>(),
1328 &[".zed.toml", "file1", "file3", "file4"]
1329 )
1330 });
1331 worktree_c.read_with(&cx_c, |tree, _| {
1332 assert_eq!(
1333 tree.paths()
1334 .map(|p| p.to_string_lossy())
1335 .collect::<Vec<_>>(),
1336 &[".zed.toml", "file1", "file3", "file4"]
1337 )
1338 });
1339 }
1340
1341 #[gpui::test]
1342 async fn test_buffer_conflict_after_save(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
1343 cx_a.foreground().forbid_parking();
1344 let lang_registry = Arc::new(LanguageRegistry::new());
1345
1346 // Connect to a server as 2 clients.
1347 let mut server = TestServer::start().await;
1348 let (client_a, _) = server.create_client(&mut cx_a, "user_a").await;
1349 let (client_b, _) = server.create_client(&mut cx_b, "user_b").await;
1350
1351 // Share a local worktree as client A
1352 let fs = Arc::new(FakeFs::new());
1353 fs.insert_tree(
1354 "/dir",
1355 json!({
1356 ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#,
1357 "a.txt": "a-contents",
1358 }),
1359 )
1360 .await;
1361
1362 let worktree_a = Worktree::open_local(
1363 client_a.clone(),
1364 "/dir".as_ref(),
1365 fs,
1366 lang_registry.clone(),
1367 &mut cx_a.to_async(),
1368 )
1369 .await
1370 .unwrap();
1371 worktree_a
1372 .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
1373 .await;
1374 let worktree_id = worktree_a
1375 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1376 .await
1377 .unwrap();
1378
1379 // Join that worktree as client B, and see that a guest has joined as client A.
1380 let worktree_b = Worktree::open_remote(
1381 client_b.clone(),
1382 worktree_id,
1383 lang_registry.clone(),
1384 &mut cx_b.to_async(),
1385 )
1386 .await
1387 .unwrap();
1388
1389 let buffer_b = worktree_b
1390 .update(&mut cx_b, |worktree, cx| worktree.open_buffer("a.txt", cx))
1391 .await
1392 .unwrap();
1393 let mtime = buffer_b.read_with(&cx_b, |buf, _| buf.file().unwrap().mtime());
1394
1395 buffer_b.update(&mut cx_b, |buf, cx| buf.edit([0..0], "world ", cx));
1396 buffer_b.read_with(&cx_b, |buf, _| {
1397 assert!(buf.is_dirty());
1398 assert!(!buf.has_conflict());
1399 });
1400
1401 buffer_b
1402 .update(&mut cx_b, |buf, cx| buf.save(cx))
1403 .unwrap()
1404 .await
1405 .unwrap();
1406 worktree_b
1407 .condition(&cx_b, |_, cx| {
1408 buffer_b.read(cx).file().unwrap().mtime() != mtime
1409 })
1410 .await;
1411 buffer_b.read_with(&cx_b, |buf, _| {
1412 assert!(!buf.is_dirty());
1413 assert!(!buf.has_conflict());
1414 });
1415
1416 buffer_b.update(&mut cx_b, |buf, cx| buf.edit([0..0], "hello ", cx));
1417 buffer_b.read_with(&cx_b, |buf, _| {
1418 assert!(buf.is_dirty());
1419 assert!(!buf.has_conflict());
1420 });
1421 }
1422
1423 #[gpui::test]
1424 async fn test_editing_while_guest_opens_buffer(
1425 mut cx_a: TestAppContext,
1426 mut cx_b: TestAppContext,
1427 ) {
1428 cx_a.foreground().forbid_parking();
1429 let lang_registry = Arc::new(LanguageRegistry::new());
1430
1431 // Connect to a server as 2 clients.
1432 let mut server = TestServer::start().await;
1433 let (client_a, _) = server.create_client(&mut cx_a, "user_a").await;
1434 let (client_b, _) = server.create_client(&mut cx_b, "user_b").await;
1435
1436 // Share a local worktree as client A
1437 let fs = Arc::new(FakeFs::new());
1438 fs.insert_tree(
1439 "/dir",
1440 json!({
1441 ".zed.toml": r#"collaborators = ["user_b"]"#,
1442 "a.txt": "a-contents",
1443 }),
1444 )
1445 .await;
1446 let worktree_a = Worktree::open_local(
1447 client_a.clone(),
1448 "/dir".as_ref(),
1449 fs,
1450 lang_registry.clone(),
1451 &mut cx_a.to_async(),
1452 )
1453 .await
1454 .unwrap();
1455 worktree_a
1456 .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
1457 .await;
1458 let worktree_id = worktree_a
1459 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1460 .await
1461 .unwrap();
1462
1463 // Join that worktree as client B, and see that a guest has joined as client A.
1464 let worktree_b = Worktree::open_remote(
1465 client_b.clone(),
1466 worktree_id,
1467 lang_registry.clone(),
1468 &mut cx_b.to_async(),
1469 )
1470 .await
1471 .unwrap();
1472
1473 let buffer_a = worktree_a
1474 .update(&mut cx_a, |tree, cx| tree.open_buffer("a.txt", cx))
1475 .await
1476 .unwrap();
1477 let buffer_b = cx_b
1478 .background()
1479 .spawn(worktree_b.update(&mut cx_b, |worktree, cx| worktree.open_buffer("a.txt", cx)));
1480
1481 task::yield_now().await;
1482 buffer_a.update(&mut cx_a, |buf, cx| buf.edit([0..0], "z", cx));
1483
1484 let text = buffer_a.read_with(&cx_a, |buf, _| buf.text());
1485 let buffer_b = buffer_b.await.unwrap();
1486 buffer_b.condition(&cx_b, |buf, _| buf.text() == text).await;
1487 }
1488
1489 #[gpui::test]
1490 async fn test_leaving_worktree_while_opening_buffer(
1491 mut cx_a: TestAppContext,
1492 mut cx_b: TestAppContext,
1493 ) {
1494 cx_a.foreground().forbid_parking();
1495 let lang_registry = Arc::new(LanguageRegistry::new());
1496
1497 // Connect to a server as 2 clients.
1498 let mut server = TestServer::start().await;
1499 let (client_a, _) = server.create_client(&mut cx_a, "user_a").await;
1500 let (client_b, _) = server.create_client(&mut cx_b, "user_b").await;
1501
1502 // Share a local worktree as client A
1503 let fs = Arc::new(FakeFs::new());
1504 fs.insert_tree(
1505 "/dir",
1506 json!({
1507 ".zed.toml": r#"collaborators = ["user_b"]"#,
1508 "a.txt": "a-contents",
1509 }),
1510 )
1511 .await;
1512 let worktree_a = Worktree::open_local(
1513 client_a.clone(),
1514 "/dir".as_ref(),
1515 fs,
1516 lang_registry.clone(),
1517 &mut cx_a.to_async(),
1518 )
1519 .await
1520 .unwrap();
1521 worktree_a
1522 .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
1523 .await;
1524 let worktree_id = worktree_a
1525 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1526 .await
1527 .unwrap();
1528
1529 // Join that worktree as client B, and see that a guest has joined as client A.
1530 let worktree_b = Worktree::open_remote(
1531 client_b.clone(),
1532 worktree_id,
1533 lang_registry.clone(),
1534 &mut cx_b.to_async(),
1535 )
1536 .await
1537 .unwrap();
1538 worktree_a
1539 .condition(&cx_a, |tree, _| tree.peers().len() == 1)
1540 .await;
1541
1542 let buffer_b = cx_b
1543 .background()
1544 .spawn(worktree_b.update(&mut cx_b, |worktree, cx| worktree.open_buffer("a.txt", cx)));
1545 cx_b.update(|_| drop(worktree_b));
1546 drop(buffer_b);
1547 worktree_a
1548 .condition(&cx_a, |tree, _| tree.peers().len() == 0)
1549 .await;
1550 }
1551
1552 #[gpui::test]
1553 async fn test_peer_disconnection(mut cx_a: TestAppContext, cx_b: TestAppContext) {
1554 cx_a.foreground().forbid_parking();
1555 let lang_registry = Arc::new(LanguageRegistry::new());
1556
1557 // Connect to a server as 2 clients.
1558 let mut server = TestServer::start().await;
1559 let (client_a, _) = server.create_client(&mut cx_a, "user_a").await;
1560 let (client_b, _) = server.create_client(&mut cx_a, "user_b").await;
1561
1562 // Share a local worktree as client A
1563 let fs = Arc::new(FakeFs::new());
1564 fs.insert_tree(
1565 "/a",
1566 json!({
1567 ".zed.toml": r#"collaborators = ["user_b"]"#,
1568 "a.txt": "a-contents",
1569 "b.txt": "b-contents",
1570 }),
1571 )
1572 .await;
1573 let worktree_a = Worktree::open_local(
1574 client_a.clone(),
1575 "/a".as_ref(),
1576 fs,
1577 lang_registry.clone(),
1578 &mut cx_a.to_async(),
1579 )
1580 .await
1581 .unwrap();
1582 worktree_a
1583 .read_with(&cx_a, |tree, _| tree.as_local().unwrap().scan_complete())
1584 .await;
1585 let worktree_id = worktree_a
1586 .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
1587 .await
1588 .unwrap();
1589
1590 // Join that worktree as client B, and see that a guest has joined as client A.
1591 let _worktree_b = Worktree::open_remote(
1592 client_b.clone(),
1593 worktree_id,
1594 lang_registry.clone(),
1595 &mut cx_b.to_async(),
1596 )
1597 .await
1598 .unwrap();
1599 worktree_a
1600 .condition(&cx_a, |tree, _| tree.peers().len() == 1)
1601 .await;
1602
1603 // Drop client B's connection and ensure client A observes client B leaving the worktree.
1604 client_b.disconnect(&cx_b.to_async()).await.unwrap();
1605 worktree_a
1606 .condition(&cx_a, |tree, _| tree.peers().len() == 0)
1607 .await;
1608 }
1609
1610 #[gpui::test]
1611 async fn test_basic_chat(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
1612 cx_a.foreground().forbid_parking();
1613
1614 // Connect to a server as 2 clients.
1615 let mut server = TestServer::start().await;
1616 let (client_a, user_store_a) = server.create_client(&mut cx_a, "user_a").await;
1617 let (client_b, user_store_b) = server.create_client(&mut cx_b, "user_b").await;
1618
1619 // Create an org that includes these 2 users.
1620 let db = &server.app_state.db;
1621 let org_id = db.create_org("Test Org", "test-org").await.unwrap();
1622 db.add_org_member(org_id, current_user_id(&user_store_a, &cx_a), false)
1623 .await
1624 .unwrap();
1625 db.add_org_member(org_id, current_user_id(&user_store_b, &cx_b), false)
1626 .await
1627 .unwrap();
1628
1629 // Create a channel that includes all the users.
1630 let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap();
1631 db.add_channel_member(channel_id, current_user_id(&user_store_a, &cx_a), false)
1632 .await
1633 .unwrap();
1634 db.add_channel_member(channel_id, current_user_id(&user_store_b, &cx_b), false)
1635 .await
1636 .unwrap();
1637 db.create_channel_message(
1638 channel_id,
1639 current_user_id(&user_store_b, &cx_b),
1640 "hello A, it's B.",
1641 OffsetDateTime::now_utc(),
1642 1,
1643 )
1644 .await
1645 .unwrap();
1646
1647 let channels_a = cx_a.add_model(|cx| ChannelList::new(user_store_a, client_a, cx));
1648 channels_a
1649 .condition(&mut cx_a, |list, _| list.available_channels().is_some())
1650 .await;
1651 channels_a.read_with(&cx_a, |list, _| {
1652 assert_eq!(
1653 list.available_channels().unwrap(),
1654 &[ChannelDetails {
1655 id: channel_id.to_proto(),
1656 name: "test-channel".to_string()
1657 }]
1658 )
1659 });
1660 let channel_a = channels_a.update(&mut cx_a, |this, cx| {
1661 this.get_channel(channel_id.to_proto(), cx).unwrap()
1662 });
1663 channel_a.read_with(&cx_a, |channel, _| assert!(channel.messages().is_empty()));
1664 channel_a
1665 .condition(&cx_a, |channel, _| {
1666 channel_messages(channel)
1667 == [("user_b".to_string(), "hello A, it's B.".to_string(), false)]
1668 })
1669 .await;
1670
1671 let channels_b = cx_b.add_model(|cx| ChannelList::new(user_store_b, client_b, cx));
1672 channels_b
1673 .condition(&mut cx_b, |list, _| list.available_channels().is_some())
1674 .await;
1675 channels_b.read_with(&cx_b, |list, _| {
1676 assert_eq!(
1677 list.available_channels().unwrap(),
1678 &[ChannelDetails {
1679 id: channel_id.to_proto(),
1680 name: "test-channel".to_string()
1681 }]
1682 )
1683 });
1684
1685 let channel_b = channels_b.update(&mut cx_b, |this, cx| {
1686 this.get_channel(channel_id.to_proto(), cx).unwrap()
1687 });
1688 channel_b.read_with(&cx_b, |channel, _| assert!(channel.messages().is_empty()));
1689 channel_b
1690 .condition(&cx_b, |channel, _| {
1691 channel_messages(channel)
1692 == [("user_b".to_string(), "hello A, it's B.".to_string(), false)]
1693 })
1694 .await;
1695
1696 channel_a
1697 .update(&mut cx_a, |channel, cx| {
1698 channel
1699 .send_message("oh, hi B.".to_string(), cx)
1700 .unwrap()
1701 .detach();
1702 let task = channel.send_message("sup".to_string(), cx).unwrap();
1703 assert_eq!(
1704 channel_messages(channel),
1705 &[
1706 ("user_b".to_string(), "hello A, it's B.".to_string(), false),
1707 ("user_a".to_string(), "oh, hi B.".to_string(), true),
1708 ("user_a".to_string(), "sup".to_string(), true)
1709 ]
1710 );
1711 task
1712 })
1713 .await
1714 .unwrap();
1715
1716 channel_b
1717 .condition(&cx_b, |channel, _| {
1718 channel_messages(channel)
1719 == [
1720 ("user_b".to_string(), "hello A, it's B.".to_string(), false),
1721 ("user_a".to_string(), "oh, hi B.".to_string(), false),
1722 ("user_a".to_string(), "sup".to_string(), false),
1723 ]
1724 })
1725 .await;
1726
1727 assert_eq!(
1728 server
1729 .state()
1730 .await
1731 .channel(channel_id)
1732 .unwrap()
1733 .connection_ids
1734 .len(),
1735 2
1736 );
1737 cx_b.update(|_| drop(channel_b));
1738 server
1739 .condition(|state| state.channel(channel_id).unwrap().connection_ids.len() == 1)
1740 .await;
1741
1742 cx_a.update(|_| drop(channel_a));
1743 server
1744 .condition(|state| state.channel(channel_id).is_none())
1745 .await;
1746 }
1747
1748 #[gpui::test]
1749 async fn test_chat_message_validation(mut cx_a: TestAppContext) {
1750 cx_a.foreground().forbid_parking();
1751
1752 let mut server = TestServer::start().await;
1753 let (client_a, user_store_a) = server.create_client(&mut cx_a, "user_a").await;
1754
1755 let db = &server.app_state.db;
1756 let org_id = db.create_org("Test Org", "test-org").await.unwrap();
1757 let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap();
1758 db.add_org_member(org_id, current_user_id(&user_store_a, &cx_a), false)
1759 .await
1760 .unwrap();
1761 db.add_channel_member(channel_id, current_user_id(&user_store_a, &cx_a), false)
1762 .await
1763 .unwrap();
1764
1765 let channels_a = cx_a.add_model(|cx| ChannelList::new(user_store_a, client_a, cx));
1766 channels_a
1767 .condition(&mut cx_a, |list, _| list.available_channels().is_some())
1768 .await;
1769 let channel_a = channels_a.update(&mut cx_a, |this, cx| {
1770 this.get_channel(channel_id.to_proto(), cx).unwrap()
1771 });
1772
1773 // Messages aren't allowed to be too long.
1774 channel_a
1775 .update(&mut cx_a, |channel, cx| {
1776 let long_body = "this is long.\n".repeat(1024);
1777 channel.send_message(long_body, cx).unwrap()
1778 })
1779 .await
1780 .unwrap_err();
1781
1782 // Messages aren't allowed to be blank.
1783 channel_a.update(&mut cx_a, |channel, cx| {
1784 channel.send_message(String::new(), cx).unwrap_err()
1785 });
1786
1787 // Leading and trailing whitespace are trimmed.
1788 channel_a
1789 .update(&mut cx_a, |channel, cx| {
1790 channel
1791 .send_message("\n surrounded by whitespace \n".to_string(), cx)
1792 .unwrap()
1793 })
1794 .await
1795 .unwrap();
1796 assert_eq!(
1797 db.get_channel_messages(channel_id, 10, None)
1798 .await
1799 .unwrap()
1800 .iter()
1801 .map(|m| &m.body)
1802 .collect::<Vec<_>>(),
1803 &["surrounded by whitespace"]
1804 );
1805 }
1806
1807 #[gpui::test]
1808 async fn test_chat_reconnection(mut cx_a: TestAppContext, mut cx_b: TestAppContext) {
1809 cx_a.foreground().forbid_parking();
1810
1811 // Connect to a server as 2 clients.
1812 let mut server = TestServer::start().await;
1813 let (client_a, user_store_a) = server.create_client(&mut cx_a, "user_a").await;
1814 let (client_b, user_store_b) = server.create_client(&mut cx_b, "user_b").await;
1815 let mut status_b = client_b.status();
1816
1817 // Create an org that includes these 2 users.
1818 let db = &server.app_state.db;
1819 let org_id = db.create_org("Test Org", "test-org").await.unwrap();
1820 db.add_org_member(org_id, current_user_id(&user_store_a, &cx_a), false)
1821 .await
1822 .unwrap();
1823 db.add_org_member(org_id, current_user_id(&user_store_b, &cx_b), false)
1824 .await
1825 .unwrap();
1826
1827 // Create a channel that includes all the users.
1828 let channel_id = db.create_org_channel(org_id, "test-channel").await.unwrap();
1829 db.add_channel_member(channel_id, current_user_id(&user_store_a, &cx_a), false)
1830 .await
1831 .unwrap();
1832 db.add_channel_member(channel_id, current_user_id(&user_store_b, &cx_b), false)
1833 .await
1834 .unwrap();
1835 db.create_channel_message(
1836 channel_id,
1837 current_user_id(&user_store_b, &cx_b),
1838 "hello A, it's B.",
1839 OffsetDateTime::now_utc(),
1840 2,
1841 )
1842 .await
1843 .unwrap();
1844
1845 let channels_a = cx_a.add_model(|cx| ChannelList::new(user_store_a, client_a, cx));
1846 channels_a
1847 .condition(&mut cx_a, |list, _| list.available_channels().is_some())
1848 .await;
1849
1850 channels_a.read_with(&cx_a, |list, _| {
1851 assert_eq!(
1852 list.available_channels().unwrap(),
1853 &[ChannelDetails {
1854 id: channel_id.to_proto(),
1855 name: "test-channel".to_string()
1856 }]
1857 )
1858 });
1859 let channel_a = channels_a.update(&mut cx_a, |this, cx| {
1860 this.get_channel(channel_id.to_proto(), cx).unwrap()
1861 });
1862 channel_a.read_with(&cx_a, |channel, _| assert!(channel.messages().is_empty()));
1863 channel_a
1864 .condition(&cx_a, |channel, _| {
1865 channel_messages(channel)
1866 == [("user_b".to_string(), "hello A, it's B.".to_string(), false)]
1867 })
1868 .await;
1869
1870 let channels_b = cx_b.add_model(|cx| ChannelList::new(user_store_b.clone(), client_b, cx));
1871 channels_b
1872 .condition(&mut cx_b, |list, _| list.available_channels().is_some())
1873 .await;
1874 channels_b.read_with(&cx_b, |list, _| {
1875 assert_eq!(
1876 list.available_channels().unwrap(),
1877 &[ChannelDetails {
1878 id: channel_id.to_proto(),
1879 name: "test-channel".to_string()
1880 }]
1881 )
1882 });
1883
1884 let channel_b = channels_b.update(&mut cx_b, |this, cx| {
1885 this.get_channel(channel_id.to_proto(), cx).unwrap()
1886 });
1887 channel_b.read_with(&cx_b, |channel, _| assert!(channel.messages().is_empty()));
1888 channel_b
1889 .condition(&cx_b, |channel, _| {
1890 channel_messages(channel)
1891 == [("user_b".to_string(), "hello A, it's B.".to_string(), false)]
1892 })
1893 .await;
1894
1895 // Disconnect client B, ensuring we can still access its cached channel data.
1896 server.forbid_connections();
1897 server.disconnect_client(current_user_id(&user_store_b, &cx_b));
1898 while !matches!(
1899 status_b.recv().await,
1900 Some(rpc::Status::ReconnectionError { .. })
1901 ) {}
1902
1903 channels_b.read_with(&cx_b, |channels, _| {
1904 assert_eq!(
1905 channels.available_channels().unwrap(),
1906 [ChannelDetails {
1907 id: channel_id.to_proto(),
1908 name: "test-channel".to_string()
1909 }]
1910 )
1911 });
1912 channel_b.read_with(&cx_b, |channel, _| {
1913 assert_eq!(
1914 channel_messages(channel),
1915 [("user_b".to_string(), "hello A, it's B.".to_string(), false)]
1916 )
1917 });
1918
1919 // Send a message from client B while it is disconnected.
1920 channel_b
1921 .update(&mut cx_b, |channel, cx| {
1922 let task = channel
1923 .send_message("can you see this?".to_string(), cx)
1924 .unwrap();
1925 assert_eq!(
1926 channel_messages(channel),
1927 &[
1928 ("user_b".to_string(), "hello A, it's B.".to_string(), false),
1929 ("user_b".to_string(), "can you see this?".to_string(), true)
1930 ]
1931 );
1932 task
1933 })
1934 .await
1935 .unwrap_err();
1936
1937 // Send a message from client A while B is disconnected.
1938 channel_a
1939 .update(&mut cx_a, |channel, cx| {
1940 channel
1941 .send_message("oh, hi B.".to_string(), cx)
1942 .unwrap()
1943 .detach();
1944 let task = channel.send_message("sup".to_string(), cx).unwrap();
1945 assert_eq!(
1946 channel_messages(channel),
1947 &[
1948 ("user_b".to_string(), "hello A, it's B.".to_string(), false),
1949 ("user_a".to_string(), "oh, hi B.".to_string(), true),
1950 ("user_a".to_string(), "sup".to_string(), true)
1951 ]
1952 );
1953 task
1954 })
1955 .await
1956 .unwrap();
1957
1958 // Give client B a chance to reconnect.
1959 server.allow_connections();
1960 cx_b.foreground().advance_clock(Duration::from_secs(10));
1961
1962 // Verify that B sees the new messages upon reconnection, as well as the message client B
1963 // sent while offline.
1964 channel_b
1965 .condition(&cx_b, |channel, _| {
1966 channel_messages(channel)
1967 == [
1968 ("user_b".to_string(), "hello A, it's B.".to_string(), false),
1969 ("user_a".to_string(), "oh, hi B.".to_string(), false),
1970 ("user_a".to_string(), "sup".to_string(), false),
1971 ("user_b".to_string(), "can you see this?".to_string(), false),
1972 ]
1973 })
1974 .await;
1975
1976 // Ensure client A and B can communicate normally after reconnection.
        channel_a
            .update(&mut cx_a, |channel, cx| {
                channel.send_message("you online?".to_string(), cx).unwrap()
            })
            .await
            .unwrap();
        channel_b
            .condition(&cx_b, |channel, _| {
                channel_messages(channel)
                    == [
                        ("user_b".to_string(), "hello A, it's B.".to_string(), false),
                        ("user_a".to_string(), "oh, hi B.".to_string(), false),
                        ("user_a".to_string(), "sup".to_string(), false),
                        ("user_b".to_string(), "can you see this?".to_string(), false),
                        ("user_a".to_string(), "you online?".to_string(), false),
                    ]
            })
            .await;

        channel_b
            .update(&mut cx_b, |channel, cx| {
                channel.send_message("yep".to_string(), cx).unwrap()
            })
            .await
            .unwrap();
        channel_a
            .condition(&cx_a, |channel, _| {
                channel_messages(channel)
                    == [
                        ("user_b".to_string(), "hello A, it's B.".to_string(), false),
                        ("user_a".to_string(), "oh, hi B.".to_string(), false),
                        ("user_a".to_string(), "sup".to_string(), false),
                        ("user_b".to_string(), "can you see this?".to_string(), false),
                        ("user_a".to_string(), "you online?".to_string(), false),
                        ("user_b".to_string(), "yep".to_string(), false),
                    ]
            })
            .await;
    }

    #[gpui::test]
    async fn test_collaborators(
        mut cx_a: TestAppContext,
        mut cx_b: TestAppContext,
        mut cx_c: TestAppContext,
    ) {
        cx_a.foreground().forbid_parking();
        let lang_registry = Arc::new(LanguageRegistry::new());

        // Connect to a server as 3 clients.
        let mut server = TestServer::start().await;
        let (client_a, user_store_a) = server.create_client(&mut cx_a, "user_a").await;
        let (client_b, user_store_b) = server.create_client(&mut cx_b, "user_b").await;
        let (_client_c, user_store_c) = server.create_client(&mut cx_c, "user_c").await;

        let fs = Arc::new(FakeFs::new());

        // Open a local worktree as client A, with user_b and user_c listed as collaborators.
        fs.insert_tree(
            "/a",
            json!({
                ".zed.toml": r#"collaborators = ["user_b", "user_c"]"#,
            }),
        )
        .await;

        let worktree_a = Worktree::open_local(
            client_a.clone(),
            "/a".as_ref(),
            fs.clone(),
            lang_registry.clone(),
            &mut cx_a.to_async(),
        )
        .await
        .unwrap();

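        // All three clients should see client A's worktree, which has no guests yet.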
        user_store_a
            .condition(&cx_a, |user_store, _| {
                collaborators(user_store) == vec![("user_a", vec![("a", vec![])])]
            })
            .await;
        user_store_b
            .condition(&cx_b, |user_store, _| {
                collaborators(user_store) == vec![("user_a", vec![("a", vec![])])]
            })
            .await;
        user_store_c
            .condition(&cx_c, |user_store, _| {
                collaborators(user_store) == vec![("user_a", vec![("a", vec![])])]
            })
            .await;

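        // Share the worktree as client A.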
        let worktree_id = worktree_a
            .update(&mut cx_a, |tree, cx| tree.as_local_mut().unwrap().share(cx))
            .await
            .unwrap();

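        // Join the shared worktree as client B.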
        let _worktree_b = Worktree::open_remote(
            client_b.clone(),
            worktree_id,
            lang_registry.clone(),
            &mut cx_b.to_async(),
        )
        .await
        .unwrap();

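        // All three clients should now see user_b as a guest on client A's worktree.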
        user_store_a
            .condition(&cx_a, |user_store, _| {
                collaborators(user_store) == vec![("user_a", vec![("a", vec!["user_b"])])]
            })
            .await;
        user_store_b
            .condition(&cx_b, |user_store, _| {
                collaborators(user_store) == vec![("user_a", vec![("a", vec!["user_b"])])]
            })
            .await;
        user_store_c
            .condition(&cx_c, |user_store, _| {
                collaborators(user_store) == vec![("user_a", vec![("a", vec!["user_b"])])]
            })
            .await;

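        // Drop client A's worktree; the collaborator list should become empty for everyone.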
        cx_a.update(move |_| drop(worktree_a));
        user_store_a
            .condition(&cx_a, |user_store, _| collaborators(user_store) == vec![])
            .await;
        user_store_b
            .condition(&cx_b, |user_store, _| collaborators(user_store) == vec![])
            .await;
        user_store_c
            .condition(&cx_c, |user_store, _| collaborators(user_store) == vec![])
            .await;

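        // Summarize the collaborator list as (login, [(worktree root name, [guest logins])]) tuples.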
        fn collaborators(user_store: &UserStore) -> Vec<(&str, Vec<(&str, Vec<&str>)>)> {
            user_store
                .collaborators()
                .iter()
                .map(|collaborator| {
                    let worktrees = collaborator
                        .worktrees
                        .iter()
                        .map(|w| {
                            (
                                w.root_name.as_str(),
                                w.guests.iter().map(|p| p.github_login.as_str()).collect(),
                            )
                        })
                        .collect();
                    (collaborator.user.github_login.as_str(), worktrees)
                })
                .collect()
        }
    }

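    // Test harness that runs a real `Server` in-process against a throwaway test database,
    // connecting clients over in-memory connections that tests can sever at will.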
    struct TestServer {
        peer: Arc<Peer>,
        app_state: Arc<AppState>,
        server: Arc<Server>,
        notifications: mpsc::Receiver<()>,
        connection_killers: Arc<Mutex<HashMap<UserId, watch::Sender<Option<()>>>>>,
        forbid_connections: Arc<AtomicBool>,
        _test_db: TestDb,
    }

    impl TestServer {
        async fn start() -> Self {
            let test_db = TestDb::new();
            let app_state = Self::build_app_state(&test_db).await;
            let peer = Peer::new();
            let notifications = mpsc::channel(128);
            let server = Server::new(app_state.clone(), peer.clone(), Some(notifications.0));
            Self {
                peer,
                app_state,
                server,
                notifications: notifications.1,
                connection_killers: Default::default(),
                forbid_connections: Default::default(),
                _test_db: test_db,
            }
        }

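        // Create a user in the test database and connect a client for them. Authentication
        // and connection establishment are overridden so the client talks to this in-process
        // server rather than a real endpoint.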
        async fn create_client(
            &mut self,
            cx: &mut TestAppContext,
            name: &str,
        ) -> (Arc<Client>, ModelHandle<UserStore>) {
            let user_id = self.app_state.db.create_user(name, false).await.unwrap();
            let client_name = name.to_string();
            let mut client = Client::new();
            let server = self.server.clone();
            let connection_killers = self.connection_killers.clone();
            let forbid_connections = self.forbid_connections.clone();
            Arc::get_mut(&mut client)
                .unwrap()
                .override_authenticate(move |cx| {
                    cx.spawn(|_| async move {
                        let access_token = "the-token".to_string();
                        Ok(Credentials {
                            user_id: user_id.0 as u64,
                            access_token,
                        })
                    })
                })
                .override_establish_connection(move |credentials, cx| {
                    assert_eq!(credentials.user_id, user_id.0 as u64);
                    assert_eq!(credentials.access_token, "the-token");

                    let server = server.clone();
                    let connection_killers = connection_killers.clone();
                    let forbid_connections = forbid_connections.clone();
                    let client_name = client_name.clone();
                    cx.spawn(move |cx| async move {
                        if forbid_connections.load(SeqCst) {
                            Err(EstablishConnectionError::other(anyhow!(
                                "server is forbidding connections"
                            )))
                        } else {
                            let (client_conn, server_conn, kill_conn) = Connection::in_memory();
                            connection_killers.lock().insert(user_id, kill_conn);
                            cx.background()
                                .spawn(server.handle_connection(server_conn, client_name, user_id))
                                .detach();
                            Ok(client_conn)
                        }
                    })
                });

            let http = FakeHttpClient::new(|_| async move { Ok(surf::http::Response::new(404)) });
            client
                .authenticate_and_connect(&cx.to_async())
                .await
                .unwrap();

            let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx));
            let mut authed_user =
                user_store.read_with(cx, |user_store, _| user_store.watch_current_user());
            while authed_user.recv().await.unwrap().is_none() {}

            (client, user_store)
        }

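        // Kill the in-memory connection for the given user, simulating an abrupt disconnect.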
        fn disconnect_client(&self, user_id: UserId) {
            if let Some(mut kill_conn) = self.connection_killers.lock().remove(&user_id) {
                let _ = kill_conn.try_send(Some(()));
            }
        }

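        // While connections are forbidden, the overridden `establish_connection` fails, so
        // clients cannot connect or reconnect until `allow_connections` is called.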
        fn forbid_connections(&self) {
            self.forbid_connections.store(true, SeqCst);
        }

        fn allow_connections(&self) {
            self.forbid_connections.store(false, SeqCst);
        }

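        // Build an `AppState` backed by the test database, with stub GitHub and auth clients.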
        async fn build_app_state(test_db: &TestDb) -> Arc<AppState> {
            let mut config = Config::default();
            config.session_secret = "a".repeat(32);
            config.database_url = test_db.url.clone();
            let github_client = github::AppClient::test();
            Arc::new(AppState {
                db: test_db.db().clone(),
                handlebars: Default::default(),
                auth_client: auth::build_client("", ""),
                repo_client: github::RepoClient::test(&github_client),
                github_client,
                config,
            })
        }

        async fn state<'a>(&'a self) -> RwLockReadGuard<'a, Store> {
            self.server.store.read().await
        }

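        // Wait (up to 500ms) for the server's store to satisfy `predicate`, re-checking after
        // each notification the server sends.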
        async fn condition<F>(&mut self, mut predicate: F)
        where
            F: FnMut(&Store) -> bool,
        {
            async_std::future::timeout(Duration::from_millis(500), async {
                while !(predicate)(&*self.server.store.read().await) {
                    self.notifications.recv().await;
                }
            })
            .await
            .expect("condition timed out");
        }
    }

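    // Reset the peer when a test finishes so any remaining connections are dropped.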
    impl Drop for TestServer {
        fn drop(&mut self) {
            task::block_on(self.peer.reset());
        }
    }

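    // Read the current user's id out of a `UserStore`.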
    fn current_user_id(user_store: &ModelHandle<UserStore>, cx: &TestAppContext) -> UserId {
        UserId::from_proto(
            user_store.read_with(cx, |user_store, _| user_store.current_user().unwrap().id),
        )
    }

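    // Flatten a channel's messages into (sender login, body, is_pending) tuples for assertions.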
    fn channel_messages(channel: &Channel) -> Vec<(String, String, bool)> {
        channel
            .messages()
            .cursor::<()>()
            .map(|m| {
                (
                    m.sender.github_login.clone(),
                    m.body.clone(),
                    m.is_pending(),
                )
            })
            .collect()
    }

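    // A view that renders nothing, for tests that need a window but no real UI.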
    struct EmptyView;

    impl gpui::Entity for EmptyView {
        type Event = ();
    }

    impl gpui::View for EmptyView {
        fn ui_name() -> &'static str {
            "empty view"
        }

        fn render(&mut self, _: &mut gpui::RenderContext<Self>) -> gpui::ElementBox {
            gpui::Element::boxed(gpui::elements::Empty)
        }
    }
}