peer.rs
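//! Peer-to-peer RPC on top of a [`MessageStream`].
//!
//! A [`Peer`] owns a set of [`Connection`]s, assigns message ids, and routes
//! incoming responses back to the request futures that are waiting for them.
//! [`Peer::add_connection`] returns the new connection's id, an I/O future
//! that must be spawned to drive reads, writes, and keepalives, and a stream
//! of incoming, typed messages.
//!
//! The outline below is a minimal usage sketch, not a doc test; it assumes a
//! `Connection` and an async executor with `spawn` and `timer` are available
//! (the tests at the bottom of this file use `Connection::in_memory` and a
//! gpui test executor for these).
//!
//! ```ignore
//! let peer = Peer::new(0);
//! let (connection_id, io_task, mut incoming) =
//!     peer.add_connection(connection, |duration| executor.timer(duration));
//! executor.spawn(io_task).detach();
//! executor.spawn(async move {
//!     while let Some(envelope) = incoming.next().await {
//!         // Downcast to a concrete `TypedEnvelope<T>` and reply with `peer.respond(...)`.
//!     }
//! }).detach();
//!
//! // Make a request and wait for the matching response.
//! let ack = peer.request(connection_id, proto::Ping {}).await?;
//! ```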

   1use super::{
   2    Connection,
   3    message_stream::{Message, MessageStream},
   4    proto::{
   5        self, AnyTypedEnvelope, EnvelopedMessage, PeerId, Receipt, RequestMessage, TypedEnvelope,
   6    },
   7};
   8use anyhow::{Context as _, Result, anyhow};
   9use collections::HashMap;
  10use futures::{
  11    FutureExt, SinkExt, Stream, StreamExt, TryFutureExt,
  12    channel::{mpsc, oneshot},
  13    stream::BoxStream,
  14};
  15use parking_lot::{Mutex, RwLock};
  16use proto::{ErrorCode, ErrorCodeExt, ErrorExt, RpcError};
  17use serde::{Serialize, ser::SerializeStruct};
  18use std::{
  19    fmt, future,
  20    future::Future,
  21    sync::atomic::Ordering::SeqCst,
  22    sync::{
  23        Arc,
  24        atomic::{self, AtomicU32},
  25    },
  26    time::Duration,
  27    time::Instant,
  28};
  29use tracing::instrument;
  30
  31#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Serialize)]
  32pub struct ConnectionId {
  33    pub owner_id: u32,
  34    pub id: u32,
  35}
  36
  37impl From<ConnectionId> for PeerId {
  38    fn from(id: ConnectionId) -> Self {
  39        PeerId {
  40            owner_id: id.owner_id,
  41            id: id.id,
  42        }
  43    }
  44}
  45
  46impl From<PeerId> for ConnectionId {
  47    fn from(peer_id: PeerId) -> Self {
  48        Self {
  49            owner_id: peer_id.owner_id,
  50            id: peer_id.id,
  51        }
  52    }
  53}
  54
  55impl fmt::Display for ConnectionId {
  56    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
  57        write!(f, "{}/{}", self.owner_id, self.id)
  58    }
  59}
  60
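/// Manages a set of RPC connections, identified by [`ConnectionId`].
///
/// The `epoch` is stamped into every connection id's `owner_id` so that ids
/// minted by different server generations never collide; `next_connection_id`
/// is the per-epoch counter.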
  61pub struct Peer {
  62    epoch: AtomicU32,
  63    pub connections: RwLock<HashMap<ConnectionId, ConnectionState>>,
  64    next_connection_id: AtomicU32,
  65}
  66
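/// Per-connection bookkeeping shared between the [`Peer`] and the connection's
/// I/O future.
///
/// `outgoing_tx` feeds the write half of the connection, `next_message_id`
/// allocates ids for outgoing envelopes, and the two channel maps route
/// incoming responses (unary and streaming) back to the futures awaiting them.
/// Both maps are set to `None` when the connection closes.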
  67#[derive(Clone, Serialize)]
  68pub struct ConnectionState {
  69    #[serde(skip)]
  70    outgoing_tx: mpsc::UnboundedSender<Message>,
  71    next_message_id: Arc<AtomicU32>,
  72    #[allow(clippy::type_complexity)]
  73    #[serde(skip)]
  74    response_channels: Arc<
  75        Mutex<
  76            Option<
  77                HashMap<
  78                    u32,
  79                    oneshot::Sender<(proto::Envelope, std::time::Instant, oneshot::Sender<()>)>,
  80                >,
  81            >,
  82        >,
  83    >,
  84    #[allow(clippy::type_complexity)]
  85    #[serde(skip)]
  86    stream_response_channels: Arc<
  87        Mutex<
  88            Option<
  89                HashMap<u32, mpsc::UnboundedSender<(Result<proto::Envelope>, oneshot::Sender<()>)>>,
  90            >,
  91        >,
  92    >,
  93}
  94
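// A keepalive ping is sent after `KEEPALIVE_INTERVAL` of outgoing silence,
// individual writes are abandoned after `WRITE_TIMEOUT`, and the connection is
// closed if nothing is read for `RECEIVE_TIMEOUT`.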
  95const KEEPALIVE_INTERVAL: Duration = Duration::from_secs(1);
  96const WRITE_TIMEOUT: Duration = Duration::from_secs(2);
  97pub const RECEIVE_TIMEOUT: Duration = Duration::from_secs(10);
  98
  99impl Peer {
 100    pub fn new(epoch: u32) -> Arc<Self> {
 101        Arc::new(Self {
 102            epoch: AtomicU32::new(epoch),
 103            connections: Default::default(),
 104            next_connection_id: Default::default(),
 105        })
 106    }
 107
 108    pub fn epoch(&self) -> u32 {
 109        self.epoch.load(SeqCst)
 110    }
 111
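    /// Register `connection` with this peer, returning the new connection's
    /// id, a future that drives the connection's I/O (reads, writes,
    /// keepalives, and timeouts), and a stream of incoming, typed messages.
    ///
    /// The caller is expected to spawn the returned future and consume the
    /// stream; the connection's state is removed when that future completes or
    /// is dropped. `create_timer` supplies the timer used for keepalive and
    /// timeout scheduling.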
 112    #[instrument(skip_all)]
 113    pub fn add_connection<F, Fut, Out>(
 114        self: &Arc<Self>,
 115        connection: Connection,
 116        create_timer: F,
 117    ) -> (
 118        ConnectionId,
 119        impl Future<Output = anyhow::Result<()>> + Send + use<F, Fut, Out>,
 120        BoxStream<'static, Box<dyn AnyTypedEnvelope>>,
 121    )
 122    where
 123        F: Send + Fn(Duration) -> Fut,
 124        Fut: Send + Future<Output = Out>,
 125        Out: Send,
 126    {
 127        // For outgoing messages, use an unbounded channel so that application code
 128        // can always send messages without yielding. For incoming messages, use a
 129        // bounded channel so that other peers will receive backpressure if they send
 130        // messages faster than this peer can process them.
 131        #[cfg(any(test, feature = "test-support"))]
 132        const INCOMING_BUFFER_SIZE: usize = 1;
 133        #[cfg(not(any(test, feature = "test-support")))]
 134        const INCOMING_BUFFER_SIZE: usize = 256;
 135        let (mut incoming_tx, incoming_rx) = mpsc::channel(INCOMING_BUFFER_SIZE);
 136        let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded();
 137
 138        let connection_id = ConnectionId {
 139            owner_id: self.epoch.load(SeqCst),
 140            id: self.next_connection_id.fetch_add(1, SeqCst),
 141        };
 142        let connection_state = ConnectionState {
 143            outgoing_tx,
 144            next_message_id: Default::default(),
 145            response_channels: Arc::new(Mutex::new(Some(Default::default()))),
 146            stream_response_channels: Arc::new(Mutex::new(Some(Default::default()))),
 147        };
 148        let mut writer = MessageStream::new(connection.tx);
 149        let mut reader = MessageStream::new(connection.rx);
 150
 151        let this = self.clone();
 152        let response_channels = connection_state.response_channels.clone();
 153        let stream_response_channels = connection_state.stream_response_channels.clone();
 154
 155        let handle_io = async move {
 156            tracing::trace!(%connection_id, "handle io future: start");
 157
 158            let _end_connection = util::defer(|| {
 159                response_channels.lock().take();
 160                if let Some(channels) = stream_response_channels.lock().take() {
 161                    for channel in channels.values() {
 162                        let _ = channel.unbounded_send((
 163                            Err(anyhow!("connection closed")),
 164                            oneshot::channel().0,
 165                        ));
 166                    }
 167                }
 168                this.connections.write().remove(&connection_id);
 169                tracing::trace!(%connection_id, "handle io future: end");
 170            });
 171
  172            // Send a message at least this often so the connection isn't closed.
 173            let keepalive_timer = create_timer(KEEPALIVE_INTERVAL).fuse();
 174            futures::pin_mut!(keepalive_timer);
 175
 176            // Disconnect if we don't receive messages at least this frequently.
 177            let receive_timeout = create_timer(RECEIVE_TIMEOUT).fuse();
 178            futures::pin_mut!(receive_timeout);
 179
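            // The outer loop creates one read future at a time; the inner loop
            // keeps servicing outgoing messages and keepalives while that read
            // is pending, and breaks back out once a message has been read.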
 180            loop {
 181                tracing::trace!(%connection_id, "outer loop iteration start");
 182                let read_message = reader.read().fuse();
 183                futures::pin_mut!(read_message);
 184
 185                loop {
 186                    tracing::trace!(%connection_id, "inner loop iteration start");
 187                    futures::select_biased! {
 188                        outgoing = outgoing_rx.next().fuse() => match outgoing {
 189                            Some(outgoing) => {
 190                                tracing::trace!(%connection_id, "outgoing rpc message: writing");
 191                                futures::select_biased! {
 192                                    result = writer.write(outgoing).fuse() => {
 193                                        tracing::trace!(%connection_id, "outgoing rpc message: done writing");
 194                                        result.context("failed to write RPC message")?;
 195                                        tracing::trace!(%connection_id, "keepalive interval: resetting after sending message");
 196                                        keepalive_timer.set(create_timer(KEEPALIVE_INTERVAL).fuse());
 197                                    }
 198                                    _ = create_timer(WRITE_TIMEOUT).fuse() => {
 199                                        tracing::trace!(%connection_id, "outgoing rpc message: writing timed out");
 200                                        anyhow::bail!("timed out writing message");
 201                                    }
 202                                }
 203                            }
 204                            None => {
 205                                tracing::trace!(%connection_id, "outgoing rpc message: channel closed");
 206                                return Ok(())
 207                            },
 208                        },
 209                        _ = keepalive_timer => {
 210                            tracing::trace!(%connection_id, "keepalive interval: pinging");
 211                            futures::select_biased! {
 212                                result = writer.write(Message::Ping).fuse() => {
 213                                    tracing::trace!(%connection_id, "keepalive interval: done pinging");
 214                                    result.context("failed to send keepalive")?;
 215                                    tracing::trace!(%connection_id, "keepalive interval: resetting after pinging");
 216                                    keepalive_timer.set(create_timer(KEEPALIVE_INTERVAL).fuse());
 217                                }
 218                                _ = create_timer(WRITE_TIMEOUT).fuse() => {
 219                                    tracing::trace!(%connection_id, "keepalive interval: pinging timed out");
 220                                    anyhow::bail!("timed out sending keepalive");
 221                                }
 222                            }
 223                        }
 224                        incoming = read_message => {
 225                            let incoming = incoming.context("error reading rpc message from socket")?;
 226                            tracing::trace!(%connection_id, "incoming rpc message: received");
 227                            tracing::trace!(%connection_id, "receive timeout: resetting");
 228                            receive_timeout.set(create_timer(RECEIVE_TIMEOUT).fuse());
 229                            if let (Message::Envelope(incoming), received_at) = incoming {
 230                                tracing::trace!(%connection_id, "incoming rpc message: processing");
 231                                futures::select_biased! {
 232                                    result = incoming_tx.send((incoming, received_at)).fuse() => match result {
 233                                        Ok(_) => {
 234                                            tracing::trace!(%connection_id, "incoming rpc message: processed");
 235                                        }
 236                                        Err(_) => {
 237                                            tracing::trace!(%connection_id, "incoming rpc message: channel closed");
 238                                            return Ok(())
 239                                        }
 240                                    },
 241                                    _ = create_timer(WRITE_TIMEOUT).fuse() => {
 242                                        tracing::trace!(%connection_id, "incoming rpc message: processing timed out");
 243                                        anyhow::bail!("timed out processing incoming message");
 244                                    }
 245                                }
 246                            }
 247                            break;
 248                        },
 249                        _ = receive_timeout => {
 250                            tracing::trace!(%connection_id, "receive timeout: delay between messages too long");
 251                            anyhow::bail!("delay between messages too long");
 252                        }
 253                    }
 254                }
 255            }
 256        };
 257
 258        let response_channels = connection_state.response_channels.clone();
 259        let stream_response_channels = connection_state.stream_response_channels.clone();
 260        self.connections
 261            .write()
 262            .insert(connection_id, connection_state);
 263
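        // Route response envelopes (those with `responding_to` set) to the
        // request future or stream waiting on them, and yield every other
        // envelope to the caller as a typed message. The barrier channels
        // ensure a response is observed by its requester before the next
        // message from the same connection is processed.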
 264        let incoming_rx = incoming_rx.filter_map(move |(incoming, received_at)| {
 265            let response_channels = response_channels.clone();
 266            let stream_response_channels = stream_response_channels.clone();
 267            async move {
 268                let message_id = incoming.id;
 269                tracing::trace!(?incoming, "incoming message future: start");
 270                let _end = util::defer(move || {
 271                    tracing::trace!(%connection_id, message_id, "incoming message future: end");
 272                });
 273
 274                if let Some(responding_to) = incoming.responding_to {
 275                    tracing::trace!(
 276                        %connection_id,
 277                        message_id,
 278                        responding_to,
 279                        "incoming response: received"
 280                    );
 281                    let response_channel =
 282                        response_channels.lock().as_mut()?.remove(&responding_to);
 283                    let stream_response_channel = stream_response_channels
 284                        .lock()
 285                        .as_ref()?
 286                        .get(&responding_to)
 287                        .cloned();
 288
 289                    if let Some(tx) = response_channel {
 290                        let requester_resumed = oneshot::channel();
 291                        if let Err(error) = tx.send((incoming, received_at, requester_resumed.0)) {
 292                            tracing::trace!(
 293                                %connection_id,
 294                                message_id,
 295                                responding_to = responding_to,
 296                                ?error,
 297                                "incoming response: request future dropped",
 298                            );
 299                        }
 300
 301                        tracing::trace!(
 302                            %connection_id,
 303                            message_id,
 304                            responding_to,
 305                            "incoming response: waiting to resume requester"
 306                        );
 307                        let _ = requester_resumed.1.await;
 308                        tracing::trace!(
 309                            %connection_id,
 310                            message_id,
 311                            responding_to,
 312                            "incoming response: requester resumed"
 313                        );
 314                    } else if let Some(tx) = stream_response_channel {
 315                        let requester_resumed = oneshot::channel();
 316                        if let Err(error) = tx.unbounded_send((Ok(incoming), requester_resumed.0)) {
 317                            tracing::debug!(
 318                                %connection_id,
 319                                message_id,
 320                                responding_to = responding_to,
 321                                ?error,
 322                                "incoming stream response: request future dropped",
 323                            );
 324                        }
 325
 326                        tracing::debug!(
 327                            %connection_id,
 328                            message_id,
 329                            responding_to,
 330                            "incoming stream response: waiting to resume requester"
 331                        );
 332                        let _ = requester_resumed.1.await;
 333                        tracing::debug!(
 334                            %connection_id,
 335                            message_id,
 336                            responding_to,
 337                            "incoming stream response: requester resumed"
 338                        );
 339                    } else {
 340                        let message_type = proto::build_typed_envelope(
 341                            connection_id.into(),
 342                            received_at,
 343                            incoming,
 344                        )
 345                        .map(|p| p.payload_type_name());
 346                        tracing::warn!(
 347                            %connection_id,
 348                            message_id,
 349                            responding_to,
 350                            message_type,
 351                            "incoming response: unknown request"
 352                        );
 353                    }
 354
 355                    None
 356                } else {
 357                    tracing::trace!(%connection_id, message_id, "incoming message: received");
 358                    proto::build_typed_envelope(connection_id.into(), received_at, incoming)
 359                        .or_else(|| {
 360                            tracing::error!(
 361                                %connection_id,
 362                                message_id,
 363                                "unable to construct a typed envelope"
 364                            );
 365                            None
 366                        })
 367                }
 368            }
 369        });
 370        (connection_id, handle_io, incoming_rx.boxed())
 371    }
 372
 373    #[cfg(any(test, feature = "test-support"))]
 374    pub fn add_test_connection(
 375        self: &Arc<Self>,
 376        connection: Connection,
 377        executor: gpui::BackgroundExecutor,
 378    ) -> (
 379        ConnectionId,
 380        impl Future<Output = anyhow::Result<()>> + Send + use<>,
 381        BoxStream<'static, Box<dyn AnyTypedEnvelope>>,
 382    ) {
 383        let executor = executor.clone();
 384        self.add_connection(connection, move |duration| executor.timer(duration))
 385    }
 386
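    /// Remove the connection's state, which closes its outgoing channel and
    /// causes the connection's I/O future to finish.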
 387    pub fn disconnect(&self, connection_id: ConnectionId) {
 388        self.connections.write().remove(&connection_id);
 389    }
 390
 391    #[cfg(any(test, feature = "test-support"))]
 392    pub fn reset(&self, epoch: u32) {
 393        self.next_connection_id.store(0, SeqCst);
 394        self.epoch.store(epoch, SeqCst);
 395    }
 396
 397    pub fn teardown(&self) {
 398        self.connections.write().clear();
 399    }
 400
 401    /// Make a request and wait for a response.
 402    pub fn request<T: RequestMessage>(
 403        &self,
 404        receiver_id: ConnectionId,
 405        request: T,
 406    ) -> impl Future<Output = Result<T::Response>> + use<T> {
 407        self.request_internal(None, receiver_id, request)
 408            .map_ok(|envelope| envelope.payload)
 409    }
 410
 411    pub fn request_envelope<T: RequestMessage>(
 412        &self,
 413        receiver_id: ConnectionId,
 414        request: T,
 415    ) -> impl Future<Output = Result<TypedEnvelope<T::Response>>> + use<T> {
 416        self.request_internal(None, receiver_id, request)
 417    }
 418
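    /// Like `request`, but stamps `sender_id` as the original sender so the
    /// receiver can tell on whose behalf the request is being forwarded.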
 419    pub fn forward_request<T: RequestMessage>(
 420        &self,
 421        sender_id: ConnectionId,
 422        receiver_id: ConnectionId,
 423        request: T,
 424    ) -> impl Future<Output = Result<T::Response>> {
 425        self.request_internal(Some(sender_id), receiver_id, request)
 426            .map_ok(|envelope| envelope.payload)
 427    }
 428
 429    fn request_internal<T: RequestMessage>(
 430        &self,
 431        original_sender_id: Option<ConnectionId>,
 432        receiver_id: ConnectionId,
 433        request: T,
 434    ) -> impl Future<Output = Result<TypedEnvelope<T::Response>>> + use<T> {
 435        let envelope = request.into_envelope(0, None, original_sender_id.map(Into::into));
 436        let response = self.request_dynamic(receiver_id, envelope, T::NAME);
 437        async move {
 438            let (response, received_at) = response.await?;
 439            Ok(TypedEnvelope {
 440                message_id: response.id,
 441                sender_id: receiver_id.into(),
 442                original_sender_id: response.original_sender_id,
 443                payload: T::Response::from_envelope(response)
 444                    .context("received response of the wrong type")?,
 445                received_at,
 446            })
 447        }
 448    }
 449
 450    /// Make a request and wait for a response.
 451    ///
  452    /// The caller is responsible for deserializing the response into the request's
  453    /// response type. This interface is mainly useful with trait objects, where
  454    /// generics can't be used. If you have a concrete type, use `request` instead.
 455    pub fn request_dynamic(
 456        &self,
 457        receiver_id: ConnectionId,
 458        mut envelope: proto::Envelope,
 459        type_name: &'static str,
 460    ) -> impl Future<Output = Result<(proto::Envelope, Instant)>> + use<> {
 461        let (tx, rx) = oneshot::channel();
 462        let send = self.connection_state(receiver_id).and_then(|connection| {
 463            envelope.id = connection.next_message_id.fetch_add(1, SeqCst);
 464            connection
 465                .response_channels
 466                .lock()
 467                .as_mut()
 468                .context("connection was closed")?
 469                .insert(envelope.id, tx);
 470            connection
 471                .outgoing_tx
 472                .unbounded_send(Message::Envelope(envelope))
 473                .context("connection was closed")?;
 474            Ok(())
 475        });
 476        async move {
 477            send?;
 478            let (response, received_at, _barrier) = rx.await.context("connection was closed")?;
 479            if let Some(proto::envelope::Payload::Error(error)) = &response.payload {
 480                return Err(RpcError::from_proto(error, type_name));
 481            }
 482            Ok((response, received_at))
 483        }
 484    }
 485
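    /// Make a request whose response arrives as a stream of messages rather
    /// than a single envelope.
    ///
    /// The returned stream yields responses until the other peer sends an
    /// `EndStream` message (see `end_stream`). A minimal sketch of the
    /// consuming side, assuming `request` is a `RequestMessage` whose
    /// responses are streamed:
    ///
    /// ```ignore
    /// let mut responses = peer.request_stream(connection_id, request).await?;
    /// while let Some(response) = responses.next().await {
    ///     let response = response?;
    ///     // ...
    /// }
    /// ```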
 486    pub fn request_stream<T: RequestMessage>(
 487        &self,
 488        receiver_id: ConnectionId,
 489        request: T,
 490    ) -> impl Future<Output = Result<impl Unpin + Stream<Item = Result<T::Response>>>> {
 491        let (tx, rx) = mpsc::unbounded();
 492        let send = self.connection_state(receiver_id).and_then(|connection| {
 493            let message_id = connection.next_message_id.fetch_add(1, SeqCst);
 494            let stream_response_channels = connection.stream_response_channels.clone();
 495            stream_response_channels
 496                .lock()
 497                .as_mut()
 498                .context("connection was closed")?
 499                .insert(message_id, tx);
 500            connection
 501                .outgoing_tx
 502                .unbounded_send(Message::Envelope(
 503                    request.into_envelope(message_id, None, None),
 504                ))
 505                .context("connection was closed")?;
 506            Ok((message_id, stream_response_channels))
 507        });
 508
 509        async move {
 510            let (message_id, stream_response_channels) = send?;
 511            let stream_response_channels = Arc::downgrade(&stream_response_channels);
 512
 513            Ok(rx.filter_map(move |(response, _barrier)| {
 514                let stream_response_channels = stream_response_channels.clone();
 515                future::ready(match response {
 516                    Ok(response) => {
 517                        if let Some(proto::envelope::Payload::Error(error)) = &response.payload {
 518                            Some(Err(RpcError::from_proto(error, T::NAME)))
 519                        } else if let Some(proto::envelope::Payload::EndStream(_)) =
 520                            &response.payload
 521                        {
 522                            // Remove the transmitting end of the response channel to end the stream.
 523                            if let Some(channels) = stream_response_channels.upgrade() {
 524                                if let Some(channels) = channels.lock().as_mut() {
 525                                    channels.remove(&message_id);
 526                                }
 527                            }
 528                            None
 529                        } else {
 530                            Some(
 531                                T::Response::from_envelope(response)
 532                                    .context("received response of the wrong type"),
 533                            )
 534                        }
 535                    }
 536                    Err(error) => Some(Err(error)),
 537                })
 538            }))
 539        }
 540    }
 541
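    /// Send a one-way message to `receiver_id` without waiting for a response.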
 542    pub fn send<T: EnvelopedMessage>(&self, receiver_id: ConnectionId, message: T) -> Result<()> {
 543        let connection = self.connection_state(receiver_id)?;
 544        let message_id = connection
 545            .next_message_id
 546            .fetch_add(1, atomic::Ordering::SeqCst);
 547        connection.outgoing_tx.unbounded_send(Message::Envelope(
 548            message.into_envelope(message_id, None, None),
 549        ))?;
 550        Ok(())
 551    }
 552
 553    pub fn send_dynamic(&self, receiver_id: ConnectionId, message: proto::Envelope) -> Result<()> {
 554        let connection = self.connection_state(receiver_id)?;
 555        connection
 556            .outgoing_tx
 557            .unbounded_send(Message::Envelope(message))?;
 558        Ok(())
 559    }
 560
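    /// Like `send`, but records `sender_id` as the original sender of the
    /// forwarded message.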
 561    pub fn forward_send<T: EnvelopedMessage>(
 562        &self,
 563        sender_id: ConnectionId,
 564        receiver_id: ConnectionId,
 565        message: T,
 566    ) -> Result<()> {
 567        let connection = self.connection_state(receiver_id)?;
 568        let message_id = connection
 569            .next_message_id
 570            .fetch_add(1, atomic::Ordering::SeqCst);
 571        connection
 572            .outgoing_tx
 573            .unbounded_send(Message::Envelope(message.into_envelope(
 574                message_id,
 575                None,
 576                Some(sender_id.into()),
 577            )))?;
 578        Ok(())
 579    }
 580
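    /// Respond to a request identified by `receipt`, routing the response back
    /// to the requesting peer.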
 581    pub fn respond<T: RequestMessage>(
 582        &self,
 583        receipt: Receipt<T>,
 584        response: T::Response,
 585    ) -> Result<()> {
 586        let connection = self.connection_state(receipt.sender_id.into())?;
 587        let message_id = connection
 588            .next_message_id
 589            .fetch_add(1, atomic::Ordering::SeqCst);
 590        connection
 591            .outgoing_tx
 592            .unbounded_send(Message::Envelope(response.into_envelope(
 593                message_id,
 594                Some(receipt.message_id),
 595                None,
 596            )))?;
 597        Ok(())
 598    }
 599
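    /// Terminate a streaming request by sending an `EndStream` message in
    /// response to `receipt`; the stream returned by `request_stream` on the
    /// other peer ends when it receives it.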
 600    pub fn end_stream<T: RequestMessage>(&self, receipt: Receipt<T>) -> Result<()> {
 601        let connection = self.connection_state(receipt.sender_id.into())?;
 602        let message_id = connection
 603            .next_message_id
 604            .fetch_add(1, atomic::Ordering::SeqCst);
 605
 606        let message = proto::EndStream {};
 607
 608        connection
 609            .outgoing_tx
 610            .unbounded_send(Message::Envelope(message.into_envelope(
 611                message_id,
 612                Some(receipt.message_id),
 613                None,
 614            )))?;
 615        Ok(())
 616    }
 617
 618    pub fn respond_with_error<T: RequestMessage>(
 619        &self,
 620        receipt: Receipt<T>,
 621        response: proto::Error,
 622    ) -> Result<()> {
 623        let connection = self.connection_state(receipt.sender_id.into())?;
 624        let message_id = connection
 625            .next_message_id
 626            .fetch_add(1, atomic::Ordering::SeqCst);
 627        connection
 628            .outgoing_tx
 629            .unbounded_send(Message::Envelope(response.into_envelope(
 630                message_id,
 631                Some(receipt.message_id),
 632                None,
 633            )))?;
 634        Ok(())
 635    }
 636
 637    pub fn respond_with_unhandled_message(
 638        &self,
 639        sender_id: ConnectionId,
 640        request_message_id: u32,
 641        message_type_name: &'static str,
 642    ) -> Result<()> {
 643        let connection = self.connection_state(sender_id)?;
 644        let response = ErrorCode::Internal
 645            .message(format!("message {} was not handled", message_type_name))
 646            .to_proto();
 647        let message_id = connection
 648            .next_message_id
 649            .fetch_add(1, atomic::Ordering::SeqCst);
 650        connection
 651            .outgoing_tx
 652            .unbounded_send(Message::Envelope(response.into_envelope(
 653                message_id,
 654                Some(request_message_id),
 655                None,
 656            )))?;
 657        Ok(())
 658    }
 659
 660    fn connection_state(&self, connection_id: ConnectionId) -> Result<ConnectionState> {
 661        let connections = self.connections.read();
 662        let connection = connections
 663            .get(&connection_id)
 664            .with_context(|| format!("no such connection: {connection_id}"))?;
 665        Ok(connection.clone())
 666    }
 667}
 668
 669impl Serialize for Peer {
 670    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
 671    where
 672        S: serde::Serializer,
 673    {
 674        let mut state = serializer.serialize_struct("Peer", 2)?;
 675        state.serialize_field("connections", &*self.connections.read())?;
 676        state.end()
 677    }
 678}
 679
 680#[cfg(test)]
 681mod tests {
 682    use super::*;
 683    use async_tungstenite::tungstenite::Message as WebSocketMessage;
 684    use gpui::TestAppContext;
 685
 686    fn init_logger() {
 687        if std::env::var("RUST_LOG").is_ok() {
 688            env_logger::init();
 689        }
 690    }
 691
 692    #[gpui::test(iterations = 50)]
 693    async fn test_request_response(cx: &mut TestAppContext) {
 694        init_logger();
 695
 696        let executor = cx.executor();
 697
 698        // create 2 clients connected to 1 server
 699        let server = Peer::new(0);
 700        let client1 = Peer::new(0);
 701        let client2 = Peer::new(0);
 702
 703        let (client1_to_server_conn, server_to_client_1_conn, _kill) =
 704            Connection::in_memory(cx.executor());
 705        let (client1_conn_id, io_task1, client1_incoming) =
 706            client1.add_test_connection(client1_to_server_conn, cx.executor());
 707        let (_, io_task2, server_incoming1) =
 708            server.add_test_connection(server_to_client_1_conn, cx.executor());
 709
 710        let (client2_to_server_conn, server_to_client_2_conn, _kill) =
 711            Connection::in_memory(cx.executor());
 712        let (client2_conn_id, io_task3, client2_incoming) =
 713            client2.add_test_connection(client2_to_server_conn, cx.executor());
 714        let (_, io_task4, server_incoming2) =
 715            server.add_test_connection(server_to_client_2_conn, cx.executor());
 716
 717        executor.spawn(io_task1).detach();
 718        executor.spawn(io_task2).detach();
 719        executor.spawn(io_task3).detach();
 720        executor.spawn(io_task4).detach();
 721        executor
 722            .spawn(handle_messages(server_incoming1, server.clone()))
 723            .detach();
 724        executor
 725            .spawn(handle_messages(client1_incoming, client1.clone()))
 726            .detach();
 727        executor
 728            .spawn(handle_messages(server_incoming2, server.clone()))
 729            .detach();
 730        executor
 731            .spawn(handle_messages(client2_incoming, client2.clone()))
 732            .detach();
 733
 734        assert_eq!(
 735            client1
  736                .request(client1_conn_id, proto::Ping {})
 737                .await
 738                .unwrap(),
 739            proto::Ack {}
 740        );
 741
 742        assert_eq!(
 743            client2
  744                .request(client2_conn_id, proto::Ping {})
 745                .await
 746                .unwrap(),
 747            proto::Ack {}
 748        );
 749
 750        assert_eq!(
 751            client1
  752                .request(client1_conn_id, proto::Test { id: 1 })
 753                .await
 754                .unwrap(),
 755            proto::Test { id: 1 }
 756        );
 757
 758        assert_eq!(
 759            client2
 760                .request(client2_conn_id, proto::Test { id: 2 })
 761                .await
 762                .unwrap(),
 763            proto::Test { id: 2 }
 764        );
 765
 766        client1.disconnect(client1_conn_id);
  767        client2.disconnect(client2_conn_id);
 768
 769        async fn handle_messages(
 770            mut messages: BoxStream<'static, Box<dyn AnyTypedEnvelope>>,
 771            peer: Arc<Peer>,
 772        ) -> Result<()> {
 773            while let Some(envelope) = messages.next().await {
 774                let envelope = envelope.into_any();
 775                if let Some(envelope) = envelope.downcast_ref::<TypedEnvelope<proto::Ping>>() {
 776                    let receipt = envelope.receipt();
 777                    peer.respond(receipt, proto::Ack {})?
 778                } else if let Some(envelope) = envelope.downcast_ref::<TypedEnvelope<proto::Test>>()
 779                {
 780                    peer.respond(envelope.receipt(), envelope.payload.clone())?
 781                } else {
 782                    panic!("unknown message type");
 783                }
 784            }
 785
 786            Ok(())
 787        }
 788    }
 789
 790    #[gpui::test(iterations = 50)]
 791    async fn test_order_of_response_and_incoming(cx: &mut TestAppContext) {
 792        let executor = cx.executor();
 793        let server = Peer::new(0);
 794        let client = Peer::new(0);
 795
 796        let (client_to_server_conn, server_to_client_conn, _kill) =
 797            Connection::in_memory(executor.clone());
 798        let (client_to_server_conn_id, io_task1, mut client_incoming) =
 799            client.add_test_connection(client_to_server_conn, executor.clone());
 800
 801        let (server_to_client_conn_id, io_task2, mut server_incoming) =
 802            server.add_test_connection(server_to_client_conn, executor.clone());
 803
 804        executor.spawn(io_task1).detach();
 805        executor.spawn(io_task2).detach();
 806
 807        executor
 808            .spawn(async move {
 809                let future = server_incoming.next().await;
 810                let request = future
 811                    .unwrap()
 812                    .into_any()
 813                    .downcast::<TypedEnvelope<proto::Ping>>()
 814                    .unwrap();
 815
 816                server
 817                    .send(
 818                        server_to_client_conn_id,
 819                        ErrorCode::Internal
 820                            .message("message 1".to_string())
 821                            .to_proto(),
 822                    )
 823                    .unwrap();
 824                server
 825                    .send(
 826                        server_to_client_conn_id,
 827                        ErrorCode::Internal
 828                            .message("message 2".to_string())
 829                            .to_proto(),
 830                    )
 831                    .unwrap();
 832                server.respond(request.receipt(), proto::Ack {}).unwrap();
 833
 834                // Prevent the connection from being dropped
 835                server_incoming.next().await;
 836            })
 837            .detach();
 838
 839        let events = Arc::new(Mutex::new(Vec::new()));
 840
 841        let response = client.request(client_to_server_conn_id, proto::Ping {});
 842        let response_task = executor.spawn({
 843            let events = events.clone();
 844            async move {
 845                response.await.unwrap();
 846                events.lock().push("response".to_string());
 847            }
 848        });
 849
 850        executor
 851            .spawn({
 852                let events = events.clone();
 853                async move {
 854                    let incoming1 = client_incoming
 855                        .next()
 856                        .await
 857                        .unwrap()
 858                        .into_any()
 859                        .downcast::<TypedEnvelope<proto::Error>>()
 860                        .unwrap();
 861                    events.lock().push(incoming1.payload.message);
 862                    let incoming2 = client_incoming
 863                        .next()
 864                        .await
 865                        .unwrap()
 866                        .into_any()
 867                        .downcast::<TypedEnvelope<proto::Error>>()
 868                        .unwrap();
 869                    events.lock().push(incoming2.payload.message);
 870
 871                    // Prevent the connection from being dropped
 872                    client_incoming.next().await;
 873                }
 874            })
 875            .detach();
 876
 877        response_task.await;
 878        assert_eq!(
 879            &*events.lock(),
 880            &[
 881                "message 1".to_string(),
 882                "message 2".to_string(),
 883                "response".to_string()
 884            ]
 885        );
 886    }
 887
 888    #[gpui::test(iterations = 50)]
 889    async fn test_dropping_request_before_completion(cx: &mut TestAppContext) {
 890        let executor = cx.executor();
 891        let server = Peer::new(0);
 892        let client = Peer::new(0);
 893
 894        let (client_to_server_conn, server_to_client_conn, _kill) =
 895            Connection::in_memory(cx.executor());
 896        let (client_to_server_conn_id, io_task1, mut client_incoming) =
 897            client.add_test_connection(client_to_server_conn, cx.executor());
 898        let (server_to_client_conn_id, io_task2, mut server_incoming) =
 899            server.add_test_connection(server_to_client_conn, cx.executor());
 900
 901        executor.spawn(io_task1).detach();
 902        executor.spawn(io_task2).detach();
 903
 904        executor
 905            .spawn(async move {
 906                let request1 = server_incoming
 907                    .next()
 908                    .await
 909                    .unwrap()
 910                    .into_any()
 911                    .downcast::<TypedEnvelope<proto::Ping>>()
 912                    .unwrap();
 913                let request2 = server_incoming
 914                    .next()
 915                    .await
 916                    .unwrap()
 917                    .into_any()
 918                    .downcast::<TypedEnvelope<proto::Ping>>()
 919                    .unwrap();
 920
 921                server
 922                    .send(
 923                        server_to_client_conn_id,
 924                        ErrorCode::Internal
 925                            .message("message 1".to_string())
 926                            .to_proto(),
 927                    )
 928                    .unwrap();
 929                server
 930                    .send(
 931                        server_to_client_conn_id,
 932                        ErrorCode::Internal
 933                            .message("message 2".to_string())
 934                            .to_proto(),
 935                    )
 936                    .unwrap();
 937                server.respond(request1.receipt(), proto::Ack {}).unwrap();
 938                server.respond(request2.receipt(), proto::Ack {}).unwrap();
 939
 940                // Prevent the connection from being dropped
 941                server_incoming.next().await;
 942            })
 943            .detach();
 944
 945        let events = Arc::new(Mutex::new(Vec::new()));
 946
 947        let request1 = client.request(client_to_server_conn_id, proto::Ping {});
 948        let request1_task = executor.spawn(request1);
 949        let request2 = client.request(client_to_server_conn_id, proto::Ping {});
 950        let request2_task = executor.spawn({
 951            let events = events.clone();
 952            async move {
 953                request2.await.unwrap();
 954                events.lock().push("response 2".to_string());
 955            }
 956        });
 957
 958        executor
 959            .spawn({
 960                let events = events.clone();
 961                async move {
 962                    let incoming1 = client_incoming
 963                        .next()
 964                        .await
 965                        .unwrap()
 966                        .into_any()
 967                        .downcast::<TypedEnvelope<proto::Error>>()
 968                        .unwrap();
 969                    events.lock().push(incoming1.payload.message);
 970                    let incoming2 = client_incoming
 971                        .next()
 972                        .await
 973                        .unwrap()
 974                        .into_any()
 975                        .downcast::<TypedEnvelope<proto::Error>>()
 976                        .unwrap();
 977                    events.lock().push(incoming2.payload.message);
 978
 979                    // Prevent the connection from being dropped
 980                    client_incoming.next().await;
 981                }
 982            })
 983            .detach();
 984
 985        // Allow the request to make some progress before dropping it.
 986        cx.executor().simulate_random_delay().await;
 987        drop(request1_task);
 988
 989        request2_task.await;
 990        assert_eq!(
 991            &*events.lock(),
 992            &[
 993                "message 1".to_string(),
 994                "message 2".to_string(),
 995                "response 2".to_string()
 996            ]
 997        );
 998    }
 999
1000    #[gpui::test(iterations = 50)]
1001    async fn test_disconnect(cx: &mut TestAppContext) {
1002        let executor = cx.executor();
1003
1004        let (client_conn, mut server_conn, _kill) = Connection::in_memory(executor.clone());
1005
1006        let client = Peer::new(0);
1007        let (connection_id, io_handler, mut incoming) =
1008            client.add_test_connection(client_conn, executor.clone());
1009
1010        let (io_ended_tx, io_ended_rx) = oneshot::channel();
1011        executor
1012            .spawn(async move {
1013                io_handler.await.ok();
1014                io_ended_tx.send(()).unwrap();
1015            })
1016            .detach();
1017
1018        let (messages_ended_tx, messages_ended_rx) = oneshot::channel();
1019        executor
1020            .spawn(async move {
1021                incoming.next().await;
1022                messages_ended_tx.send(()).unwrap();
1023            })
1024            .detach();
1025
1026        client.disconnect(connection_id);
1027
1028        let _ = io_ended_rx.await;
1029        let _ = messages_ended_rx.await;
1030        assert!(
1031            server_conn
1032                .send(WebSocketMessage::Binary(vec![].into()))
1033                .await
1034                .is_err()
1035        );
1036    }
1037
1038    #[gpui::test(iterations = 50)]
1039    async fn test_io_error(cx: &mut TestAppContext) {
1040        let executor = cx.executor();
1041        let (client_conn, mut server_conn, _kill) = Connection::in_memory(executor.clone());
1042
1043        let client = Peer::new(0);
1044        let (connection_id, io_handler, mut incoming) =
1045            client.add_test_connection(client_conn, executor.clone());
1046        executor.spawn(io_handler).detach();
1047        executor
1048            .spawn(async move { incoming.next().await })
1049            .detach();
1050
1051        let response = executor.spawn(client.request(connection_id, proto::Ping {}));
1052        let _request = server_conn.rx.next().await.unwrap().unwrap();
1053
1054        drop(server_conn);
1055        assert_eq!(
1056            response.await.unwrap_err().to_string(),
1057            "connection was closed"
1058        );
1059    }
1060}