From b150efbd966e93952fe73756ff273cd4a1b3996e Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 13 Dec 2022 22:38:55 +0100 Subject: [PATCH 01/29] Set log level to debug for preview deployment Also, add a log statement when we receive the interrupt signal. Co-Authored-By: Max Brunsfeld --- crates/collab/k8s/environments/preview.sh | 2 +- crates/collab/src/main.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/collab/k8s/environments/preview.sh b/crates/collab/k8s/environments/preview.sh index 4d9dd849e96f9bb5fc672beb0b7547c6246358de..ed037b0f0cd2da09f8da53d4289f1b9daf91dedf 100644 --- a/crates/collab/k8s/environments/preview.sh +++ b/crates/collab/k8s/environments/preview.sh @@ -1,3 +1,3 @@ ZED_ENVIRONMENT=preview -RUST_LOG=info +RUST_LOG=debug INVITE_LINK_PREFIX=https://zed.dev/invites/ diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 8ad1313d1ea8bff15b54793aa40aa97405d20c8a..63daed173be7bc93578a9939e718e328603fbce2 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -69,6 +69,7 @@ async fn main() -> Result<()> { tokio::signal::ctrl_c() .await .expect("failed to listen for interrupt signal"); + tracing::info!("Received interrupt signal"); rpc_server.teardown(); }) .await?; From 7824ace58b0b35f460bb2393612cd004af6ad433 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 13 Dec 2022 22:40:55 +0100 Subject: [PATCH 02/29] collab 0.3.5 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 16b2742d436e9b8cfb9bcb035f5510e73b4fadd9..196251b5f95379f62cee18df0784d1787695785a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1130,7 +1130,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.3.4" +version = "0.3.5" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index c40ae9cd83e411209f2ff270f8f2f44073fabd24..c9cd8de4c53f3cb1bf60c736f390ee5ce9711c2c 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.3.4" +version = "0.3.5" [[bin]] name = "collab" From 34b69896e4330d111e9519d8f92c9125ff66c4e8 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 13 Dec 2022 23:03:04 +0100 Subject: [PATCH 03/29] Listen to SIGTERM in addition to ctrl-c for graceful shutdown --- crates/collab/k8s/environments/preview.sh | 2 +- crates/collab/src/main.rs | 11 +++++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/crates/collab/k8s/environments/preview.sh b/crates/collab/k8s/environments/preview.sh index ed037b0f0cd2da09f8da53d4289f1b9daf91dedf..4d9dd849e96f9bb5fc672beb0b7547c6246358de 100644 --- a/crates/collab/k8s/environments/preview.sh +++ b/crates/collab/k8s/environments/preview.sh @@ -1,3 +1,3 @@ ZED_ENVIRONMENT=preview -RUST_LOG=debug +RUST_LOG=info INVITE_LINK_PREFIX=https://zed.dev/invites/ diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 63daed173be7bc93578a9939e718e328603fbce2..1386df715721f3f4d6018484a2841cbf7565e01e 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -7,6 +7,7 @@ use std::{ net::{SocketAddr, TcpListener}, path::Path, }; +use tokio::signal::unix::SignalKind; use tracing_log::LogTracer; use tracing_subscriber::{filter::EnvFilter, fmt::format::JsonFields, Layer}; use util::ResultExt; @@ -66,9 +67,15 @@ async fn main() -> Result<()> { 
axum::Server::from_tcp(listener)? .serve(app.into_make_service_with_connect_info::<SocketAddr>()) .with_graceful_shutdown(async move { - tokio::signal::ctrl_c() - .await + let mut sigterm = tokio::signal::unix::signal(SignalKind::terminate()) .expect("failed to listen for interrupt signal"); + let mut sigint = tokio::signal::unix::signal(SignalKind::interrupt()) + .expect("failed to listen for interrupt signal"); + let sigterm = sigterm.recv(); + let sigint = sigint.recv(); + futures::pin_mut!(sigterm); + futures::pin_mut!(sigint); + futures::future::select(sigterm, sigint).await; tracing::info!("Received interrupt signal"); rpc_server.teardown(); }) From 2596fefa04fc569d7772501314c1f20f1d193ab1 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 13 Dec 2022 23:09:02 +0100 Subject: [PATCH 04/29] collab 0.3.6 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 196251b5f95379f62cee18df0784d1787695785a..3bf23529d1e192bd46f230440b6ca0900b1aba8a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1130,7 +1130,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.3.5" +version = "0.3.6" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index c9cd8de4c53f3cb1bf60c736f390ee5ce9711c2c..4eb5ae469d8f48c46512e2fff758f31e322829c6 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.3.5" +version = "0.3.6" [[bin]] name = "collab" From dde6cf596e8a9cbbfaab025a350bfc0fff108161 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 08:42:34 +0100 Subject: [PATCH 05/29] Don't wait for stale project deletion before listening for connections --- crates/collab/src/integration_tests.rs | 8 ++++---- crates/collab/src/main.rs | 2 +- crates/collab/src/rpc.rs | 7 +++---- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index bee8f9a34fd608d7c07a98de7b273ea66c624b1f..c4fc4980ec115f84a7611395adc2e437138cde77 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -685,7 +685,7 @@ async fn test_server_restarts( ); // The server finishes restarting, cleaning up stale connections. - server.start().await.unwrap(); + server.start(); deterministic.advance_clock(CLEANUP_TIMEOUT); assert_eq!( room_participants(&room_a, cx_a), @@ -805,7 +805,7 @@ async fn test_server_restarts( // The server finishes restarting, cleaning up stale connections and canceling the // call to user D because the room has become empty. - server.start().await.unwrap(); + server.start(); deterministic.advance_clock(CLEANUP_TIMEOUT); assert!(incoming_call_d.next().await.unwrap().is_none()); } @@ -6124,7 +6124,7 @@ async fn test_random_collaboration( log::info!("Simulating server restart"); server.teardown(); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); - server.start().await.unwrap(); + server.start(); deterministic.advance_clock(CLEANUP_TIMEOUT); } _ if !op_start_signals.is_empty() => { @@ -6324,7 +6324,7 @@ impl TestServer { app_state.clone(), Executor::Deterministic(deterministic.build_background()), ); - server.start().await.unwrap(); + server.start(); // Advance clock to ensure the server's cleanup task is finished.
deterministic.advance_clock(CLEANUP_TIMEOUT); Self { diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 1386df715721f3f4d6018484a2841cbf7565e01e..90594e2ada8406c9f1a654bb9ea03a727983f1f7 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -58,7 +58,7 @@ async fn main() -> Result<()> { .expect("failed to bind TCP listener"); let rpc_server = collab::rpc::Server::new(state.clone(), Executor::Production); - rpc_server.start().await?; + rpc_server.start(); let app = collab::api::routes(rpc_server.clone(), state.clone()) .merge(collab::rpc::routes(rpc_server.clone())) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 69794ab35aec448c1bec30a5e8f0a37af72f10da..93fc08ca9eec1ccfe5702713856f4edea4c70013 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -238,14 +238,14 @@ impl Server { Arc::new(server) } - pub async fn start(&self) -> Result<()> { - self.app_state.db.delete_stale_projects().await?; + pub fn start(&self) { let db = self.app_state.db.clone(); let peer = self.peer.clone(); - let timeout = self.executor.sleep(CLEANUP_TIMEOUT); let pool = self.connection_pool.clone(); let live_kit_client = self.app_state.live_kit_client.clone(); + let timeout = self.executor.sleep(CLEANUP_TIMEOUT); self.executor.spawn_detached(async move { + db.delete_stale_projects().await.trace_err(); timeout.await; if let Some(room_ids) = db.stale_room_ids().await.trace_err() { for room_id in room_ids { @@ -321,7 +321,6 @@ impl Server { } } }); - Ok(()) } pub fn teardown(&self) { From 59c9a5757028187a6fff69872a2dd409deb3fbf4 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 08:43:18 +0100 Subject: [PATCH 06/29] collab 0.3.7 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3bf23529d1e192bd46f230440b6ca0900b1aba8a..dc9904792da6ac6ae464560dc58acc43701c49b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1130,7 +1130,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.3.6" +version = "0.3.7" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 4eb5ae469d8f48c46512e2fff758f31e322829c6..c4a6f2799ed46449fd9870b1abf45acb1d36001b 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.3.6" +version = "0.3.7" [[bin]] name = "collab" From 897506c79772206f648b2c4bc5829cf2ff913940 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 08:54:46 +0100 Subject: [PATCH 07/29] Define readiness probe to know when the new server can accept traffic --- crates/collab/k8s/manifest.template.yml | 5 +++++ crates/collab/src/integration_tests.rs | 8 ++++---- crates/collab/src/main.rs | 2 +- crates/collab/src/rpc.rs | 7 ++++--- 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/crates/collab/k8s/manifest.template.yml b/crates/collab/k8s/manifest.template.yml index 1f0fafb170256b7a75268d6349c38d89cf7f9d69..450a8ff5415eb740297730e48573114c91d8ee69 100644 --- a/crates/collab/k8s/manifest.template.yml +++ b/crates/collab/k8s/manifest.template.yml @@ -59,6 +59,11 @@ spec: ports: - containerPort: 8080 protocol: TCP + readinessProbe: + httpGet: + path: / + initialDelaySeconds: 5 + periodSeconds: 5 env: - name: HTTP_PORT value: "8080" diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs 
index c4fc4980ec115f84a7611395adc2e437138cde77..bee8f9a34fd608d7c07a98de7b273ea66c624b1f 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -685,7 +685,7 @@ async fn test_server_restarts( ); // The server finishes restarting, cleaning up stale connections. - server.start(); + server.start().await.unwrap(); deterministic.advance_clock(CLEANUP_TIMEOUT); assert_eq!( room_participants(&room_a, cx_a), @@ -805,7 +805,7 @@ async fn test_server_restarts( // The server finishes restarting, cleaning up stale connections and canceling the // call to user D because the room has become empty. - server.start(); + server.start().await.unwrap(); deterministic.advance_clock(CLEANUP_TIMEOUT); assert!(incoming_call_d.next().await.unwrap().is_none()); } @@ -6124,7 +6124,7 @@ async fn test_random_collaboration( log::info!("Simulating server restart"); server.teardown(); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); - server.start(); + server.start().await.unwrap(); deterministic.advance_clock(CLEANUP_TIMEOUT); } _ if !op_start_signals.is_empty() => { @@ -6324,7 +6324,7 @@ impl TestServer { app_state.clone(), Executor::Deterministic(deterministic.build_background()), ); - server.start(); + server.start().await.unwrap(); // Advance clock to ensure the server's cleanup task is finished. deterministic.advance_clock(CLEANUP_TIMEOUT); Self { diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 90594e2ada8406c9f1a654bb9ea03a727983f1f7..1386df715721f3f4d6018484a2841cbf7565e01e 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -58,7 +58,7 @@ async fn main() -> Result<()> { .expect("failed to bind TCP listener"); let rpc_server = collab::rpc::Server::new(state.clone(), Executor::Production); - rpc_server.start(); + rpc_server.start().await?; let app = collab::api::routes(rpc_server.clone(), state.clone()) .merge(collab::rpc::routes(rpc_server.clone())) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 93fc08ca9eec1ccfe5702713856f4edea4c70013..69794ab35aec448c1bec30a5e8f0a37af72f10da 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -238,14 +238,14 @@ impl Server { Arc::new(server) } - pub fn start(&self) { + pub async fn start(&self) -> Result<()> { + self.app_state.db.delete_stale_projects().await?; let db = self.app_state.db.clone(); let peer = self.peer.clone(); + let timeout = self.executor.sleep(CLEANUP_TIMEOUT); let pool = self.connection_pool.clone(); let live_kit_client = self.app_state.live_kit_client.clone(); - let timeout = self.executor.sleep(CLEANUP_TIMEOUT); self.executor.spawn_detached(async move { - db.delete_stale_projects().await.trace_err(); timeout.await; if let Some(room_ids) = db.stale_room_ids().await.trace_err() { for room_id in room_ids { @@ -321,6 +321,7 @@ impl Server { } } }); + Ok(()) } pub fn teardown(&self) { From 98a593b2630166e371882c10bc56ac8141a001a8 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 08:56:02 +0100 Subject: [PATCH 08/29] collab 0.3.8 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc9904792da6ac6ae464560dc58acc43701c49b2..76442c342307fd6f70e822ca65b47a19f6f209f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1130,7 +1130,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.3.7" +version = "0.3.8" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml 
b/crates/collab/Cargo.toml index c4a6f2799ed46449fd9870b1abf45acb1d36001b..7bc7216a898a0009b7c36aa361d549aef4458291 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.3.7" +version = "0.3.8" [[bin]] name = "collab" From dc47552180848eb6efc6d7d3a7084e731acaa705 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 08:58:19 +0100 Subject: [PATCH 09/29] Fix kubernetes configuration for readiness probe --- crates/collab/k8s/manifest.template.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/collab/k8s/manifest.template.yml b/crates/collab/k8s/manifest.template.yml index 450a8ff5415eb740297730e48573114c91d8ee69..42ff546cb9bb0866b112f2940cae2075ff440265 100644 --- a/crates/collab/k8s/manifest.template.yml +++ b/crates/collab/k8s/manifest.template.yml @@ -62,8 +62,9 @@ spec: readinessProbe: httpGet: path: / - initialDelaySeconds: 5 - periodSeconds: 5 + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 env: - name: HTTP_PORT value: "8080" From e00cb6b07454e27651531c50c9712dfd94a88b71 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 09:05:19 +0100 Subject: [PATCH 10/29] collab 0.3.9 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 76442c342307fd6f70e822ca65b47a19f6f209f7..92f7d06a31c7cbe3027266ee08e2145447d03b11 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1130,7 +1130,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.3.8" +version = "0.3.9" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 7bc7216a898a0009b7c36aa361d549aef4458291..3fea175030f5b8c1b77d20ef0034445c9f6cab87 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.3.8" +version = "0.3.9" [[bin]] name = "collab" From b9c77965476893d94cfe35a0a558e3c112fda46a Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 09:35:36 +0100 Subject: [PATCH 11/29] Reduce readiness probe delay and period --- crates/collab/k8s/manifest.template.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/collab/k8s/manifest.template.yml b/crates/collab/k8s/manifest.template.yml index 42ff546cb9bb0866b112f2940cae2075ff440265..68658b575ed4bbabf94248845a83fffffba96f36 100644 --- a/crates/collab/k8s/manifest.template.yml +++ b/crates/collab/k8s/manifest.template.yml @@ -63,8 +63,8 @@ spec: httpGet: path: / port: 8080 - initialDelaySeconds: 5 - periodSeconds: 5 + initialDelaySeconds: 1 + periodSeconds: 1 env: - name: HTTP_PORT value: "8080" From 02c30b0091c868a1e6251a99b6e0515e175acb41 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 09:35:52 +0100 Subject: [PATCH 12/29] collab 0.3.10 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 92f7d06a31c7cbe3027266ee08e2145447d03b11..bf649c92c38ab2b3e9d83d256571c9d2a216c323 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1130,7 +1130,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.3.9" +version = "0.3.10" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 
3fea175030f5b8c1b77d20ef0034445c9f6cab87..0be4d6792d776ec684cd36f6c5141ee62a2e5230 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.3.9" +version = "0.3.10" [[bin]] name = "collab" From 9530976f617ad3d563bbf4762eccbf061b10f23f Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 11:24:36 +0100 Subject: [PATCH 13/29] Try using a longer timeout for cleaning up stale rooms --- crates/collab/src/rpc.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 69794ab35aec448c1bec30a5e8f0a37af72f10da..e1b0056226440684d5ce78cc8a77cce2bc04e510 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -58,7 +58,7 @@ use tower::ServiceBuilder; use tracing::{info_span, instrument, Instrument}; pub const RECONNECT_TIMEOUT: Duration = Duration::from_secs(5); -pub const CLEANUP_TIMEOUT: Duration = Duration::from_secs(10); +pub const CLEANUP_TIMEOUT: Duration = Duration::from_secs(20); lazy_static! { static ref METRIC_CONNECTIONS: IntGauge = From 63e7b9189dac2768cccf146f9222be64d21f8916 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 11:25:04 +0100 Subject: [PATCH 14/29] collab 0.3.11 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bf649c92c38ab2b3e9d83d256571c9d2a216c323..794a494de1dded2d4eaec815cd45bc8fcc45dbc6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1130,7 +1130,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.3.10" +version = "0.3.11" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 0be4d6792d776ec684cd36f6c5141ee62a2e5230..01ecb1e751cf91591687e30c703f65668927f32e 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.3.10" +version = "0.3.11" [[bin]] name = "collab" From 674fddac875d4877976ab95767ffe982fbaf5ff8 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 11:42:12 +0100 Subject: [PATCH 15/29] Instrument `rpc::Server::start` and reduce cleanup timeout again --- crates/collab/src/rpc.rs | 148 ++++++++++++++++++++++----------------- 1 file changed, 84 insertions(+), 64 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index e1b0056226440684d5ce78cc8a77cce2bc04e510..70802fa4312f96662dd26504c58c4c0c79988262 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -58,7 +58,7 @@ use tower::ServiceBuilder; use tracing::{info_span, instrument, Instrument}; pub const RECONNECT_TIMEOUT: Duration = Duration::from_secs(5); -pub const CLEANUP_TIMEOUT: Duration = Duration::from_secs(20); +pub const CLEANUP_TIMEOUT: Duration = Duration::from_secs(10); lazy_static! 
{ static ref METRIC_CONNECTIONS: IntGauge = @@ -239,88 +239,108 @@ impl Server { } pub async fn start(&self) -> Result<()> { - self.app_state.db.delete_stale_projects().await?; let db = self.app_state.db.clone(); let peer = self.peer.clone(); let timeout = self.executor.sleep(CLEANUP_TIMEOUT); let pool = self.connection_pool.clone(); let live_kit_client = self.app_state.live_kit_client.clone(); - self.executor.spawn_detached(async move { - timeout.await; - if let Some(room_ids) = db.stale_room_ids().await.trace_err() { - for room_id in room_ids { - let mut contacts_to_update = HashSet::default(); - let mut canceled_calls_to_user_ids = Vec::new(); - let mut live_kit_room = String::new(); - let mut delete_live_kit_room = false; - - if let Ok(mut refreshed_room) = db.refresh_room(room_id).await { - room_updated(&refreshed_room.room, &peer); - contacts_to_update - .extend(refreshed_room.stale_participant_user_ids.iter().copied()); - contacts_to_update - .extend(refreshed_room.canceled_calls_to_user_ids.iter().copied()); - canceled_calls_to_user_ids = - mem::take(&mut refreshed_room.canceled_calls_to_user_ids); - live_kit_room = mem::take(&mut refreshed_room.room.live_kit_room); - delete_live_kit_room = refreshed_room.room.participants.is_empty(); - } - { - let pool = pool.lock(); - for canceled_user_id in canceled_calls_to_user_ids { - for connection_id in pool.user_connection_ids(canceled_user_id) { - peer.send( - connection_id, - proto::CallCanceled { - room_id: room_id.to_proto(), - }, - ) - .trace_err(); - } + let span = info_span!("start server"); + let span_enter = span.enter(); + + tracing::info!("begin deleting stale projects"); + self.app_state.db.delete_stale_projects().await?; + tracing::info!("finish deleting stale projects"); + + drop(span_enter); + self.executor.spawn_detached( + async move { + tracing::info!("waiting for cleanup timeout"); + timeout.await; + tracing::info!("cleanup timeout expired, retrieving stale rooms"); + if let Some(room_ids) = db.stale_room_ids().await.trace_err() { + tracing::info!(stale_room_count = room_ids.len(), "retrieved stale rooms"); + for room_id in room_ids { + let mut contacts_to_update = HashSet::default(); + let mut canceled_calls_to_user_ids = Vec::new(); + let mut live_kit_room = String::new(); + let mut delete_live_kit_room = false; + + if let Ok(mut refreshed_room) = db.refresh_room(room_id).await { + tracing::info!( + room_id = room_id.0, + new_participant_count = refreshed_room.room.participants.len(), + "refreshed room" + ); + room_updated(&refreshed_room.room, &peer); + contacts_to_update + .extend(refreshed_room.stale_participant_user_ids.iter().copied()); + contacts_to_update + .extend(refreshed_room.canceled_calls_to_user_ids.iter().copied()); + canceled_calls_to_user_ids = + mem::take(&mut refreshed_room.canceled_calls_to_user_ids); + live_kit_room = mem::take(&mut refreshed_room.room.live_kit_room); + delete_live_kit_room = refreshed_room.room.participants.is_empty(); } - } - for user_id in contacts_to_update { - let busy = db.is_user_busy(user_id).await.trace_err(); - let contacts = db.get_contacts(user_id).await.trace_err(); - if let Some((busy, contacts)) = busy.zip(contacts) { + { let pool = pool.lock(); - let updated_contact = contact_for_user(user_id, false, busy, &pool); - for contact in contacts { - if let db::Contact::Accepted { - user_id: contact_user_id, - .. 
- } = contact - { - for contact_conn_id in pool.user_connection_ids(contact_user_id) + for canceled_user_id in canceled_calls_to_user_ids { + for connection_id in pool.user_connection_ids(canceled_user_id) { + peer.send( + connection_id, + proto::CallCanceled { + room_id: room_id.to_proto(), + }, + ) + .trace_err(); + } + } + } + + for user_id in contacts_to_update { + let busy = db.is_user_busy(user_id).await.trace_err(); + let contacts = db.get_contacts(user_id).await.trace_err(); + if let Some((busy, contacts)) = busy.zip(contacts) { + let pool = pool.lock(); + let updated_contact = contact_for_user(user_id, false, busy, &pool); + for contact in contacts { + if let db::Contact::Accepted { + user_id: contact_user_id, + .. + } = contact { - peer.send( - contact_conn_id, - proto::UpdateContacts { - contacts: vec![updated_contact.clone()], - remove_contacts: Default::default(), - incoming_requests: Default::default(), - remove_incoming_requests: Default::default(), - outgoing_requests: Default::default(), - remove_outgoing_requests: Default::default(), - }, - ) - .trace_err(); + for contact_conn_id in + pool.user_connection_ids(contact_user_id) + { + peer.send( + contact_conn_id, + proto::UpdateContacts { + contacts: vec![updated_contact.clone()], + remove_contacts: Default::default(), + incoming_requests: Default::default(), + remove_incoming_requests: Default::default(), + outgoing_requests: Default::default(), + remove_outgoing_requests: Default::default(), + }, + ) + .trace_err(); + } } } } } - } - if let Some(live_kit) = live_kit_client.as_ref() { - if delete_live_kit_room { - live_kit.delete_room(live_kit_room).await.trace_err(); + if let Some(live_kit) = live_kit_client.as_ref() { + if delete_live_kit_room { + live_kit.delete_room(live_kit_room).await.trace_err(); + } } } } } - }); + .instrument(span), + ); Ok(()) } From 553585b9a1b453811e72db3d62543cd40f18774d Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 11:43:12 +0100 Subject: [PATCH 16/29] Add more logging to `Room` --- Cargo.lock | 1 + crates/call/Cargo.toml | 1 + crates/call/src/room.rs | 23 ++++++++++++++++++----- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 794a494de1dded2d4eaec815cd45bc8fcc45dbc6..0cb28e228d9888159b7637b9dfe9922f36a1d52f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -823,6 +823,7 @@ dependencies = [ "futures 0.3.25", "gpui", "live_kit_client", + "log", "media", "postage", "project", diff --git a/crates/call/Cargo.toml b/crates/call/Cargo.toml index a7a3331d20be6b93701999de33d0922e184e38af..c0a6cedc622d6db2d6bec4a3ff3895182c541f85 100644 --- a/crates/call/Cargo.toml +++ b/crates/call/Cargo.toml @@ -21,6 +21,7 @@ test-support = [ client = { path = "../client" } collections = { path = "../collections" } gpui = { path = "../gpui" } +log = "0.4" live_kit_client = { path = "../live_kit_client" } media = { path = "../media" } project = { path = "../project" } diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index 8deb4341180afa02c39dc096f78b95714d93784a..b26a8fcbfe43a727755d9bee38a88e9d091513b8 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -13,7 +13,7 @@ use live_kit_client::{LocalTrackPublication, LocalVideoTrack, RemoteVideoTrackUp use postage::stream::Stream; use project::Project; use std::{mem, sync::Arc, time::Duration}; -use util::{post_inc, ResultExt}; +use util::{post_inc, ResultExt, TryFutureExt}; pub const RECONNECT_TIMEOUT: Duration = client::RECEIVE_TIMEOUT; @@ -50,7 +50,7 @@ pub struct Room { 
user_store: ModelHandle, subscriptions: Vec, pending_room_update: Option>, - maintain_connection: Option>>, + maintain_connection: Option>>, } impl Entity for Room { @@ -58,6 +58,7 @@ impl Entity for Room { fn release(&mut self, _: &mut MutableAppContext) { if self.status.is_online() { + log::info!("room was released, sending leave message"); self.client.send(proto::LeaveRoom {}).log_err(); } } @@ -122,7 +123,7 @@ impl Room { }; let maintain_connection = - cx.spawn_weak(|this, cx| Self::maintain_connection(this, client.clone(), cx)); + cx.spawn_weak(|this, cx| Self::maintain_connection(this, client.clone(), cx).log_err()); Self { id, @@ -229,6 +230,7 @@ impl Room { cx.notify(); cx.emit(Event::Left); + log::info!("leaving room"); self.status = RoomStatus::Offline; self.remote_participants.clear(); self.pending_participants.clear(); @@ -254,6 +256,7 @@ impl Room { .map_or(false, |s| s.is_connected()); // Even if we're initially connected, any future change of the status means we momentarily disconnected. if !is_connected || client_status.next().await.is_some() { + log::info!("detected client disconnection"); let room_id = this .upgrade(&cx) .ok_or_else(|| anyhow!("room was dropped"))? @@ -269,8 +272,13 @@ impl Room { let client_reconnection = async { let mut remaining_attempts = 3; while remaining_attempts > 0 { + log::info!( + "waiting for client status change, remaining attempts {}", + remaining_attempts + ); if let Some(status) = client_status.next().await { if status.is_connected() { + log::info!("client reconnected, attempting to rejoin room"); let rejoin_room = async { let response = client.request(proto::JoinRoom { id: room_id }).await?; @@ -285,7 +293,7 @@ impl Room { anyhow::Ok(()) }; - if rejoin_room.await.is_ok() { + if rejoin_room.await.log_err().is_some() { return true; } else { remaining_attempts -= 1; @@ -303,12 +311,15 @@ impl Room { futures::select_biased! { reconnected = client_reconnection => { if reconnected { + log::info!("successfully reconnected to room"); // If we successfully joined the room, go back around the loop // waiting for future connection status changes. continue; } } - _ = reconnection_timeout => {} + _ = reconnection_timeout => { + log::info!("room reconnection timeout expired"); + } } } @@ -316,6 +327,7 @@ impl Room { // or an error occurred while trying to re-join the room. Either way // we leave the room and return an error. 
if let Some(this) = this.upgrade(&cx) { + log::info!("reconnection failed, leaving room"); let _ = this.update(&mut cx, |this, cx| this.leave(cx)); } return Err(anyhow!( @@ -499,6 +511,7 @@ impl Room { this.pending_room_update.take(); if this.should_leave() { + log::info!("room is empty, leaving"); let _ = this.leave(cx); } From 9bd400cf16b55f16b7cb94d2351a69605716fe38 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 11:43:33 +0100 Subject: [PATCH 17/29] collab 0.3.12 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0cb28e228d9888159b7637b9dfe9922f36a1d52f..ad67b1e4572712f90b48726c4b2f0ae00cdc3b91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1131,7 +1131,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.3.11" +version = "0.3.12" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 01ecb1e751cf91591687e30c703f65668927f32e..d82171a7bdbd1ee676b5bc2a67ee06bb0eb381f7 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.3.11" +version = "0.3.12" [[bin]] name = "collab" From 05e99eb67ed804a54a421fa1a447df869d3324b5 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 15:55:56 +0100 Subject: [PATCH 18/29] Introduce an epoch to `ConnectionId` and `PeerId` --- crates/call/src/room.rs | 18 +- crates/client/src/client.rs | 18 +- crates/client/src/test.rs | 4 +- .../20221109000000_test_schema.sql | 8 +- ...4346_change_epoch_from_uuid_to_integer.sql | 9 + crates/collab/src/db.rs | 357 +++++++++++++----- crates/collab/src/db/project.rs | 2 +- crates/collab/src/db/project_collaborator.rs | 2 +- crates/collab/src/db/room_participant.rs | 4 +- crates/collab/src/db/tests.rs | 26 +- crates/collab/src/integration_tests.rs | 10 +- crates/collab/src/rpc.rs | 135 ++++--- crates/collab_ui/src/collab_titlebar_item.rs | 36 +- crates/collab_ui/src/contact_list.rs | 143 +++---- crates/project/src/lsp_command.rs | 2 +- crates/project/src/project.rs | 38 +- crates/rpc/proto/zed.proto | 29 +- crates/rpc/src/macros.rs | 7 +- crates/rpc/src/peer.rs | 132 ++++--- crates/rpc/src/proto.rs | 93 ++++- crates/rpc/src/rpc.rs | 2 +- crates/workspace/src/item.rs | 4 +- crates/workspace/src/shared_screen.rs | 2 +- crates/workspace/src/workspace.rs | 17 +- 24 files changed, 715 insertions(+), 383 deletions(-) create mode 100644 crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index b26a8fcbfe43a727755d9bee38a88e9d091513b8..1d22fe50f1a8ff4e9ac530ff991f5f9010a2b4e6 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -3,7 +3,7 @@ use crate::{ IncomingCall, }; use anyhow::{anyhow, Result}; -use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore}; +use client::{proto, Client, TypedEnvelope, User, UserStore}; use collections::{BTreeMap, HashSet}; use futures::{FutureExt, StreamExt}; use gpui::{ @@ -20,10 +20,10 @@ pub const RECONNECT_TIMEOUT: Duration = client::RECEIVE_TIMEOUT; #[derive(Clone, Debug, PartialEq, Eq)] pub enum Event { ParticipantLocationChanged { - participant_id: PeerId, + participant_id: proto::PeerId, }, RemoteVideoTracksChanged { - participant_id: PeerId, + participant_id: proto::PeerId, }, RemoteProjectShared { owner: Arc, @@ -41,7 +41,7 @@ pub struct Room { 
live_kit: Option, status: RoomStatus, local_participant: LocalParticipant, - remote_participants: BTreeMap, + remote_participants: BTreeMap, pending_participants: Vec>, participant_user_ids: HashSet, pending_call_count: usize, @@ -349,7 +349,7 @@ impl Room { &self.local_participant } - pub fn remote_participants(&self) -> &BTreeMap { + pub fn remote_participants(&self) -> &BTreeMap { &self.remote_participants } @@ -419,7 +419,7 @@ impl Room { if let Some(participants) = remote_participants.log_err() { let mut participant_peer_ids = HashSet::default(); for (participant, user) in room.participants.into_iter().zip(participants) { - let peer_id = PeerId(participant.peer_id); + let Some(peer_id) = participant.peer_id else { continue }; this.participant_user_ids.insert(participant.user_id); participant_peer_ids.insert(peer_id); @@ -476,7 +476,7 @@ impl Room { if let Some(live_kit) = this.live_kit.as_ref() { let tracks = - live_kit.room.remote_video_tracks(&peer_id.0.to_string()); + live_kit.room.remote_video_tracks(&peer_id.to_string()); for track in tracks { this.remote_video_track_updated( RemoteVideoTrackUpdate::Subscribed(track), @@ -531,7 +531,7 @@ impl Room { ) -> Result<()> { match change { RemoteVideoTrackUpdate::Subscribed(track) => { - let peer_id = PeerId(track.publisher_id().parse()?); + let peer_id = track.publisher_id().parse()?; let track_id = track.sid().to_string(); let participant = self .remote_participants @@ -551,7 +551,7 @@ impl Room { publisher_id, track_id, } => { - let peer_id = PeerId(publisher_id.parse()?); + let peer_id = publisher_id.parse()?; let participant = self .remote_participants .get_mut(&peer_id) diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 5e10f9ea8f4374d3e0607be2a0f68fc5259bc431..876b83b11afb003a4e9b065ebfed44741b77f4cd 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -23,7 +23,7 @@ use lazy_static::lazy_static; use parking_lot::RwLock; use postage::watch; use rand::prelude::*; -use rpc::proto::{AnyTypedEnvelope, EntityMessage, EnvelopedMessage, RequestMessage}; +use rpc::proto::{AnyTypedEnvelope, EntityMessage, EnvelopedMessage, PeerId, RequestMessage}; use serde::Deserialize; use std::{ any::TypeId, @@ -140,7 +140,7 @@ impl EstablishConnectionError { } } -#[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq)] pub enum Status { SignedOut, UpgradeRequired, @@ -306,7 +306,7 @@ impl Client { pub fn new(http: Arc, cx: &AppContext) -> Arc { Arc::new(Self { id: 0, - peer: Peer::new(), + peer: Peer::new(0), telemetry: Telemetry::new(http.clone(), cx), http, state: Default::default(), @@ -810,7 +810,11 @@ impl Client { hello_message_type_name ) })?; - Ok(PeerId(hello.payload.peer_id)) + let peer_id = hello + .payload + .peer_id + .ok_or_else(|| anyhow!("invalid peer id"))?; + Ok(peer_id) }; let peer_id = match peer_id.await { @@ -822,7 +826,7 @@ impl Client { }; log::info!( - "set status to connected (connection id: {}, peer id: {})", + "set status to connected (connection id: {:?}, peer id: {:?})", connection_id, peer_id ); @@ -853,7 +857,7 @@ impl Client { .spawn(async move { match handle_io.await { Ok(()) => { - if *this.status().borrow() + if this.status().borrow().clone() == (Status::Connected { connection_id, peer_id, @@ -1194,7 +1198,7 @@ impl Client { let mut state = self.state.write(); let type_name = message.payload_type_name(); let payload_type_id = message.payload_type_id(); - let sender_id = message.original_sender_id().map(|id| id.0); + let sender_id = 
message.original_sender_id(); let mut subscriber = None; diff --git a/crates/client/src/test.rs b/crates/client/src/test.rs index 3cfba3b1847c4af4655ad625840492db59249974..db9e0d8c487b27a7474373af9d2c25a29e04b9d7 100644 --- a/crates/client/src/test.rs +++ b/crates/client/src/test.rs @@ -35,7 +35,7 @@ impl FakeServer { cx: &TestAppContext, ) -> Self { let server = Self { - peer: Peer::new(), + peer: Peer::new(0), state: Default::default(), user_id: client_user_id, executor: cx.foreground(), @@ -92,7 +92,7 @@ impl FakeServer { peer.send( connection_id, proto::Hello { - peer_id: connection_id.0, + peer_id: Some(connection_id.into()), }, ) .unwrap(); diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index c0cc5b3457e5b279a6f211ec7e9f2b9d30823868..b96b07d977cc280ab134ef1ddd12f48595db0254 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -44,7 +44,7 @@ CREATE TABLE "projects" ( "room_id" INTEGER REFERENCES rooms (id) NOT NULL, "host_user_id" INTEGER REFERENCES users (id) NOT NULL, "host_connection_id" INTEGER NOT NULL, - "host_connection_epoch" TEXT NOT NULL, + "host_connection_epoch" INTEGER NOT NULL, "unregistered" BOOLEAN NOT NULL DEFAULT FALSE ); CREATE INDEX "index_projects_on_host_connection_epoch" ON "projects" ("host_connection_epoch"); @@ -103,7 +103,7 @@ CREATE TABLE "project_collaborators" ( "id" INTEGER PRIMARY KEY AUTOINCREMENT, "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "connection_id" INTEGER NOT NULL, - "connection_epoch" TEXT NOT NULL, + "connection_epoch" INTEGER NOT NULL, "user_id" INTEGER NOT NULL, "replica_id" INTEGER NOT NULL, "is_host" BOOLEAN NOT NULL @@ -119,14 +119,14 @@ CREATE TABLE "room_participants" ( "room_id" INTEGER NOT NULL REFERENCES rooms (id), "user_id" INTEGER NOT NULL REFERENCES users (id), "answering_connection_id" INTEGER, - "answering_connection_epoch" TEXT, + "answering_connection_epoch" INTEGER, "answering_connection_lost" BOOLEAN NOT NULL, "location_kind" INTEGER, "location_project_id" INTEGER, "initial_project_id" INTEGER, "calling_user_id" INTEGER NOT NULL REFERENCES users (id), "calling_connection_id" INTEGER NOT NULL, - "calling_connection_epoch" TEXT NOT NULL + "calling_connection_epoch" INTEGER NOT NULL ); CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id"); CREATE INDEX "index_room_participants_on_room_id" ON "room_participants" ("room_id"); diff --git a/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql b/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql new file mode 100644 index 0000000000000000000000000000000000000000..ed294502a5ba98a1563c04502fe92fb783477e50 --- /dev/null +++ b/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql @@ -0,0 +1,9 @@ +ALTER TABLE "projects" + ALTER COLUMN "host_connection_epoch" TYPE INTEGER USING 0; + +ALTER TABLE "project_collaborators" + ALTER COLUMN "connection_epoch" TYPE INTEGER USING 0; + +ALTER TABLE "room_participants" + ALTER COLUMN "answering_connection_epoch" TYPE INTEGER USING 0, + ALTER COLUMN "calling_connection_epoch" TYPE INTEGER USING 0; diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 892ea4ccd658faab2ccea8b685110a6d2bb6057a..6237cc4f79d5ddc0fcf833545eb1f594da2ea57e 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ 
-1076,7 +1076,7 @@ impl Database { pub async fn create_room( &self, user_id: UserId, - connection_id: ConnectionId, + connection: ConnectionId, live_kit_room: &str, ) -> Result> { self.room_transaction(|tx| async move { @@ -1091,12 +1091,12 @@ impl Database { room_participant::ActiveModel { room_id: ActiveValue::set(room_id), user_id: ActiveValue::set(user_id), - answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), - answering_connection_epoch: ActiveValue::set(Some(self.epoch())), + answering_connection_id: ActiveValue::set(Some(connection.id as i32)), + answering_connection_epoch: ActiveValue::set(Some(connection.epoch as i32)), answering_connection_lost: ActiveValue::set(false), calling_user_id: ActiveValue::set(user_id), - calling_connection_id: ActiveValue::set(connection_id.0 as i32), - calling_connection_epoch: ActiveValue::set(self.epoch()), + calling_connection_id: ActiveValue::set(connection.id as i32), + calling_connection_epoch: ActiveValue::set(connection.epoch as i32), ..Default::default() } .insert(&*tx) @@ -1112,7 +1112,7 @@ impl Database { &self, room_id: RoomId, calling_user_id: UserId, - calling_connection_id: ConnectionId, + calling_connection: ConnectionId, called_user_id: UserId, initial_project_id: Option, ) -> Result> { @@ -1122,8 +1122,8 @@ impl Database { user_id: ActiveValue::set(called_user_id), answering_connection_lost: ActiveValue::set(false), calling_user_id: ActiveValue::set(calling_user_id), - calling_connection_id: ActiveValue::set(calling_connection_id.0 as i32), - calling_connection_epoch: ActiveValue::set(self.epoch()), + calling_connection_id: ActiveValue::set(calling_connection.id as i32), + calling_connection_epoch: ActiveValue::set(calling_connection.epoch as i32), initial_project_id: ActiveValue::set(initial_project_id), ..Default::default() } @@ -1192,19 +1192,23 @@ impl Database { pub async fn cancel_call( &self, expected_room_id: Option, - calling_connection_id: ConnectionId, + calling_connection: ConnectionId, called_user_id: UserId, ) -> Result> { self.room_transaction(|tx| async move { let participant = room_participant::Entity::find() .filter( - room_participant::Column::UserId - .eq(called_user_id) - .and( + Condition::all() + .add(room_participant::Column::UserId.eq(called_user_id)) + .add( room_participant::Column::CallingConnectionId - .eq(calling_connection_id.0 as i32), + .eq(calling_connection.id as i32), ) - .and(room_participant::Column::AnsweringConnectionId.is_null()), + .add( + room_participant::Column::CallingConnectionEpoch + .eq(calling_connection.epoch as i32), + ) + .add(room_participant::Column::AnsweringConnectionId.is_null()), ) .one(&*tx) .await? 
@@ -1228,7 +1232,7 @@ impl Database { &self, room_id: RoomId, user_id: UserId, - connection_id: ConnectionId, + connection: ConnectionId, ) -> Result> { self.room_transaction(|tx| async move { let result = room_participant::Entity::update_many() @@ -1242,13 +1246,13 @@ impl Database { .add(room_participant::Column::AnsweringConnectionLost.eq(true)) .add( room_participant::Column::AnsweringConnectionEpoch - .ne(self.epoch()), + .ne(connection.epoch as i32), ), ), ) .set(room_participant::ActiveModel { - answering_connection_id: ActiveValue::set(Some(connection_id.0 as i32)), - answering_connection_epoch: ActiveValue::set(Some(self.epoch())), + answering_connection_id: ActiveValue::set(Some(connection.id as i32)), + answering_connection_epoch: ActiveValue::set(Some(connection.epoch as i32)), answering_connection_lost: ActiveValue::set(false), ..Default::default() }) @@ -1264,10 +1268,20 @@ impl Database { .await } - pub async fn leave_room(&self, connection_id: ConnectionId) -> Result> { + pub async fn leave_room(&self, connection: ConnectionId) -> Result> { self.room_transaction(|tx| async move { let leaving_participant = room_participant::Entity::find() - .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0 as i32)) + .filter( + Condition::all() + .add( + room_participant::Column::AnsweringConnectionId + .eq(connection.id as i32), + ) + .add( + room_participant::Column::AnsweringConnectionEpoch + .eq(connection.epoch as i32), + ), + ) .one(&*tx) .await?; @@ -1281,9 +1295,16 @@ impl Database { // Cancel pending calls initiated by the leaving user. let called_participants = room_participant::Entity::find() .filter( - room_participant::Column::CallingConnectionId - .eq(connection_id.0) - .and(room_participant::Column::AnsweringConnectionId.is_null()), + Condition::all() + .add( + room_participant::Column::CallingConnectionId + .eq(connection.id as i32), + ) + .add( + room_participant::Column::CallingConnectionEpoch + .eq(connection.epoch as i32), + ) + .add(room_participant::Column::AnsweringConnectionId.is_null()), ) .all(&*tx) .await?; @@ -1310,7 +1331,16 @@ impl Database { project_collaborator::Column::ProjectId, QueryProjectIds::ProjectId, ) - .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0 as i32)) + .filter( + Condition::all() + .add( + project_collaborator::Column::ConnectionId.eq(connection.id as i32), + ) + .add( + project_collaborator::Column::ConnectionEpoch + .eq(connection.epoch as i32), + ), + ) .into_values::<_, QueryProjectIds>() .all(&*tx) .await?; @@ -1331,32 +1361,43 @@ impl Database { host_connection_id: Default::default(), }); - let collaborator_connection_id = - ConnectionId(collaborator.connection_id as u32); - if collaborator_connection_id != connection_id { + let collaborator_connection_id = ConnectionId { + epoch: collaborator.connection_epoch as u32, + id: collaborator.connection_id as u32, + }; + if collaborator_connection_id != connection { left_project.connection_ids.push(collaborator_connection_id); } if collaborator.is_host { left_project.host_user_id = collaborator.user_id; - left_project.host_connection_id = - ConnectionId(collaborator.connection_id as u32); + left_project.host_connection_id = collaborator_connection_id; } } drop(collaborators); // Leave projects. 
project_collaborator::Entity::delete_many() - .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0 as i32)) + .filter( + Condition::all() + .add( + project_collaborator::Column::ConnectionId.eq(connection.id as i32), + ) + .add( + project_collaborator::Column::ConnectionEpoch + .eq(connection.epoch as i32), + ), + ) .exec(&*tx) .await?; // Unshare projects. project::Entity::delete_many() .filter( - project::Column::RoomId - .eq(room_id) - .and(project::Column::HostConnectionId.eq(connection_id.0 as i32)), + Condition::all() + .add(project::Column::RoomId.eq(room_id)) + .add(project::Column::HostConnectionId.eq(connection.id as i32)) + .add(project::Column::HostConnectionEpoch.eq(connection.epoch as i32)), ) .exec(&*tx) .await?; @@ -1387,7 +1428,7 @@ impl Database { pub async fn update_room_participant_location( &self, room_id: RoomId, - connection_id: ConnectionId, + connection: ConnectionId, location: proto::ParticipantLocation, ) -> Result> { self.room_transaction(|tx| async { @@ -1414,9 +1455,18 @@ impl Database { } let result = room_participant::Entity::update_many() - .filter(room_participant::Column::RoomId.eq(room_id).and( - room_participant::Column::AnsweringConnectionId.eq(connection_id.0 as i32), - )) + .filter( + Condition::all() + .add(room_participant::Column::RoomId.eq(room_id)) + .add( + room_participant::Column::AnsweringConnectionId + .eq(connection.id as i32), + ) + .add( + room_participant::Column::AnsweringConnectionEpoch + .eq(connection.epoch as i32), + ), + ) .set(room_participant::ActiveModel { location_kind: ActiveValue::set(Some(location_kind)), location_project_id: ActiveValue::set(location_project_id), @@ -1437,11 +1487,21 @@ impl Database { pub async fn connection_lost( &self, - connection_id: ConnectionId, + connection: ConnectionId, ) -> Result>> { self.room_transaction(|tx| async move { let participant = room_participant::Entity::find() - .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0 as i32)) + .filter( + Condition::all() + .add( + room_participant::Column::AnsweringConnectionId + .eq(connection.id as i32), + ) + .add( + room_participant::Column::AnsweringConnectionEpoch + .eq(connection.epoch as i32), + ), + ) .one(&*tx) .await? 
.ok_or_else(|| anyhow!("not a participant in any room"))?; @@ -1456,11 +1516,25 @@ impl Database { let collaborator_on_projects = project_collaborator::Entity::find() .find_also_related(project::Entity) - .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0 as i32)) + .filter( + Condition::all() + .add(project_collaborator::Column::ConnectionId.eq(connection.id as i32)) + .add( + project_collaborator::Column::ConnectionEpoch + .eq(connection.epoch as i32), + ), + ) .all(&*tx) .await?; project_collaborator::Entity::delete_many() - .filter(project_collaborator::Column::ConnectionId.eq(connection_id.0 as i32)) + .filter( + Condition::all() + .add(project_collaborator::Column::ConnectionId.eq(connection.id as i32)) + .add( + project_collaborator::Column::ConnectionEpoch + .eq(connection.epoch as i32), + ), + ) .exec(&*tx) .await?; @@ -1473,20 +1547,30 @@ impl Database { .await?; let connection_ids = collaborators .into_iter() - .map(|collaborator| ConnectionId(collaborator.connection_id as u32)) + .map(|collaborator| ConnectionId { + id: collaborator.connection_id as u32, + epoch: collaborator.connection_epoch as u32, + }) .collect(); left_projects.push(LeftProject { id: project.id, host_user_id: project.host_user_id, - host_connection_id: ConnectionId(project.host_connection_id as u32), + host_connection_id: ConnectionId { + id: project.host_connection_id as u32, + epoch: project.host_connection_epoch as u32, + }, connection_ids, }); } } project::Entity::delete_many() - .filter(project::Column::HostConnectionId.eq(connection_id.0 as i32)) + .filter( + Condition::all() + .add(project::Column::HostConnectionId.eq(connection.id as i32)) + .add(project::Column::HostConnectionEpoch.eq(connection.epoch as i32)), + ) .exec(&*tx) .await?; @@ -1537,7 +1621,10 @@ impl Database { let mut pending_participants = Vec::new(); while let Some(db_participant) = db_participants.next().await { let db_participant = db_participant?; - if let Some(answering_connection_id) = db_participant.answering_connection_id { + if let Some((answering_connection_id, answering_connection_epoch)) = db_participant + .answering_connection_id + .zip(db_participant.answering_connection_epoch) + { let location = match ( db_participant.location_kind, db_participant.location_project_id, @@ -1560,7 +1647,10 @@ impl Database { answering_connection_id, proto::Participant { user_id: db_participant.user_id.to_proto(), - peer_id: answering_connection_id as u32, + peer_id: Some(proto::PeerId { + epoch: answering_connection_epoch as u32, + id: answering_connection_id as u32, + }), projects: Default::default(), location: Some(proto::ParticipantLocation { variant: location }), }, @@ -1637,12 +1727,22 @@ impl Database { pub async fn share_project( &self, room_id: RoomId, - connection_id: ConnectionId, + connection: ConnectionId, worktrees: &[proto::WorktreeMetadata], ) -> Result> { self.room_transaction(|tx| async move { let participant = room_participant::Entity::find() - .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0 as i32)) + .filter( + Condition::all() + .add( + room_participant::Column::AnsweringConnectionId + .eq(connection.id as i32), + ) + .add( + room_participant::Column::AnsweringConnectionEpoch + .eq(connection.epoch as i32), + ), + ) .one(&*tx) .await? 
.ok_or_else(|| anyhow!("could not find participant"))?; @@ -1653,8 +1753,8 @@ impl Database { let project = project::ActiveModel { room_id: ActiveValue::set(participant.room_id), host_user_id: ActiveValue::set(participant.user_id), - host_connection_id: ActiveValue::set(connection_id.0 as i32), - host_connection_epoch: ActiveValue::set(self.epoch()), + host_connection_id: ActiveValue::set(connection.id as i32), + host_connection_epoch: ActiveValue::set(connection.epoch as i32), ..Default::default() } .insert(&*tx) @@ -1678,8 +1778,8 @@ impl Database { project_collaborator::ActiveModel { project_id: ActiveValue::set(project.id), - connection_id: ActiveValue::set(connection_id.0 as i32), - connection_epoch: ActiveValue::set(self.epoch()), + connection_id: ActiveValue::set(connection.id as i32), + connection_epoch: ActiveValue::set(connection.epoch as i32), user_id: ActiveValue::set(participant.user_id), replica_id: ActiveValue::set(ReplicaId(0)), is_host: ActiveValue::set(true), @@ -1697,7 +1797,7 @@ impl Database { pub async fn unshare_project( &self, project_id: ProjectId, - connection_id: ConnectionId, + connection: ConnectionId, ) -> Result)>> { self.room_transaction(|tx| async move { let guest_connection_ids = self.project_guest_connection_ids(project_id, &tx).await?; @@ -1706,7 +1806,11 @@ impl Database { .one(&*tx) .await? .ok_or_else(|| anyhow!("project not found"))?; - if project.host_connection_id == connection_id.0 as i32 { + let host_connection = ConnectionId { + epoch: project.host_connection_epoch as u32, + id: project.host_connection_id as u32, + }; + if host_connection == connection { let room_id = project.room_id; project::Entity::delete(project.into_active_model()) .exec(&*tx) @@ -1723,12 +1827,16 @@ impl Database { pub async fn update_project( &self, project_id: ProjectId, - connection_id: ConnectionId, + connection: ConnectionId, worktrees: &[proto::WorktreeMetadata], ) -> Result)>> { self.room_transaction(|tx| async move { let project = project::Entity::find_by_id(project_id) - .filter(project::Column::HostConnectionId.eq(connection_id.0 as i32)) + .filter( + Condition::all() + .add(project::Column::HostConnectionId.eq(connection.id as i32)) + .add(project::Column::HostConnectionEpoch.eq(connection.epoch as i32)), + ) .one(&*tx) .await? .ok_or_else(|| anyhow!("no such project"))?; @@ -1774,7 +1882,7 @@ impl Database { pub async fn update_worktree( &self, update: &proto::UpdateWorktree, - connection_id: ConnectionId, + connection: ConnectionId, ) -> Result>> { self.room_transaction(|tx| async move { let project_id = ProjectId::from_proto(update.project_id); @@ -1782,7 +1890,11 @@ impl Database { // Ensure the update comes from the host. let project = project::Entity::find_by_id(project_id) - .filter(project::Column::HostConnectionId.eq(connection_id.0 as i32)) + .filter( + Condition::all() + .add(project::Column::HostConnectionId.eq(connection.id as i32)) + .add(project::Column::HostConnectionEpoch.eq(connection.epoch as i32)), + ) .one(&*tx) .await? .ok_or_else(|| anyhow!("no such project"))?; @@ -1862,7 +1974,7 @@ impl Database { pub async fn update_diagnostic_summary( &self, update: &proto::UpdateDiagnosticSummary, - connection_id: ConnectionId, + connection: ConnectionId, ) -> Result>> { self.room_transaction(|tx| async move { let project_id = ProjectId::from_proto(update.project_id); @@ -1877,7 +1989,11 @@ impl Database { .one(&*tx) .await? 
.ok_or_else(|| anyhow!("no such project"))?; - if project.host_connection_id != connection_id.0 as i32 { + let host_connection = ConnectionId { + epoch: project.host_connection_epoch as u32, + id: project.host_connection_id as u32, + }; + if host_connection != connection { return Err(anyhow!("can't update a project hosted by someone else"))?; } @@ -1916,7 +2032,7 @@ impl Database { pub async fn start_language_server( &self, update: &proto::StartLanguageServer, - connection_id: ConnectionId, + connection: ConnectionId, ) -> Result>> { self.room_transaction(|tx| async move { let project_id = ProjectId::from_proto(update.project_id); @@ -1930,7 +2046,11 @@ impl Database { .one(&*tx) .await? .ok_or_else(|| anyhow!("no such project"))?; - if project.host_connection_id != connection_id.0 as i32 { + let host_connection = ConnectionId { + epoch: project.host_connection_epoch as u32, + id: project.host_connection_id as u32, + }; + if host_connection != connection { return Err(anyhow!("can't update a project hosted by someone else"))?; } @@ -1961,11 +2081,21 @@ impl Database { pub async fn join_project( &self, project_id: ProjectId, - connection_id: ConnectionId, + connection: ConnectionId, ) -> Result> { self.room_transaction(|tx| async move { let participant = room_participant::Entity::find() - .filter(room_participant::Column::AnsweringConnectionId.eq(connection_id.0 as i32)) + .filter( + Condition::all() + .add( + room_participant::Column::AnsweringConnectionId + .eq(connection.id as i32), + ) + .add( + room_participant::Column::AnsweringConnectionEpoch + .eq(connection.epoch as i32), + ), + ) .one(&*tx) .await? .ok_or_else(|| anyhow!("must join a room first"))?; @@ -1992,8 +2122,8 @@ impl Database { } let new_collaborator = project_collaborator::ActiveModel { project_id: ActiveValue::set(project_id), - connection_id: ActiveValue::set(connection_id.0 as i32), - connection_epoch: ActiveValue::set(self.epoch()), + connection_id: ActiveValue::set(connection.id as i32), + connection_epoch: ActiveValue::set(connection.epoch as i32), user_id: ActiveValue::set(participant.user_id), replica_id: ActiveValue::set(replica_id), is_host: ActiveValue::set(false), @@ -2095,14 +2225,18 @@ impl Database { pub async fn leave_project( &self, project_id: ProjectId, - connection_id: ConnectionId, + connection: ConnectionId, ) -> Result> { self.room_transaction(|tx| async move { let result = project_collaborator::Entity::delete_many() .filter( - project_collaborator::Column::ProjectId - .eq(project_id) - .and(project_collaborator::Column::ConnectionId.eq(connection_id.0 as i32)), + Condition::all() + .add(project_collaborator::Column::ProjectId.eq(project_id)) + .add(project_collaborator::Column::ConnectionId.eq(connection.id as i32)) + .add( + project_collaborator::Column::ConnectionEpoch + .eq(connection.epoch as i32), + ), ) .exec(&*tx) .await?; @@ -2120,13 +2254,19 @@ impl Database { .await?; let connection_ids = collaborators .into_iter() - .map(|collaborator| ConnectionId(collaborator.connection_id as u32)) + .map(|collaborator| ConnectionId { + epoch: collaborator.connection_epoch as u32, + id: collaborator.connection_id as u32, + }) .collect(); let left_project = LeftProject { id: project_id, host_user_id: project.host_user_id, - host_connection_id: ConnectionId(project.host_connection_id as u32), + host_connection_id: ConnectionId { + epoch: project.host_connection_epoch as u32, + id: project.host_connection_id as u32, + }, connection_ids, }; Ok((project.room_id, left_project)) @@ -2137,7 +2277,7 @@ impl 
Database { pub async fn project_collaborators( &self, project_id: ProjectId, - connection_id: ConnectionId, + connection: ConnectionId, ) -> Result>> { self.room_transaction(|tx| async move { let project = project::Entity::find_by_id(project_id) @@ -2149,10 +2289,13 @@ impl Database { .all(&*tx) .await?; - if collaborators - .iter() - .any(|collaborator| collaborator.connection_id == connection_id.0 as i32) - { + if collaborators.iter().any(|collaborator| { + let collaborator_connection = ConnectionId { + epoch: collaborator.connection_epoch as u32, + id: collaborator.connection_id as u32, + }; + collaborator_connection == connection + }) { Ok((project.room_id, collaborators)) } else { Err(anyhow!("no such project"))? @@ -2166,30 +2309,42 @@ impl Database { project_id: ProjectId, connection_id: ConnectionId, ) -> Result>> { - self.room_transaction(|tx| async move { - #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] - enum QueryAs { - ConnectionId, - } + #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] + enum QueryAs { + Epoch, + Id, + } + + #[derive(Debug, FromQueryResult)] + struct ProjectConnection { + epoch: i32, + id: i32, + } + self.room_transaction(|tx| async move { let project = project::Entity::find_by_id(project_id) .one(&*tx) .await? .ok_or_else(|| anyhow!("no such project"))?; - let mut db_connection_ids = project_collaborator::Entity::find() + let mut db_connections = project_collaborator::Entity::find() .select_only() + .column_as(project_collaborator::Column::ConnectionId, QueryAs::Id) .column_as( - project_collaborator::Column::ConnectionId, - QueryAs::ConnectionId, + project_collaborator::Column::ConnectionEpoch, + QueryAs::Epoch, ) .filter(project_collaborator::Column::ProjectId.eq(project_id)) - .into_values::() + .into_model::() .stream(&*tx) .await?; let mut connection_ids = HashSet::default(); - while let Some(connection_id) = db_connection_ids.next().await { - connection_ids.insert(ConnectionId(connection_id? as u32)); + while let Some(connection) = db_connections.next().await { + let connection = connection?; + connection_ids.insert(ConnectionId { + epoch: connection.epoch as u32, + id: connection.id as u32, + }); } if connection_ids.contains(&connection_id) { @@ -2208,27 +2363,39 @@ impl Database { ) -> Result> { #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] enum QueryAs { - ConnectionId, + Epoch, + Id, } - let mut db_guest_connection_ids = project_collaborator::Entity::find() + #[derive(Debug, FromQueryResult)] + struct ProjectConnection { + epoch: i32, + id: i32, + } + + let mut db_guest_connections = project_collaborator::Entity::find() .select_only() + .column_as(project_collaborator::Column::ConnectionId, QueryAs::Id) .column_as( - project_collaborator::Column::ConnectionId, - QueryAs::ConnectionId, + project_collaborator::Column::ConnectionEpoch, + QueryAs::Epoch, ) .filter( project_collaborator::Column::ProjectId .eq(project_id) .and(project_collaborator::Column::IsHost.eq(false)), ) - .into_values::() + .into_model::() .stream(tx) .await?; let mut guest_connection_ids = Vec::new(); - while let Some(connection_id) = db_guest_connection_ids.next().await { - guest_connection_ids.push(ConnectionId(connection_id? 
as u32)); + while let Some(connection) = db_guest_connections.next().await { + let connection = connection?; + guest_connection_ids.push(ConnectionId { + epoch: connection.epoch as u32, + id: connection.id as u32, + }); } Ok(guest_connection_ids) } diff --git a/crates/collab/src/db/project.rs b/crates/collab/src/db/project.rs index 971a8fcefb465114c9703003e2a74f6f38d8c397..b8cf321e51e465aae6a011cb6159cd1ec1aa39ce 100644 --- a/crates/collab/src/db/project.rs +++ b/crates/collab/src/db/project.rs @@ -9,7 +9,7 @@ pub struct Model { pub room_id: RoomId, pub host_user_id: UserId, pub host_connection_id: i32, - pub host_connection_epoch: Uuid, + pub host_connection_epoch: i32, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/db/project_collaborator.rs b/crates/collab/src/db/project_collaborator.rs index 5db307f5df27ec07f282207eb253ddae95c44970..e12b113ff9999bdede1b5ff0e71905b112926eac 100644 --- a/crates/collab/src/db/project_collaborator.rs +++ b/crates/collab/src/db/project_collaborator.rs @@ -8,7 +8,7 @@ pub struct Model { pub id: ProjectCollaboratorId, pub project_id: ProjectId, pub connection_id: i32, - pub connection_epoch: Uuid, + pub connection_epoch: i32, pub user_id: UserId, pub replica_id: ReplicaId, pub is_host: bool, diff --git a/crates/collab/src/db/room_participant.rs b/crates/collab/src/db/room_participant.rs index c80c10c1bae25cc1b79a499a4ed5b310f2209527..265febd5454241015f88a9e36a582a6fa508e9a3 100644 --- a/crates/collab/src/db/room_participant.rs +++ b/crates/collab/src/db/room_participant.rs @@ -9,14 +9,14 @@ pub struct Model { pub room_id: RoomId, pub user_id: UserId, pub answering_connection_id: Option, - pub answering_connection_epoch: Option, + pub answering_connection_epoch: Option, pub answering_connection_lost: bool, pub location_kind: Option, pub location_project_id: Option, pub initial_project_id: Option, pub calling_user_id: UserId, pub calling_connection_id: i32, - pub calling_connection_epoch: Uuid, + pub calling_connection_epoch: i32, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index 2d254c2e3702d48b4451301977a988fd1686d91f..b6b8a780a7ec2fc9bc930cec0c146c0601280d5a 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -436,36 +436,44 @@ test_both_dbs!( .unwrap(); let room_id = RoomId::from_proto( - db.create_room(user1.user_id, ConnectionId(0), "") + db.create_room(user1.user_id, ConnectionId { epoch: 0, id: 0 }, "") .await .unwrap() .id, ); - db.call(room_id, user1.user_id, ConnectionId(0), user2.user_id, None) - .await - .unwrap(); - db.join_room(room_id, user2.user_id, ConnectionId(1)) + db.call( + room_id, + user1.user_id, + ConnectionId { epoch: 0, id: 0 }, + user2.user_id, + None, + ) + .await + .unwrap(); + db.join_room(room_id, user2.user_id, ConnectionId { epoch: 0, id: 1 }) .await .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 0); - db.share_project(room_id, ConnectionId(1), &[]) + db.share_project(room_id, ConnectionId { epoch: 0, id: 1 }, &[]) .await .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 1); - db.share_project(room_id, ConnectionId(1), &[]) + db.share_project(room_id, ConnectionId { epoch: 0, id: 1 }, &[]) .await .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 2); // Projects shared by admins aren't counted. 
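The database changes above all repeat one pattern: connection ownership is no longer checked against a bare integer, but against the pair of columns (the connection id plus the epoch of the server that issued it), reconstructed into a full ConnectionId before comparison. A hedged sketch of that pattern, condensed from the hunks above (the is_host helper name is illustrative and not part of the patch; project::Model and ConnectionId are the types shown in the diffs):

    // Illustrative helper mirroring the host check repeated throughout db.rs.
    // The two i32 columns come from the `projects` table; `ConnectionId` is the
    // { epoch, id } struct introduced in rpc::peer.
    fn is_host(project: &project::Model, connection: ConnectionId) -> bool {
        let host_connection = ConnectionId {
            epoch: project.host_connection_epoch as u32,
            id: project.host_connection_id as u32,
        };
        host_connection == connection
    }

Comparing on both fields is what prevents a connection issued by a previous server process (an older epoch) from being mistaken for the current host after a restart.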
- db.share_project(room_id, ConnectionId(0), &[]) + db.share_project(room_id, ConnectionId { epoch: 0, id: 0 }, &[]) .await .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 2); - db.leave_room(ConnectionId(1)).await.unwrap(); + db.leave_room(ConnectionId { epoch: 0, id: 1 }) + .await + .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 0); } ); diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index bee8f9a34fd608d7c07a98de7b273ea66c624b1f..2c2a301733f81659de8d9609c617e2a56a4b5551 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -7,8 +7,8 @@ use crate::{ use anyhow::anyhow; use call::{room, ActiveCall, ParticipantLocation, Room}; use client::{ - self, test::FakeHttpClient, Client, Connection, Credentials, EstablishConnectionError, PeerId, - User, UserStore, RECEIVE_TIMEOUT, + self, proto::PeerId, test::FakeHttpClient, Client, Connection, Credentials, + EstablishConnectionError, User, UserStore, RECEIVE_TIMEOUT, }; use collections::{BTreeMap, HashMap, HashSet}; use editor::{ @@ -6066,7 +6066,7 @@ async fn test_random_collaboration( .user_connection_ids(removed_guest_id) .collect::>(); assert_eq!(user_connection_ids.len(), 1); - let removed_peer_id = PeerId(user_connection_ids[0].0); + let removed_peer_id = user_connection_ids[0].into(); let guest = clients.remove(guest_ix); op_start_signals.remove(guest_ix); server.forbid_connections(); @@ -6115,7 +6115,7 @@ async fn test_random_collaboration( .user_connection_ids(user_id) .collect::>(); assert_eq!(user_connection_ids.len(), 1); - let peer_id = PeerId(user_connection_ids[0].0); + let peer_id = user_connection_ids[0].into(); server.disconnect_client(peer_id); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); operations += 1; @@ -6429,7 +6429,7 @@ impl TestServer { let connection_id = connection_id_rx.await.unwrap(); connection_killers .lock() - .insert(PeerId(connection_id.0), killed); + .insert(connection_id.into(), killed); Ok(client_conn) } }) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 70802fa4312f96662dd26504c58c4c0c79988262..856eb36fb1fecebb27a9779059a06cd07b0c4667 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -170,7 +170,7 @@ where impl Server { pub fn new(app_state: Arc, executor: Executor) -> Arc { let mut server = Self { - peer: Peer::new(), + peer: Peer::new(0), app_state, executor, connection_pool: Default::default(), @@ -457,9 +457,9 @@ impl Server { move |duration| executor.sleep(duration) }); - tracing::info!(%user_id, %login, %connection_id, %address, "connection opened"); - this.peer.send(connection_id, proto::Hello { peer_id: connection_id.0 })?; - tracing::info!(%user_id, %login, %connection_id, %address, "sent hello message"); + tracing::info!(%user_id, %login, ?connection_id, %address, "connection opened"); + this.peer.send(connection_id, proto::Hello { peer_id: Some(connection_id.into()) })?; + tracing::info!(%user_id, %login, ?connection_id, %address, "sent hello message"); if let Some(send_connection_id) = send_connection_id.take() { let _ = send_connection_id.send(connection_id); @@ -521,7 +521,7 @@ impl Server { _ = teardown.changed().fuse() => return Ok(()), result = handle_io => { if let Err(error) = result { - tracing::error!(?error, %user_id, %login, %connection_id, %address, "error handling I/O"); + tracing::error!(?error, %user_id, %login, ?connection_id, %address, "error handling I/O"); } break; } @@ 
-529,7 +529,7 @@ impl Server { message = next_message => { if let Some(message) = message { let type_name = message.payload_type_name(); - let span = tracing::info_span!("receive message", %user_id, %login, %connection_id, %address, type_name); + let span = tracing::info_span!("receive message", %user_id, %login, ?connection_id, %address, type_name); let span_enter = span.enter(); if let Some(handler) = this.handlers.get(&message.payload_type_id()) { let is_background = message.is_background(); @@ -543,10 +543,10 @@ impl Server { foreground_message_handlers.push(handle_message); } } else { - tracing::error!(%user_id, %login, %connection_id, %address, "no message handler"); + tracing::error!(%user_id, %login, ?connection_id, %address, "no message handler"); } } else { - tracing::info!(%user_id, %login, %connection_id, %address, "connection closed"); + tracing::info!(%user_id, %login, ?connection_id, %address, "connection closed"); break; } } @@ -554,9 +554,9 @@ impl Server { } drop(foreground_message_handlers); - tracing::info!(%user_id, %login, %connection_id, %address, "signing out"); + tracing::info!(%user_id, %login, ?connection_id, %address, "signing out"); if let Err(error) = sign_out(session, teardown, executor).await { - tracing::error!(%user_id, %login, %connection_id, %address, ?error, "error signing out"); + tracing::error!(%user_id, %login, ?connection_id, %address, ?error, "error signing out"); } Ok(()) @@ -1128,12 +1128,18 @@ async fn join_project( let collaborators = project .collaborators .iter() - .filter(|collaborator| collaborator.connection_id != session.connection_id.0 as i32) - .map(|collaborator| proto::Collaborator { - peer_id: collaborator.connection_id as u32, - replica_id: collaborator.replica_id.0 as u32, - user_id: collaborator.user_id.to_proto(), + .map(|collaborator| { + let peer_id = proto::PeerId { + epoch: collaborator.connection_epoch as u32, + id: collaborator.connection_id as u32, + }; + proto::Collaborator { + peer_id: Some(peer_id), + replica_id: collaborator.replica_id.0 as u32, + user_id: collaborator.user_id.to_proto(), + } }) + .filter(|collaborator| collaborator.peer_id != Some(session.connection_id.into())) .collect::>(); let worktrees = project .worktrees @@ -1150,11 +1156,11 @@ async fn join_project( session .peer .send( - ConnectionId(collaborator.peer_id), + collaborator.peer_id.unwrap().into(), proto::AddProjectCollaborator { project_id: project_id.to_proto(), collaborator: Some(proto::Collaborator { - peer_id: session.connection_id.0, + peer_id: Some(session.connection_id.into()), replica_id: replica_id.0 as u32, user_id: guest_user_id.to_proto(), }), @@ -1375,13 +1381,14 @@ where .await .project_collaborators(project_id, session.connection_id) .await?; - ConnectionId( - collaborators - .iter() - .find(|collaborator| collaborator.is_host) - .ok_or_else(|| anyhow!("host not found"))? 
- .connection_id as u32, - ) + let host = collaborators + .iter() + .find(|collaborator| collaborator.is_host) + .ok_or_else(|| anyhow!("host not found"))?; + ConnectionId { + epoch: host.connection_epoch as u32, + id: host.connection_id as u32, + } }; let payload = session @@ -1409,7 +1416,10 @@ async fn save_buffer( .iter() .find(|collaborator| collaborator.is_host) .ok_or_else(|| anyhow!("host not found"))?; - ConnectionId(host.connection_id as u32) + ConnectionId { + epoch: host.connection_epoch as u32, + id: host.connection_id as u32, + } }; let response_payload = session .peer @@ -1421,11 +1431,17 @@ async fn save_buffer( .await .project_collaborators(project_id, session.connection_id) .await?; - collaborators - .retain(|collaborator| collaborator.connection_id != session.connection_id.0 as i32); - let project_connection_ids = collaborators - .iter() - .map(|collaborator| ConnectionId(collaborator.connection_id as u32)); + collaborators.retain(|collaborator| { + let collaborator_connection = ConnectionId { + epoch: collaborator.connection_epoch as u32, + id: collaborator.connection_id as u32, + }; + collaborator_connection != session.connection_id + }); + let project_connection_ids = collaborators.iter().map(|collaborator| ConnectionId { + epoch: collaborator.connection_epoch as u32, + id: collaborator.connection_id as u32, + }); broadcast(host_connection_id, project_connection_ids, |conn_id| { session .peer @@ -1439,11 +1455,10 @@ async fn create_buffer_for_peer( request: proto::CreateBufferForPeer, session: Session, ) -> Result<()> { - session.peer.forward_send( - session.connection_id, - ConnectionId(request.peer_id), - request, - )?; + let peer_id = request.peer_id.ok_or_else(|| anyhow!("invalid peer id"))?; + session + .peer + .forward_send(session.connection_id, peer_id.into(), request)?; Ok(()) } @@ -1536,7 +1551,10 @@ async fn follow( session: Session, ) -> Result<()> { let project_id = ProjectId::from_proto(request.project_id); - let leader_id = ConnectionId(request.leader_id); + let leader_id = request + .leader_id + .ok_or_else(|| anyhow!("invalid leader id"))? + .into(); let follower_id = session.connection_id; { let project_connection_ids = session @@ -1556,14 +1574,17 @@ async fn follow( .await?; response_payload .views - .retain(|view| view.leader_id != Some(follower_id.0)); + .retain(|view| view.leader_id != Some(follower_id.into())); response.send(response_payload)?; Ok(()) } async fn unfollow(request: proto::Unfollow, session: Session) -> Result<()> { let project_id = ProjectId::from_proto(request.project_id); - let leader_id = ConnectionId(request.leader_id); + let leader_id = request + .leader_id + .ok_or_else(|| anyhow!("invalid leader id"))? 
+ .into(); let project_connection_ids = session .db() .await @@ -1592,12 +1613,16 @@ async fn update_followers(request: proto::UpdateFollowers, session: Session) -> proto::update_followers::Variant::UpdateView(payload) => payload.leader_id, proto::update_followers::Variant::UpdateActiveView(payload) => payload.leader_id, }); - for follower_id in &request.follower_ids { - let follower_id = ConnectionId(*follower_id); - if project_connection_ids.contains(&follower_id) && Some(follower_id.0) != leader_id { - session - .peer - .forward_send(session.connection_id, follower_id, request.clone())?; + for follower_peer_id in request.follower_ids.iter().copied() { + let follower_connection_id = follower_peer_id.into(); + if project_connection_ids.contains(&follower_connection_id) + && Some(follower_peer_id) != leader_id + { + session.peer.forward_send( + session.connection_id, + follower_connection_id, + request.clone(), + )?; } } Ok(()) @@ -1912,13 +1937,19 @@ fn contact_for_user( fn room_updated(room: &proto::Room, peer: &Peer) { for participant in &room.participants { - peer.send( - ConnectionId(participant.peer_id), - proto::RoomUpdated { - room: Some(room.clone()), - }, - ) - .trace_err(); + if let Some(peer_id) = participant + .peer_id + .ok_or_else(|| anyhow!("invalid participant peer id")) + .trace_err() + { + peer.send( + peer_id.into(), + proto::RoomUpdated { + room: Some(room.clone()), + }, + ) + .trace_err(); + } } } @@ -2033,7 +2064,7 @@ fn project_left(project: &db::LeftProject, session: &Session) { *connection_id, proto::RemoveProjectCollaborator { project_id: project.id.to_proto(), - peer_id: session.connection_id.0, + peer_id: Some(session.connection_id.into()), }, ) .trace_err(); diff --git a/crates/collab_ui/src/collab_titlebar_item.rs b/crates/collab_ui/src/collab_titlebar_item.rs index ab414e051bcfeb2ec77ab6bac751853fe41b0ab8..2288f77cd358996f5e0ecba98d09d56a42a3dd76 100644 --- a/crates/collab_ui/src/collab_titlebar_item.rs +++ b/crates/collab_ui/src/collab_titlebar_item.rs @@ -1,6 +1,6 @@ use crate::{contact_notification::ContactNotification, contacts_popover}; use call::{ActiveCall, ParticipantLocation}; -use client::{Authenticate, ContactEventKind, PeerId, User, UserStore}; +use client::{proto::PeerId, Authenticate, ContactEventKind, User, UserStore}; use clock::ReplicaId; use contacts_popover::ContactsPopover; use gpui::{ @@ -474,7 +474,7 @@ impl CollabTitlebarItem { cx.dispatch_action(ToggleFollow(peer_id)) }) .with_tooltip::( - peer_id.0 as usize, + peer_id.as_u64() as usize, if is_followed { format!("Unfollow {}", peer_github_login) } else { @@ -487,22 +487,24 @@ impl CollabTitlebarItem { .boxed() } else if let ParticipantLocation::SharedProject { project_id } = location { let user_id = user.id; - MouseEventHandler::::new(peer_id.0 as usize, cx, move |_, _| content) - .with_cursor_style(CursorStyle::PointingHand) - .on_click(MouseButton::Left, move |_, cx| { - cx.dispatch_action(JoinProject { - project_id, - follow_user_id: user_id, - }) + MouseEventHandler::::new(peer_id.as_u64() as usize, cx, move |_, _| { + content + }) + .with_cursor_style(CursorStyle::PointingHand) + .on_click(MouseButton::Left, move |_, cx| { + cx.dispatch_action(JoinProject { + project_id, + follow_user_id: user_id, }) - .with_tooltip::( - peer_id.0 as usize, - format!("Follow {} into external project", peer_github_login), - Some(Box::new(FollowNextCollaborator)), - theme.tooltip.clone(), - cx, - ) - .boxed() + }) + .with_tooltip::( + peer_id.as_u64() as usize, + format!("Follow {} into external 
project", peer_github_login), + Some(Box::new(FollowNextCollaborator)), + theme.tooltip.clone(), + cx, + ) + .boxed() } else { content } diff --git a/crates/collab_ui/src/contact_list.rs b/crates/collab_ui/src/contact_list.rs index bc8b2947c4b278fa05d6e78c091a6517b971e890..48a4d1a2b541d69f7fa8c41ab21da1e766103301 100644 --- a/crates/collab_ui/src/contact_list.rs +++ b/crates/collab_ui/src/contact_list.rs @@ -2,7 +2,7 @@ use std::{mem, sync::Arc}; use crate::contacts_popover; use call::ActiveCall; -use client::{Contact, PeerId, User, UserStore}; +use client::{proto::PeerId, Contact, User, UserStore}; use editor::{Cancel, Editor}; use fuzzy::{match_strings, StringMatchCandidate}; use gpui::{ @@ -465,7 +465,7 @@ impl ContactList { room.remote_participants() .iter() .map(|(peer_id, participant)| StringMatchCandidate { - id: peer_id.0 as usize, + id: peer_id.as_u64() as usize, string: participant.user.github_login.clone(), char_bag: participant.user.github_login.chars().collect(), }), @@ -479,7 +479,7 @@ impl ContactList { executor.clone(), )); for mat in matches { - let peer_id = PeerId(mat.candidate_id as u32); + let peer_id = PeerId::from_u64(mat.candidate_id as u64); let participant = &room.remote_participants()[&peer_id]; participant_entries.push(ContactEntry::CallParticipant { user: participant.user.clone(), @@ -881,75 +881,80 @@ impl ContactList { let baseline_offset = row.name.text.baseline_offset(font_cache) + (theme.row_height - line_height) / 2.; - MouseEventHandler::::new(peer_id.0 as usize, cx, |mouse_state, _| { - let tree_branch = *tree_branch.style_for(mouse_state, is_selected); - let row = theme.project_row.style_for(mouse_state, is_selected); - - Flex::row() - .with_child( - Stack::new() - .with_child( - Canvas::new(move |bounds, _, cx| { - let start_x = bounds.min_x() + (bounds.width() / 2.) - - (tree_branch.width / 2.); - let end_x = bounds.max_x(); - let start_y = bounds.min_y(); - let end_y = bounds.min_y() + baseline_offset - (cap_height / 2.); + MouseEventHandler::::new( + peer_id.as_u64() as usize, + cx, + |mouse_state, _| { + let tree_branch = *tree_branch.style_for(mouse_state, is_selected); + let row = theme.project_row.style_for(mouse_state, is_selected); - cx.scene.push_quad(gpui::Quad { - bounds: RectF::from_points( - vec2f(start_x, start_y), - vec2f( - start_x + tree_branch.width, - if is_last { end_y } else { bounds.max_y() }, + Flex::row() + .with_child( + Stack::new() + .with_child( + Canvas::new(move |bounds, _, cx| { + let start_x = bounds.min_x() + (bounds.width() / 2.) 
+ - (tree_branch.width / 2.); + let end_x = bounds.max_x(); + let start_y = bounds.min_y(); + let end_y = + bounds.min_y() + baseline_offset - (cap_height / 2.); + + cx.scene.push_quad(gpui::Quad { + bounds: RectF::from_points( + vec2f(start_x, start_y), + vec2f( + start_x + tree_branch.width, + if is_last { end_y } else { bounds.max_y() }, + ), ), - ), - background: Some(tree_branch.color), - border: gpui::Border::default(), - corner_radius: 0., - }); - cx.scene.push_quad(gpui::Quad { - bounds: RectF::from_points( - vec2f(start_x, end_y), - vec2f(end_x, end_y + tree_branch.width), - ), - background: Some(tree_branch.color), - border: gpui::Border::default(), - corner_radius: 0., - }); - }) + background: Some(tree_branch.color), + border: gpui::Border::default(), + corner_radius: 0., + }); + cx.scene.push_quad(gpui::Quad { + bounds: RectF::from_points( + vec2f(start_x, end_y), + vec2f(end_x, end_y + tree_branch.width), + ), + background: Some(tree_branch.color), + border: gpui::Border::default(), + corner_radius: 0., + }); + }) + .boxed(), + ) + .constrained() + .with_width(host_avatar_height) .boxed(), - ) - .constrained() - .with_width(host_avatar_height) - .boxed(), - ) - .with_child( - Svg::new("icons/disable_screen_sharing_12.svg") - .with_color(row.icon.color) - .constrained() - .with_width(row.icon.width) - .aligned() - .left() - .contained() - .with_style(row.icon.container) - .boxed(), - ) - .with_child( - Label::new("Screen".into(), row.name.text.clone()) - .aligned() - .left() - .contained() - .with_style(row.name.container) - .flex(1., false) - .boxed(), - ) - .constrained() - .with_height(theme.row_height) - .contained() - .with_style(row.container) - .boxed() - }) + ) + .with_child( + Svg::new("icons/disable_screen_sharing_12.svg") + .with_color(row.icon.color) + .constrained() + .with_width(row.icon.width) + .aligned() + .left() + .contained() + .with_style(row.icon.container) + .boxed(), + ) + .with_child( + Label::new("Screen".into(), row.name.text.clone()) + .aligned() + .left() + .contained() + .with_style(row.name.container) + .flex(1., false) + .boxed(), + ) + .constrained() + .with_height(theme.row_height) + .contained() + .with_style(row.container) + .boxed() + }, + ) .with_cursor_style(CursorStyle::PointingHand) .on_click(MouseButton::Left, move |_, cx| { cx.dispatch_action(OpenSharedScreen { peer_id }); diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index 3ea12617351ecc1708741ad1a60aef6e73702740..a0eb84558154e8f26c129ce6c7d5f7379b3fd0d6 100644 --- a/crates/project/src/lsp_command.rs +++ b/crates/project/src/lsp_command.rs @@ -3,7 +3,7 @@ use crate::{ }; use anyhow::{anyhow, Result}; use async_trait::async_trait; -use client::{proto, PeerId}; +use client::proto::{self, PeerId}; use gpui::{AppContext, AsyncAppContext, ModelHandle}; use language::{ point_from_lsp, point_to_lsp, diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 9b4a163af4e6f78e397c730f444a4f697ece1e22..7f2fcb516f82bd6102e9abd391c406b2b6e34af8 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -7,7 +7,7 @@ pub mod worktree; mod project_tests; use anyhow::{anyhow, Context, Result}; -use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; +use client::{proto, Client, TypedEnvelope, UserStore}; use clock::ReplicaId; use collections::{hash_map, BTreeMap, HashMap, HashSet}; use futures::{ @@ -15,7 +15,6 @@ use futures::{ future::Shared, AsyncWriteExt, Future, FutureExt, StreamExt, TryFutureExt, }; - use 
gpui::{ AnyModelHandle, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, UpgradeModelHandle, WeakModelHandle, @@ -103,11 +102,11 @@ pub struct Project { user_store: ModelHandle, fs: Arc, client_state: Option, - collaborators: HashMap, + collaborators: HashMap, client_subscriptions: Vec, _subscriptions: Vec, opened_buffer: (watch::Sender<()>, watch::Receiver<()>), - shared_buffers: HashMap>, + shared_buffers: HashMap>, #[allow(clippy::type_complexity)] loading_buffers: HashMap< ProjectPath, @@ -164,7 +163,7 @@ enum ProjectClientState { #[derive(Clone, Debug)] pub struct Collaborator { - pub peer_id: PeerId, + pub peer_id: proto::PeerId, pub replica_id: ReplicaId, } @@ -185,7 +184,7 @@ pub enum Event { }, RemoteIdChanged(Option), DisconnectedFromHost, - CollaboratorLeft(PeerId), + CollaboratorLeft(proto::PeerId), } pub enum LanguageServerState { @@ -555,7 +554,7 @@ impl Project { .await?; let mut collaborators = HashMap::default(); for message in response.collaborators { - let collaborator = Collaborator::from_proto(message); + let collaborator = Collaborator::from_proto(message)?; collaborators.insert(collaborator.peer_id, collaborator); } @@ -754,7 +753,7 @@ impl Project { } } - pub fn collaborators(&self) -> &HashMap { + pub fn collaborators(&self) -> &HashMap { &self.collaborators } @@ -4605,7 +4604,7 @@ impl Project { .take() .ok_or_else(|| anyhow!("empty collaborator"))?; - let collaborator = Collaborator::from_proto(collaborator); + let collaborator = Collaborator::from_proto(collaborator)?; this.update(&mut cx, |this, cx| { this.collaborators .insert(collaborator.peer_id, collaborator); @@ -4622,7 +4621,10 @@ impl Project { mut cx: AsyncAppContext, ) -> Result<()> { this.update(&mut cx, |this, cx| { - let peer_id = PeerId(envelope.payload.peer_id); + let peer_id = envelope + .payload + .peer_id + .ok_or_else(|| anyhow!("invalid peer id"))?; let replica_id = this .collaborators .remove(&peer_id) @@ -5489,7 +5491,7 @@ impl Project { fn serialize_project_transaction_for_peer( &mut self, project_transaction: ProjectTransaction, - peer_id: PeerId, + peer_id: proto::PeerId, cx: &AppContext, ) -> proto::ProjectTransaction { let mut serialized_transaction = proto::ProjectTransaction { @@ -5545,7 +5547,7 @@ impl Project { fn create_buffer_for_peer( &mut self, buffer: &ModelHandle, - peer_id: PeerId, + peer_id: proto::PeerId, cx: &AppContext, ) -> u64 { let buffer_id = buffer.read(cx).remote_id(); @@ -5563,7 +5565,7 @@ impl Project { client.send(proto::CreateBufferForPeer { project_id, - peer_id: peer_id.0, + peer_id: Some(peer_id), variant: Some(proto::create_buffer_for_peer::Variant::State(state)), })?; @@ -5580,7 +5582,7 @@ impl Project { let is_last = operations.is_empty(); client.send(proto::CreateBufferForPeer { project_id, - peer_id: peer_id.0, + peer_id: Some(peer_id), variant: Some(proto::create_buffer_for_peer::Variant::Chunk( proto::BufferChunk { buffer_id, @@ -6036,11 +6038,11 @@ impl Entity for Project { } impl Collaborator { - fn from_proto(message: proto::Collaborator) -> Self { - Self { - peer_id: PeerId(message.peer_id), + fn from_proto(message: proto::Collaborator) -> Result { + Ok(Self { + peer_id: message.peer_id.ok_or_else(|| anyhow!("invalid peer id"))?, replica_id: message.replica_id as ReplicaId, - } + }) } } diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index cf58adfe0b04f764af98b65419eaaca8cfaa546e..064bf6f50a4cd96dc40389c842fabce91497abb3 100644 --- a/crates/rpc/proto/zed.proto +++ 
b/crates/rpc/proto/zed.proto @@ -1,10 +1,15 @@ syntax = "proto3"; package zed.messages; +message PeerId { + uint32 epoch = 1; + uint32 id = 2; +} + message Envelope { uint32 id = 1; optional uint32 responding_to = 2; - optional uint32 original_sender_id = 3; + PeerId original_sender_id = 3; oneof payload { Hello hello = 4; Ack ack = 5; @@ -125,7 +130,7 @@ message Envelope { // Messages message Hello { - uint32 peer_id = 1; + PeerId peer_id = 1; } message Ping {} @@ -167,7 +172,7 @@ message Room { message Participant { uint64 user_id = 1; - uint32 peer_id = 2; + PeerId peer_id = 2; repeated ParticipantProject projects = 3; ParticipantLocation location = 4; } @@ -319,7 +324,7 @@ message AddProjectCollaborator { message RemoveProjectCollaborator { uint64 project_id = 1; - uint32 peer_id = 2; + PeerId peer_id = 2; } message GetDefinition { @@ -438,7 +443,7 @@ message OpenBufferResponse { message CreateBufferForPeer { uint64 project_id = 1; - uint32 peer_id = 2; + PeerId peer_id = 2; oneof variant { BufferState state = 3; BufferChunk chunk = 4; @@ -794,7 +799,7 @@ message UpdateDiagnostics { message Follow { uint64 project_id = 1; - uint32 leader_id = 2; + PeerId leader_id = 2; } message FollowResponse { @@ -804,7 +809,7 @@ message FollowResponse { message UpdateFollowers { uint64 project_id = 1; - repeated uint32 follower_ids = 2; + repeated PeerId follower_ids = 2; oneof variant { UpdateActiveView update_active_view = 3; View create_view = 4; @@ -814,7 +819,7 @@ message UpdateFollowers { message Unfollow { uint64 project_id = 1; - uint32 leader_id = 2; + PeerId leader_id = 2; } message GetPrivateUserInfo {} @@ -828,12 +833,12 @@ message GetPrivateUserInfoResponse { message UpdateActiveView { optional uint64 id = 1; - optional uint32 leader_id = 2; + optional PeerId leader_id = 2; } message UpdateView { uint64 id = 1; - optional uint32 leader_id = 2; + optional PeerId leader_id = 2; oneof variant { Editor editor = 3; @@ -849,7 +854,7 @@ message UpdateView { message View { uint64 id = 1; - optional uint32 leader_id = 2; + optional PeerId leader_id = 2; oneof variant { Editor editor = 3; @@ -865,7 +870,7 @@ message View { } message Collaborator { - uint32 peer_id = 1; + PeerId peer_id = 1; uint32 replica_id = 2; uint64 user_id = 3; } diff --git a/crates/rpc/src/macros.rs b/crates/rpc/src/macros.rs index 38d35893ee70c34615da14aabb561c417f83170b..ebed976968e14590a472efffb3caf9d2eb25ed44 100644 --- a/crates/rpc/src/macros.rs +++ b/crates/rpc/src/macros.rs @@ -6,7 +6,10 @@ macro_rules! messages { $(Some(envelope::Payload::$name(payload)) => { Some(Box::new(TypedEnvelope { sender_id, - original_sender_id: envelope.original_sender_id.map(PeerId), + original_sender_id: envelope.original_sender_id.map(|original_sender| PeerId { + epoch: original_sender.epoch, + id: original_sender.id + }), message_id: envelope.id, payload, })) @@ -24,7 +27,7 @@ macro_rules! 
messages { self, id: u32, responding_to: Option, - original_sender_id: Option, + original_sender_id: Option, ) -> Envelope { Envelope { id, diff --git a/crates/rpc/src/peer.rs b/crates/rpc/src/peer.rs index 66ba6a40292d87d13a83943b0e40239ee37b526d..43e4dfbde87aab59264f2334df9484ab0ae5a5b3 100644 --- a/crates/rpc/src/peer.rs +++ b/crates/rpc/src/peer.rs @@ -1,5 +1,5 @@ use super::{ - proto::{self, AnyTypedEnvelope, EnvelopedMessage, MessageStream, RequestMessage}, + proto::{self, AnyTypedEnvelope, EnvelopedMessage, MessageStream, PeerId, RequestMessage}, Connection, }; use anyhow::{anyhow, Context, Result}; @@ -11,9 +11,8 @@ use futures::{ }; use parking_lot::{Mutex, RwLock}; use serde::{ser::SerializeStruct, Serialize}; -use std::sync::atomic::Ordering::SeqCst; +use std::{fmt, sync::atomic::Ordering::SeqCst}; use std::{ - fmt, future::Future, marker::PhantomData, sync::{ @@ -25,20 +24,32 @@ use std::{ use tracing::instrument; #[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Serialize)] -pub struct ConnectionId(pub u32); +pub struct ConnectionId { + pub epoch: u32, + pub id: u32, +} -impl fmt::Display for ConnectionId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) +impl Into for ConnectionId { + fn into(self) -> PeerId { + PeerId { + epoch: self.epoch, + id: self.id, + } } } -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -pub struct PeerId(pub u32); +impl From for ConnectionId { + fn from(peer_id: PeerId) -> Self { + Self { + epoch: peer_id.epoch, + id: peer_id.id, + } + } +} -impl fmt::Display for PeerId { +impl fmt::Display for ConnectionId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) + write!(f, "{}/{}", self.epoch, self.id) } } @@ -70,6 +81,7 @@ pub struct TypedEnvelope { impl TypedEnvelope { pub fn original_sender_id(&self) -> Result { self.original_sender_id + .clone() .ok_or_else(|| anyhow!("missing original_sender_id")) } } @@ -85,6 +97,7 @@ impl TypedEnvelope { } pub struct Peer { + epoch: u32, pub connections: RwLock>, next_connection_id: AtomicU32, } @@ -105,8 +118,9 @@ const WRITE_TIMEOUT: Duration = Duration::from_secs(2); pub const RECEIVE_TIMEOUT: Duration = Duration::from_secs(5); impl Peer { - pub fn new() -> Arc { + pub fn new(epoch: u32) -> Arc { Arc::new(Self { + epoch, connections: Default::default(), next_connection_id: Default::default(), }) @@ -138,7 +152,10 @@ impl Peer { let (mut incoming_tx, incoming_rx) = mpsc::channel(INCOMING_BUFFER_SIZE); let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded(); - let connection_id = ConnectionId(self.next_connection_id.fetch_add(1, SeqCst)); + let connection_id = ConnectionId { + epoch: self.epoch, + id: self.next_connection_id.fetch_add(1, SeqCst), + }; let connection_state = ConnectionState { outgoing_tx, next_message_id: Default::default(), @@ -150,12 +167,12 @@ impl Peer { let this = self.clone(); let response_channels = connection_state.response_channels.clone(); let handle_io = async move { - tracing::debug!(%connection_id, "handle io future: start"); + tracing::debug!(?connection_id, "handle io future: start"); let _end_connection = util::defer(|| { response_channels.lock().take(); this.connections.write().remove(&connection_id); - tracing::debug!(%connection_id, "handle io future: end"); + tracing::debug!(?connection_id, "handle io future: end"); }); // Send messages on this frequency so the connection isn't closed. 
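With ConnectionId and the wire-level PeerId now carrying the same { epoch, id } pair, the two types convert losslessly in either direction, and ConnectionId renders as "epoch/id". A minimal sketch of those conversions, assuming only the Into/From and Display impls shown in the hunk above (the literal values are illustrative):

    // Round-trip between the in-process ConnectionId and the protobuf PeerId.
    let connection = ConnectionId { epoch: 3, id: 7 };
    let peer: PeerId = connection.into();        // PeerId { epoch: 3, id: 7 }
    let back: ConnectionId = peer.into();        // equal to `connection`
    assert_eq!(back, connection);
    assert_eq!(connection.to_string(), "3/7");   // Display writes "epoch/id"

Keeping the epoch inside the id means two connections created by different server processes can never compare equal, even if their per-process counters happen to collide.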
@@ -167,68 +184,68 @@ impl Peer { futures::pin_mut!(receive_timeout); loop { - tracing::debug!(%connection_id, "outer loop iteration start"); + tracing::debug!(?connection_id, "outer loop iteration start"); let read_message = reader.read().fuse(); futures::pin_mut!(read_message); loop { - tracing::debug!(%connection_id, "inner loop iteration start"); + tracing::debug!(?connection_id, "inner loop iteration start"); futures::select_biased! { outgoing = outgoing_rx.next().fuse() => match outgoing { Some(outgoing) => { - tracing::debug!(%connection_id, "outgoing rpc message: writing"); + tracing::debug!(?connection_id, "outgoing rpc message: writing"); futures::select_biased! { result = writer.write(outgoing).fuse() => { - tracing::debug!(%connection_id, "outgoing rpc message: done writing"); + tracing::debug!(?connection_id, "outgoing rpc message: done writing"); result.context("failed to write RPC message")?; - tracing::debug!(%connection_id, "keepalive interval: resetting after sending message"); + tracing::debug!(?connection_id, "keepalive interval: resetting after sending message"); keepalive_timer.set(create_timer(KEEPALIVE_INTERVAL).fuse()); } _ = create_timer(WRITE_TIMEOUT).fuse() => { - tracing::debug!(%connection_id, "outgoing rpc message: writing timed out"); + tracing::debug!(?connection_id, "outgoing rpc message: writing timed out"); Err(anyhow!("timed out writing message"))?; } } } None => { - tracing::debug!(%connection_id, "outgoing rpc message: channel closed"); + tracing::debug!(?connection_id, "outgoing rpc message: channel closed"); return Ok(()) }, }, _ = keepalive_timer => { - tracing::debug!(%connection_id, "keepalive interval: pinging"); + tracing::debug!(?connection_id, "keepalive interval: pinging"); futures::select_biased! { result = writer.write(proto::Message::Ping).fuse() => { - tracing::debug!(%connection_id, "keepalive interval: done pinging"); + tracing::debug!(?connection_id, "keepalive interval: done pinging"); result.context("failed to send keepalive")?; - tracing::debug!(%connection_id, "keepalive interval: resetting after pinging"); + tracing::debug!(?connection_id, "keepalive interval: resetting after pinging"); keepalive_timer.set(create_timer(KEEPALIVE_INTERVAL).fuse()); } _ = create_timer(WRITE_TIMEOUT).fuse() => { - tracing::debug!(%connection_id, "keepalive interval: pinging timed out"); + tracing::debug!(?connection_id, "keepalive interval: pinging timed out"); Err(anyhow!("timed out sending keepalive"))?; } } } incoming = read_message => { let incoming = incoming.context("error reading rpc message from socket")?; - tracing::debug!(%connection_id, "incoming rpc message: received"); - tracing::debug!(%connection_id, "receive timeout: resetting"); + tracing::debug!(?connection_id, "incoming rpc message: received"); + tracing::debug!(?connection_id, "receive timeout: resetting"); receive_timeout.set(create_timer(RECEIVE_TIMEOUT).fuse()); if let proto::Message::Envelope(incoming) = incoming { - tracing::debug!(%connection_id, "incoming rpc message: processing"); + tracing::debug!(?connection_id, "incoming rpc message: processing"); futures::select_biased! 
{ result = incoming_tx.send(incoming).fuse() => match result { Ok(_) => { - tracing::debug!(%connection_id, "incoming rpc message: processed"); + tracing::debug!(?connection_id, "incoming rpc message: processed"); } Err(_) => { - tracing::debug!(%connection_id, "incoming rpc message: channel closed"); + tracing::debug!(?connection_id, "incoming rpc message: channel closed"); return Ok(()) } }, _ = create_timer(WRITE_TIMEOUT).fuse() => { - tracing::debug!(%connection_id, "incoming rpc message: processing timed out"); + tracing::debug!(?connection_id, "incoming rpc message: processing timed out"); Err(anyhow!("timed out processing incoming message"))? } } @@ -236,7 +253,7 @@ impl Peer { break; }, _ = receive_timeout => { - tracing::debug!(%connection_id, "receive timeout: delay between messages too long"); + tracing::debug!(?connection_id, "receive timeout: delay between messages too long"); Err(anyhow!("delay between messages too long"))? } } @@ -255,16 +272,12 @@ impl Peer { let message_id = incoming.id; tracing::debug!(?incoming, "incoming message future: start"); let _end = util::defer(move || { - tracing::debug!( - %connection_id, - message_id, - "incoming message future: end" - ); + tracing::debug!(?connection_id, message_id, "incoming message future: end"); }); if let Some(responding_to) = incoming.responding_to { tracing::debug!( - %connection_id, + ?connection_id, message_id, responding_to, "incoming response: received" @@ -274,7 +287,7 @@ impl Peer { let requester_resumed = oneshot::channel(); if let Err(error) = tx.send((incoming, requester_resumed.0)) { tracing::debug!( - %connection_id, + ?connection_id, message_id, responding_to = responding_to, ?error, @@ -283,21 +296,21 @@ impl Peer { } tracing::debug!( - %connection_id, + ?connection_id, message_id, responding_to, "incoming response: waiting to resume requester" ); let _ = requester_resumed.1.await; tracing::debug!( - %connection_id, + ?connection_id, message_id, responding_to, "incoming response: requester resumed" ); } else { tracing::warn!( - %connection_id, + ?connection_id, message_id, responding_to, "incoming response: unknown request" @@ -306,14 +319,10 @@ impl Peer { None } else { - tracing::debug!( - %connection_id, - message_id, - "incoming message: received" - ); + tracing::debug!(?connection_id, message_id, "incoming message: received"); proto::build_typed_envelope(connection_id, incoming).or_else(|| { tracing::error!( - %connection_id, + ?connection_id, message_id, "unable to construct a typed envelope" ); @@ -345,6 +354,7 @@ impl Peer { pub fn reset(&self) { self.connections.write().clear(); + self.next_connection_id.store(0, SeqCst); } pub fn request( @@ -384,7 +394,7 @@ impl Peer { .unbounded_send(proto::Message::Envelope(request.into_envelope( message_id, None, - original_sender_id.map(|id| id.0), + original_sender_id.map(Into::into), ))) .map_err(|_| anyhow!("connection was closed"))?; Ok(()) @@ -433,7 +443,7 @@ impl Peer { .unbounded_send(proto::Message::Envelope(message.into_envelope( message_id, None, - Some(sender_id.0), + Some(sender_id.into()), )))?; Ok(()) } @@ -480,7 +490,7 @@ impl Peer { let connections = self.connections.read(); let connection = connections .get(&connection_id) - .ok_or_else(|| anyhow!("no such connection: {}", connection_id))?; + .ok_or_else(|| anyhow!("no such connection: {:?}", connection_id))?; Ok(connection.clone()) } } @@ -515,9 +525,9 @@ mod tests { let executor = cx.foreground(); // create 2 clients connected to 1 server - let server = Peer::new(); - let client1 = 
Peer::new(); - let client2 = Peer::new(); + let server = Peer::new(0); + let client1 = Peer::new(0); + let client2 = Peer::new(0); let (client1_to_server_conn, server_to_client_1_conn, _kill) = Connection::in_memory(cx.background()); @@ -609,8 +619,8 @@ mod tests { #[gpui::test(iterations = 50)] async fn test_order_of_response_and_incoming(cx: &mut TestAppContext) { let executor = cx.foreground(); - let server = Peer::new(); - let client = Peer::new(); + let server = Peer::new(0); + let client = Peer::new(0); let (client_to_server_conn, server_to_client_conn, _kill) = Connection::in_memory(cx.background()); @@ -707,8 +717,8 @@ mod tests { #[gpui::test(iterations = 50)] async fn test_dropping_request_before_completion(cx: &mut TestAppContext) { let executor = cx.foreground(); - let server = Peer::new(); - let client = Peer::new(); + let server = Peer::new(0); + let client = Peer::new(0); let (client_to_server_conn, server_to_client_conn, _kill) = Connection::in_memory(cx.background()); @@ -822,7 +832,7 @@ mod tests { let (client_conn, mut server_conn, _kill) = Connection::in_memory(cx.background()); - let client = Peer::new(); + let client = Peer::new(0); let (connection_id, io_handler, mut incoming) = client.add_test_connection(client_conn, cx.background()); @@ -857,7 +867,7 @@ mod tests { let executor = cx.foreground(); let (client_conn, mut server_conn, _kill) = Connection::in_memory(cx.background()); - let client = Peer::new(); + let client = Peer::new(0); let (connection_id, io_handler, mut incoming) = client.add_test_connection(client_conn, cx.background()); executor.spawn(io_handler).detach(); diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 6d9bc9a0aa348af8c1a14f442323fcf06064688e..77d82ec3081514e5ab406ad68ac0e2940949e376 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -1,14 +1,16 @@ -use super::{entity_messages, messages, request_messages, ConnectionId, PeerId, TypedEnvelope}; +use super::{entity_messages, messages, request_messages, ConnectionId, TypedEnvelope}; use anyhow::{anyhow, Result}; use async_tungstenite::tungstenite::Message as WebSocketMessage; use futures::{SinkExt as _, StreamExt as _}; use prost::Message as _; use serde::Serialize; use std::any::{Any, TypeId}; -use std::{cmp, iter, mem}; +use std::fmt; +use std::str::FromStr; use std::{ + cmp, fmt::Debug, - io, + io, iter, mem, time::{Duration, SystemTime, UNIX_EPOCH}, }; @@ -21,7 +23,7 @@ pub trait EnvelopedMessage: Clone + Debug + Serialize + Sized + Send + Sync + 's self, id: u32, responding_to: Option, - original_sender_id: Option, + original_sender_id: Option, ) -> Envelope; fn from_envelope(envelope: Envelope) -> Option; } @@ -70,7 +72,67 @@ impl AnyTypedEnvelope for TypedEnvelope { } fn original_sender_id(&self) -> Option { - self.original_sender_id + self.original_sender_id.clone() + } +} + +impl PeerId { + pub fn from_u64(peer_id: u64) -> Self { + let epoch = (peer_id >> 32) as u32; + let id = peer_id as u32; + Self { epoch, id } + } + + pub fn as_u64(self) -> u64 { + ((self.epoch as u64) << 32) | (self.id as u64) + } +} + +impl Copy for PeerId {} + +impl Eq for PeerId {} + +impl Ord for PeerId { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.epoch + .cmp(&other.epoch) + .then_with(|| self.id.cmp(&other.id)) + } +} + +impl PartialOrd for PeerId { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl std::hash::Hash for PeerId { + fn hash(&self, state: &mut H) { + self.epoch.hash(state); + self.id.hash(state); + } +} + +impl 
fmt::Display for PeerId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/{}", self.epoch, self.id) + } +} + +impl FromStr for PeerId { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let mut components = s.split('/'); + let epoch = components + .next() + .ok_or_else(|| anyhow!("invalid peer id {:?}", s))? + .parse()?; + let id = components + .next() + .ok_or_else(|| anyhow!("invalid peer id {:?}", s))? + .parse()?; + Ok(PeerId { epoch, id }) } } @@ -477,4 +539,25 @@ mod tests { stream.read().await.unwrap(); assert!(stream.encoding_buffer.capacity() <= MAX_BUFFER_LEN); } + + #[gpui::test] + fn test_converting_peer_id_from_and_to_u64() { + let peer_id = PeerId { epoch: 10, id: 3 }; + assert_eq!(PeerId::from_u64(peer_id.as_u64()), peer_id); + let peer_id = PeerId { + epoch: u32::MAX, + id: 3, + }; + assert_eq!(PeerId::from_u64(peer_id.as_u64()), peer_id); + let peer_id = PeerId { + epoch: 10, + id: u32::MAX, + }; + assert_eq!(PeerId::from_u64(peer_id.as_u64()), peer_id); + let peer_id = PeerId { + epoch: u32::MAX, + id: u32::MAX, + }; + assert_eq!(PeerId::from_u64(peer_id.as_u64()), peer_id); + } } diff --git a/crates/rpc/src/rpc.rs b/crates/rpc/src/rpc.rs index 1d4a4496d0af0f1bb9cff6f584c7561ac6fa99e2..d06f1c1c5ce0c1e54ea1a5d85c0b8acb8edb5f23 100644 --- a/crates/rpc/src/rpc.rs +++ b/crates/rpc/src/rpc.rs @@ -6,4 +6,4 @@ pub use conn::Connection; pub use peer::*; mod macros; -pub const PROTOCOL_VERSION: u32 = 41; +pub const PROTOCOL_VERSION: u32 = 42; diff --git a/crates/workspace/src/item.rs b/crates/workspace/src/item.rs index 14f847fd54b429f43e5aaa0ea352588995f2532c..bd00272c9405cced0d1ca5c4cd7b06bd70d359f8 100644 --- a/crates/workspace/src/item.rs +++ b/crates/workspace/src/item.rs @@ -280,7 +280,7 @@ impl ItemHandle for ViewHandle { proto::update_followers::Variant::CreateView(proto::View { id: followed_item.id() as u64, variant: Some(message), - leader_id: workspace.leader_for_pane(&pane).map(|id| id.0), + leader_id: workspace.leader_for_pane(&pane), }), cx, ); @@ -334,7 +334,7 @@ impl ItemHandle for ViewHandle { proto::UpdateView { id: item.id() as u64, variant: pending_update.borrow_mut().take(), - leader_id: leader_id.map(|id| id.0), + leader_id, }, ), cx, diff --git a/crates/workspace/src/shared_screen.rs b/crates/workspace/src/shared_screen.rs index 7dee642423c805e9581520b523c5182d424c8ccb..d292ece3d5fb821f07e8ed80a0931ebb44bf0e96 100644 --- a/crates/workspace/src/shared_screen.rs +++ b/crates/workspace/src/shared_screen.rs @@ -3,7 +3,7 @@ use crate::{ }; use anyhow::{anyhow, Result}; use call::participant::{Frame, RemoteVideoTrack}; -use client::{PeerId, User}; +use client::{proto::PeerId, User}; use futures::StreamExt; use gpui::{ elements::*, diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 387a18006a0d703c8c04cd55201156c975854921..46e790b892bd3e8ee4596535627bd1cd086fe9a9 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -25,7 +25,10 @@ use std::{ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; -use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; +use client::{ + proto::{self, PeerId}, + Client, TypedEnvelope, UserStore, +}; use collections::{hash_map, HashMap, HashSet}; use dock::{Dock, DockDefaultItemFactory, ToggleDockButton}; use drag_and_drop::DragAndDrop; @@ -1441,7 +1444,7 @@ impl Workspace { self.update_followers( proto::update_followers::Variant::UpdateActiveView(proto::UpdateActiveView { id: 
self.active_item(cx).map(|item| item.id() as u64), - leader_id: self.leader_for_pane(&pane).map(|id| id.0), + leader_id: self.leader_for_pane(&pane), }), cx, ); @@ -1620,7 +1623,7 @@ impl Workspace { let project_id = self.project.read(cx).remote_id()?; let request = self.client.request(proto::Follow { project_id, - leader_id: leader_id.0, + leader_id: Some(leader_id), }); Some(cx.spawn_weak(|this, mut cx| async move { let response = request.await?; @@ -1692,7 +1695,7 @@ impl Workspace { self.client .send(proto::Unfollow { project_id, - leader_id: leader_id.0, + leader_id: Some(leader_id), }) .log_err(); } @@ -1888,7 +1891,7 @@ impl Workspace { .panes() .iter() .flat_map(|pane| { - let leader_id = this.leader_for_pane(pane).map(|id| id.0); + let leader_id = this.leader_for_pane(pane); pane.read(cx).items().filter_map({ let cx = &cx; move |item| { @@ -1997,7 +2000,7 @@ impl Workspace { .get(&leader_id) .map(|c| c.replica_id) }) - .ok_or_else(|| anyhow!("no such collaborator {}", leader_id))?; + .ok_or_else(|| anyhow!("no such collaborator {:?}", leader_id))?; let item_builders = cx.update(|cx| { cx.default_global::() @@ -2077,7 +2080,7 @@ impl Workspace { self.client .send(proto::UpdateFollowers { project_id, - follower_ids: self.leader_state.followers.iter().map(|f| f.0).collect(), + follower_ids: self.leader_state.followers.iter().copied().collect(), variant: Some(update), }) .log_err(); From 930be6706f94d982bcaa21f30d2374cbe4794000 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 18:02:39 +0100 Subject: [PATCH 19/29] WIP --- crates/client/src/client.rs | 4 +- crates/collab/k8s/manifest.template.yml | 2 + .../20221109000000_test_schema.sql | 5 + ...4346_change_epoch_from_uuid_to_integer.sql | 13 +- crates/collab/src/db.rs | 166 +++++++++++------- crates/collab/src/db/server.rs | 15 ++ crates/collab/src/integration_tests.rs | 24 ++- crates/collab/src/lib.rs | 1 + crates/collab/src/main.rs | 6 +- crates/collab/src/rpc.rs | 41 +++-- crates/rpc/src/peer.rs | 19 +- 11 files changed, 200 insertions(+), 96 deletions(-) create mode 100644 crates/collab/src/db/server.rs diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 876b83b11afb003a4e9b065ebfed44741b77f4cd..6d9ec305b697981a6c1c4f8492655db4d9eb6757 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -333,14 +333,14 @@ impl Client { } #[cfg(any(test, feature = "test-support"))] - pub fn tear_down(&self) { + pub fn teardown(&self) { let mut state = self.state.write(); state._reconnect_task.take(); state.message_handlers.clear(); state.models_by_message_type.clear(); state.entities_by_type_and_remote_id.clear(); state.entity_id_extractors.clear(); - self.peer.reset(); + self.peer.teardown(); } #[cfg(any(test, feature = "test-support"))] diff --git a/crates/collab/k8s/manifest.template.yml b/crates/collab/k8s/manifest.template.yml index 68658b575ed4bbabf94248845a83fffffba96f36..339d02892ef2cf24f40815c3fd32dc0fb72c317a 100644 --- a/crates/collab/k8s/manifest.template.yml +++ b/crates/collab/k8s/manifest.template.yml @@ -99,6 +99,8 @@ spec: value: ${RUST_LOG} - name: LOG_JSON value: "true" + - name: ZED_ENVIRONMENT + value: ${ZED_ENVIRONMENT} securityContext: capabilities: # FIXME - Switch to the more restrictive `PERFMON` capability. 
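The ZED_ENVIRONMENT value exported by the manifest above is what the collab process uses to register itself in the new servers table on startup: each boot inserts a row for its environment and adopts the generated serial value as its epoch. A condensed sketch of that startup path, lifted from the main.rs changes later in this patch (no new names are introduced; error handling is as in the original):

    // On boot, record this process in the `servers` table and use the generated
    // epoch when constructing the RPC server, so every connection it creates is
    // stamped with the current epoch.
    let epoch = state
        .db
        .create_server(&state.config.zed_environment)
        .await?;
    let rpc_server = collab::rpc::Server::new(epoch, state.clone(), Executor::Production);

Connections recorded under earlier epochs of the same environment are what delete_stale_projects and stale_room_ids treat as stale when the new process cleans up after a restart.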
diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index b96b07d977cc280ab134ef1ddd12f48595db0254..fc048be48e17b7e8bf544fc8612c8560d793731b 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -134,3 +134,8 @@ CREATE INDEX "index_room_participants_on_answering_connection_epoch" ON "room_pa CREATE INDEX "index_room_participants_on_calling_connection_epoch" ON "room_participants" ("calling_connection_epoch"); CREATE INDEX "index_room_participants_on_answering_connection_id" ON "room_participants" ("answering_connection_id"); CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_epoch" ON "room_participants" ("answering_connection_id", "answering_connection_epoch"); + +CREATE TABLE "servers" ( + "epoch" INTEGER PRIMARY KEY AUTOINCREMENT, + "environment" VARCHAR NOT NULL +); diff --git a/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql b/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql index ed294502a5ba98a1563c04502fe92fb783477e50..addd5451e80bb403ad4fc1e65e1268a14338eb46 100644 --- a/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql +++ b/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql @@ -1,9 +1,14 @@ ALTER TABLE "projects" - ALTER COLUMN "host_connection_epoch" TYPE INTEGER USING 0; + ALTER COLUMN "host_connection_epoch" TYPE INTEGER USING -1; ALTER TABLE "project_collaborators" - ALTER COLUMN "connection_epoch" TYPE INTEGER USING 0; + ALTER COLUMN "connection_epoch" TYPE INTEGER USING -1; ALTER TABLE "room_participants" - ALTER COLUMN "answering_connection_epoch" TYPE INTEGER USING 0, - ALTER COLUMN "calling_connection_epoch" TYPE INTEGER USING 0; + ALTER COLUMN "answering_connection_epoch" TYPE INTEGER USING -1, + ALTER COLUMN "calling_connection_epoch" TYPE INTEGER USING -1; + +CREATE TABLE "servers" ( + "epoch" SERIAL PRIMARY KEY, + "environment" VARCHAR NOT NULL +); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 6237cc4f79d5ddc0fcf833545eb1f594da2ea57e..424fc308d41bce7e81de8b9a1b3dff4a80b19103 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -5,6 +5,7 @@ mod project; mod project_collaborator; mod room; mod room_participant; +mod server; mod signup; #[cfg(test)] mod tests; @@ -48,7 +49,6 @@ pub struct Database { background: Option>, #[cfg(test)] runtime: Option, - epoch: parking_lot::RwLock, } impl Database { @@ -61,18 +61,12 @@ impl Database { background: None, #[cfg(test)] runtime: None, - epoch: parking_lot::RwLock::new(Uuid::new_v4()), }) } #[cfg(test)] pub fn reset(&self) { self.rooms.clear(); - *self.epoch.write() = Uuid::new_v4(); - } - - fn epoch(&self) -> Uuid { - *self.epoch.read() } pub async fn migrate( @@ -116,14 +110,39 @@ impl Database { Ok(new_migrations) } - pub async fn delete_stale_projects(&self) -> Result<()> { + pub async fn create_server(&self, environment: &str) -> Result { self.transaction(|tx| async move { + let server = server::ActiveModel { + environment: ActiveValue::set(environment.into()), + ..Default::default() + } + .insert(&*tx) + .await?; + Ok(server.epoch) + }) + .await + } + + pub async fn delete_stale_projects( + &self, + new_epoch: ServerEpoch, + environment: &str, + ) -> Result<()> { + self.transaction(|tx| async move { + let stale_server_epochs = self + 
.stale_server_epochs(environment, new_epoch, &tx) + .await?; project_collaborator::Entity::delete_many() - .filter(project_collaborator::Column::ConnectionEpoch.ne(self.epoch())) + .filter( + project_collaborator::Column::ConnectionEpoch + .is_in(stale_server_epochs.iter().copied()), + ) .exec(&*tx) .await?; project::Entity::delete_many() - .filter(project::Column::HostConnectionEpoch.ne(self.epoch())) + .filter( + project::Column::HostConnectionEpoch.is_in(stale_server_epochs.iter().copied()), + ) .exec(&*tx) .await?; Ok(()) @@ -131,18 +150,27 @@ impl Database { .await } - pub async fn stale_room_ids(&self) -> Result> { + pub async fn stale_room_ids( + &self, + new_epoch: ServerEpoch, + environment: &str, + ) -> Result> { self.transaction(|tx| async move { #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] enum QueryAs { RoomId, } + let stale_server_epochs = self + .stale_server_epochs(environment, new_epoch, &tx) + .await?; Ok(room_participant::Entity::find() .select_only() .column(room_participant::Column::RoomId) .distinct() - .filter(room_participant::Column::AnsweringConnectionEpoch.ne(self.epoch())) + .filter( + room_participant::Column::AnsweringConnectionEpoch.is_in(stale_server_epochs), + ) .into_values::<_, QueryAs>() .all(&*tx) .await?) @@ -150,12 +178,16 @@ impl Database { .await } - pub async fn refresh_room(&self, room_id: RoomId) -> Result> { + pub async fn refresh_room( + &self, + room_id: RoomId, + new_epoch: ServerEpoch, + ) -> Result> { self.room_transaction(|tx| async move { let stale_participant_filter = Condition::all() .add(room_participant::Column::RoomId.eq(room_id)) .add(room_participant::Column::AnsweringConnectionId.is_not_null()) - .add(room_participant::Column::AnsweringConnectionEpoch.ne(self.epoch())); + .add(room_participant::Column::AnsweringConnectionEpoch.ne(new_epoch)); let stale_participant_user_ids = room_participant::Entity::find() .filter(stale_participant_filter.clone()) @@ -199,6 +231,35 @@ impl Database { .await } + fn delete_stale_servers(&self, environment: &str, new_epoch: ServerEpoch) -> Result<()> { + self.transaction(|tx| async { + server::Entity::delete_many().filter(Condition::all().add()) + }) + .await + } + + async fn stale_server_epochs( + &self, + environment: &str, + new_epoch: ServerEpoch, + tx: &DatabaseTransaction, + ) -> Result> { + let stale_servers = server::Entity::find() + .filter( + Condition::all().add( + server::Column::Environment + .eq(environment) + .add(server::Column::Epoch.ne(new_epoch)), + ), + ) + .all(&*tx) + .await?; + Ok(stale_servers + .into_iter() + .map(|server| server.epoch) + .collect()) + } + // users pub async fn create_user( @@ -1643,14 +1704,16 @@ impl Database { Default::default(), )), }; + + let answering_connection = ConnectionId { + epoch: answering_connection_epoch as u32, + id: answering_connection_id as u32, + }; participants.insert( - answering_connection_id, + answering_connection, proto::Participant { user_id: db_participant.user_id.to_proto(), - peer_id: Some(proto::PeerId { - epoch: answering_connection_epoch as u32, - id: answering_connection_id as u32, - }), + peer_id: Some(answering_connection.into()), projects: Default::default(), location: Some(proto::ParticipantLocation { variant: location }), }, @@ -1673,7 +1736,11 @@ impl Database { while let Some(row) = db_projects.next().await { let (db_project, db_worktree) = row?; - if let Some(participant) = participants.get_mut(&db_project.host_connection_id) { + let host_connection = ConnectionId { + epoch: db_project.host_connection_epoch 
as u32, + id: db_project.host_connection_id as u32, + }; + if let Some(participant) = participants.get_mut(&host_connection) { let project = if let Some(project) = participant .projects .iter_mut() @@ -2309,41 +2376,22 @@ impl Database { project_id: ProjectId, connection_id: ConnectionId, ) -> Result>> { - #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] - enum QueryAs { - Epoch, - Id, - } - - #[derive(Debug, FromQueryResult)] - struct ProjectConnection { - epoch: i32, - id: i32, - } - self.room_transaction(|tx| async move { let project = project::Entity::find_by_id(project_id) .one(&*tx) .await? .ok_or_else(|| anyhow!("no such project"))?; - let mut db_connections = project_collaborator::Entity::find() - .select_only() - .column_as(project_collaborator::Column::ConnectionId, QueryAs::Id) - .column_as( - project_collaborator::Column::ConnectionEpoch, - QueryAs::Epoch, - ) + let mut participants = project_collaborator::Entity::find() .filter(project_collaborator::Column::ProjectId.eq(project_id)) - .into_model::() .stream(&*tx) .await?; let mut connection_ids = HashSet::default(); - while let Some(connection) = db_connections.next().await { - let connection = connection?; + while let Some(participant) = participants.next().await { + let participant = participant?; connection_ids.insert(ConnectionId { - epoch: connection.epoch as u32, - id: connection.id as u32, + epoch: participant.connection_epoch as u32, + id: participant.connection_id as u32, }); } @@ -2361,40 +2409,21 @@ impl Database { project_id: ProjectId, tx: &DatabaseTransaction, ) -> Result> { - #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] - enum QueryAs { - Epoch, - Id, - } - - #[derive(Debug, FromQueryResult)] - struct ProjectConnection { - epoch: i32, - id: i32, - } - - let mut db_guest_connections = project_collaborator::Entity::find() - .select_only() - .column_as(project_collaborator::Column::ConnectionId, QueryAs::Id) - .column_as( - project_collaborator::Column::ConnectionEpoch, - QueryAs::Epoch, - ) + let mut participants = project_collaborator::Entity::find() .filter( project_collaborator::Column::ProjectId .eq(project_id) .and(project_collaborator::Column::IsHost.eq(false)), ) - .into_model::() .stream(tx) .await?; let mut guest_connection_ids = Vec::new(); - while let Some(connection) = db_guest_connections.next().await { - let connection = connection?; + while let Some(participant) = participants.next().await { + let participant = participant?; guest_connection_ids.push(ConnectionId { - epoch: connection.epoch as u32, - id: connection.id as u32, + epoch: participant.connection_epoch as u32, + id: participant.connection_id as u32, }); } Ok(guest_connection_ids) @@ -2774,6 +2803,7 @@ id_type!(RoomParticipantId); id_type!(ProjectId); id_type!(ProjectCollaboratorId); id_type!(ReplicaId); +id_type!(ServerEpoch); id_type!(SignupId); id_type!(UserId); diff --git a/crates/collab/src/db/server.rs b/crates/collab/src/db/server.rs new file mode 100644 index 0000000000000000000000000000000000000000..6735770a0f0e7e8fbafe4c99bcd3e11e207c2b89 --- /dev/null +++ b/crates/collab/src/db/server.rs @@ -0,0 +1,15 @@ +use super::ServerEpoch; +use sea_orm::entity::prelude::*; + +#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] +#[sea_orm(table_name = "servers")] +pub struct Model { + #[sea_orm(primary_key)] + pub epoch: ServerEpoch, + pub environment: String, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} diff --git 
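Taken together, the servers entity above and the stale_server_* queries in these hunks implement per-environment epoch bookkeeping: each server registers a row at startup (create_server), and any projects, collaborators, or participants that still reference an older server row in the same environment become eligible for cleanup. The following is a minimal sketch of the shape of that lookup only, not a drop-in copy of the crate's code: it uses plain i32 in place of the crate's ServerId newtype and the same SeaORM calls the diff itself relies on.

use sea_orm::{entity::prelude::*, Condition, DatabaseTransaction, DbErr};

// Simplified stand-in for crates/collab/src/db/server.rs.
#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)]
#[sea_orm(table_name = "servers")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub id: i32,
    pub environment: String,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}

impl ActiveModelBehavior for ActiveModel {}

/// Ids of servers in `environment` other than the one that just started.
/// Rows whose *_server_id column is in this set are the ones a freshly
/// started server is allowed to delete.
pub async fn stale_server_ids(
    environment: &str,
    new_server_id: i32,
    tx: &DatabaseTransaction,
) -> Result<Vec<i32>, DbErr> {
    let stale = Entity::find()
        .filter(
            Condition::all()
                .add(Column::Environment.eq(environment))
                .add(Column::Id.ne(new_server_id)),
        )
        .all(tx)
        .await?;
    Ok(stale.into_iter().map(|server| server.id).collect())
}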
a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 2c2a301733f81659de8d9609c617e2a56a4b5551..dd18d5aef20f82bd070b6165c2cabf892c477d57 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -608,7 +608,7 @@ async fn test_server_restarts( ); // The server is torn down. - server.teardown(); + server.reset().await; // Users A and B reconnect to the call. User C has troubles reconnecting, so it leaves the room. client_c.override_establish_connection(|_, cx| cx.spawn(|_| future::pending())); @@ -778,7 +778,7 @@ async fn test_server_restarts( ); // The server is torn down. - server.teardown(); + server.reset().await; // Users A and B have troubles reconnecting, so they leave the room. client_a.override_establish_connection(|_, cx| cx.spawn(|_| future::pending())); @@ -6122,7 +6122,7 @@ async fn test_random_collaboration( } 30..=34 => { log::info!("Simulating server restart"); - server.teardown(); + server.reset().await; deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); server.start().await.unwrap(); deterministic.advance_clock(CLEANUP_TIMEOUT); @@ -6320,7 +6320,13 @@ impl TestServer { ) .unwrap(); let app_state = Self::build_app_state(&test_db, &live_kit_server).await; + let epoch = app_state + .db + .create_server(&app_state.config.zed_environment) + .await + .unwrap(); let server = Server::new( + epoch, app_state.clone(), Executor::Deterministic(deterministic.build_background()), ); @@ -6337,9 +6343,15 @@ impl TestServer { } } - fn teardown(&self) { - self.server.teardown(); + async fn reset(&self) { self.app_state.db.reset(); + let epoch = self + .app_state + .db + .create_server(&self.app_state.config.zed_environment) + .await + .unwrap(); + self.server.reset(epoch); } async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> TestClient { @@ -7251,7 +7263,7 @@ impl TestClient { impl Drop for TestClient { fn drop(&mut self) { - self.client.tear_down(); + self.client.teardown(); } } diff --git a/crates/collab/src/lib.rs b/crates/collab/src/lib.rs index 7e0f23f5d4096cbf33aaf8ea6275381b96f9208a..27f49f5b1e1fb6a0ba7d6fba7f72efd88723d83d 100644 --- a/crates/collab/src/lib.rs +++ b/crates/collab/src/lib.rs @@ -97,6 +97,7 @@ pub struct Config { pub live_kit_secret: Option, pub rust_log: Option, pub log_json: Option, + pub zed_environment: String, } #[derive(Default, Deserialize)] diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 1386df715721f3f4d6018484a2841cbf7565e01e..26af6442d9fd6d32f34bb4894c34464bf38cee49 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -57,7 +57,11 @@ async fn main() -> Result<()> { let listener = TcpListener::bind(&format!("0.0.0.0:{}", state.config.http_port)) .expect("failed to bind TCP listener"); - let rpc_server = collab::rpc::Server::new(state.clone(), Executor::Production); + let epoch = state + .db + .create_server(&state.config.zed_environment) + .await?; + let rpc_server = collab::rpc::Server::new(epoch, state.clone(), Executor::Production); rpc_server.start().await?; let app = collab::api::routes(rpc_server.clone(), state.clone()) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 856eb36fb1fecebb27a9779059a06cd07b0c4667..592e86cb45378116bb5901e555e2402c7c8de69c 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -2,7 +2,7 @@ mod connection_pool; use crate::{ auth, - db::{self, Database, ProjectId, RoomId, User, UserId}, + db::{self, Database, ProjectId, RoomId, 
ServerEpoch, User, UserId}, executor::Executor, AppState, Result, }; @@ -138,6 +138,7 @@ impl Deref for DbHandle { } pub struct Server { + epoch: parking_lot::Mutex, peer: Arc, pub(crate) connection_pool: Arc>, app_state: Arc, @@ -168,9 +169,10 @@ where } impl Server { - pub fn new(app_state: Arc, executor: Executor) -> Arc { + pub fn new(epoch: ServerEpoch, app_state: Arc, executor: Executor) -> Arc { let mut server = Self { - peer: Peer::new(0), + epoch: parking_lot::Mutex::new(epoch), + peer: Peer::new(epoch.0 as u32), app_state, executor, connection_pool: Default::default(), @@ -239,7 +241,8 @@ impl Server { } pub async fn start(&self) -> Result<()> { - let db = self.app_state.db.clone(); + let epoch = *self.epoch.lock(); + let app_state = self.app_state.clone(); let peer = self.peer.clone(); let timeout = self.executor.sleep(CLEANUP_TIMEOUT); let pool = self.connection_pool.clone(); @@ -249,7 +252,10 @@ impl Server { let span_enter = span.enter(); tracing::info!("begin deleting stale projects"); - self.app_state.db.delete_stale_projects().await?; + app_state + .db + .delete_stale_projects(epoch, &app_state.config.zed_environment) + .await?; tracing::info!("finish deleting stale projects"); drop(span_enter); @@ -258,7 +264,12 @@ impl Server { tracing::info!("waiting for cleanup timeout"); timeout.await; tracing::info!("cleanup timeout expired, retrieving stale rooms"); - if let Some(room_ids) = db.stale_room_ids().await.trace_err() { + if let Some(room_ids) = app_state + .db + .stale_room_ids(epoch, &app_state.config.zed_environment) + .await + .trace_err() + { tracing::info!(stale_room_count = room_ids.len(), "retrieved stale rooms"); for room_id in room_ids { let mut contacts_to_update = HashSet::default(); @@ -266,7 +277,9 @@ impl Server { let mut live_kit_room = String::new(); let mut delete_live_kit_room = false; - if let Ok(mut refreshed_room) = db.refresh_room(room_id).await { + if let Ok(mut refreshed_room) = + app_state.db.refresh_room(room_id, epoch).await + { tracing::info!( room_id = room_id.0, new_participant_count = refreshed_room.room.participants.len(), @@ -299,8 +312,8 @@ impl Server { } for user_id in contacts_to_update { - let busy = db.is_user_busy(user_id).await.trace_err(); - let contacts = db.get_contacts(user_id).await.trace_err(); + let busy = app_state.db.is_user_busy(user_id).await.trace_err(); + let contacts = app_state.db.get_contacts(user_id).await.trace_err(); if let Some((busy, contacts)) = busy.zip(contacts) { let pool = pool.lock(); let updated_contact = contact_for_user(user_id, false, busy, &pool); @@ -345,11 +358,18 @@ impl Server { } pub fn teardown(&self) { - self.peer.reset(); + self.peer.teardown(); self.connection_pool.lock().reset(); let _ = self.teardown.send(()); } + #[cfg(test)] + pub fn reset(&self, epoch: ServerEpoch) { + self.teardown(); + *self.epoch.lock() = epoch; + self.peer.reset(epoch.0 as u32); + } + fn add_handler(&mut self, handler: F) -> &mut Self where F: 'static + Send + Sync + Fn(TypedEnvelope, Session) -> Fut, @@ -1474,6 +1494,7 @@ async fn update_buffer( .project_connection_ids(project_id, session.connection_id) .await?; + dbg!(session.connection_id, &*project_connection_ids); broadcast( session.connection_id, project_connection_ids.iter().copied(), diff --git a/crates/rpc/src/peer.rs b/crates/rpc/src/peer.rs index 43e4dfbde87aab59264f2334df9484ab0ae5a5b3..fc6712e04fe5544c07141ce0dc3cff17a675eca4 100644 --- a/crates/rpc/src/peer.rs +++ b/crates/rpc/src/peer.rs @@ -97,7 +97,7 @@ impl TypedEnvelope { } pub struct Peer { - 
epoch: u32, + epoch: AtomicU32, pub connections: RwLock>, next_connection_id: AtomicU32, } @@ -120,12 +120,16 @@ pub const RECEIVE_TIMEOUT: Duration = Duration::from_secs(5); impl Peer { pub fn new(epoch: u32) -> Arc { Arc::new(Self { - epoch, + epoch: AtomicU32::new(epoch), connections: Default::default(), next_connection_id: Default::default(), }) } + pub fn epoch(&self) -> u32 { + self.epoch.load(SeqCst) + } + #[instrument(skip_all)] pub fn add_connection( self: &Arc, @@ -153,7 +157,7 @@ impl Peer { let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded(); let connection_id = ConnectionId { - epoch: self.epoch, + epoch: self.epoch.load(SeqCst), id: self.next_connection_id.fetch_add(1, SeqCst), }; let connection_state = ConnectionState { @@ -352,9 +356,14 @@ impl Peer { self.connections.write().remove(&connection_id); } - pub fn reset(&self) { - self.connections.write().clear(); + pub fn reset(&self, epoch: u32) { + self.teardown(); self.next_connection_id.store(0, SeqCst); + self.epoch.store(epoch, SeqCst); + } + + pub fn teardown(&self) { + self.connections.write().clear(); } pub fn request( From 363e3cae4b97be7d93d4bbd78c3ab4d43b2a1857 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 14 Dec 2022 19:25:07 +0100 Subject: [PATCH 20/29] WIP --- crates/collab/src/db.rs | 20 +++++++++++++++++--- crates/collab/src/rpc.rs | 6 ++++++ 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 424fc308d41bce7e81de8b9a1b3dff4a80b19103..46ed2ce9f72fc163779faf655c414104ccc31ba4 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -231,9 +231,23 @@ impl Database { .await } - fn delete_stale_servers(&self, environment: &str, new_epoch: ServerEpoch) -> Result<()> { - self.transaction(|tx| async { - server::Entity::delete_many().filter(Condition::all().add()) + pub async fn delete_stale_servers( + &self, + new_epoch: ServerEpoch, + environment: &str, + ) -> Result<()> { + self.transaction(|tx| async move { + server::Entity::delete_many() + .filter( + Condition::all().add( + server::Column::Environment + .eq(environment) + .add(server::Column::Epoch.ne(new_epoch)), + ), + ) + .exec(&*tx) + .await?; + Ok(()) }) .await } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 592e86cb45378116bb5901e555e2402c7c8de69c..d3a1c0ebf13e0a194f254ac8e044bf5ed05be1a7 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -351,6 +351,12 @@ impl Server { } } } + + app_state + .db + .delete_stale_servers(epoch, &app_state.config.zed_environment) + .await + .trace_err(); } .instrument(span), ); From 6c58a4f885a8761ecbb8cae3f06a34a64b6afe8f Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 14 Dec 2022 17:34:24 -0800 Subject: [PATCH 21/29] Fix stale server queries, use foreign keys from connectionsn to servers --- crates/collab/.env.toml | 1 + .../20221109000000_test_schema.sql | 10 +- ...4346_change_epoch_from_uuid_to_integer.sql | 30 ++-- crates/collab/src/db.rs | 151 +++++++++--------- crates/collab/src/db/project.rs | 4 +- crates/collab/src/db/project_collaborator.rs | 4 +- crates/collab/src/db/room_participant.rs | 6 +- crates/collab/src/db/server.rs | 4 +- crates/collab/src/db/tests.rs | 18 +-- crates/collab/src/rpc.rs | 18 +-- 10 files changed, 126 insertions(+), 120 deletions(-) diff --git a/crates/collab/.env.toml b/crates/collab/.env.toml index 1945d9cb66b33ee36a8e17f4ebbc8af54f346c5c..b4a6694e5e6578e771406d6947c2c4ad8d2efb0a 100644 --- a/crates/collab/.env.toml +++ 
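The peer.rs hunk above turns the epoch into an AtomicU32 so a live Peer can be reset to a new epoch (used by tests) without being rebuilt, while every ConnectionId it hands out is stamped with whichever epoch was current at creation time. Below is a stripped-down sketch of that pattern only: the connection map, teardown, and the rest of Peer are omitted, and the ConnectionId field keeps the `epoch` name it has at this point in the series (a later patch renames it to owner_id).

use std::sync::atomic::{AtomicU32, Ordering::SeqCst};

/// Reduced Peer: only the pieces involved in epoch stamping.
struct Peer {
    epoch: AtomicU32,
    next_connection_id: AtomicU32,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct ConnectionId {
    epoch: u32, // renamed to owner_id later in this patch series
    id: u32,
}

impl Peer {
    fn new(epoch: u32) -> Self {
        Self {
            epoch: AtomicU32::new(epoch),
            next_connection_id: AtomicU32::new(0),
        }
    }

    /// New connections are tagged with the epoch that is current *now*, so ids
    /// issued before a reset can never be confused with ids issued after it.
    fn issue_connection_id(&self) -> ConnectionId {
        ConnectionId {
            epoch: self.epoch.load(SeqCst),
            id: self.next_connection_id.fetch_add(1, SeqCst),
        }
    }

    /// Mirrors Peer::reset in the diff: restart numbering under a new epoch.
    /// (The real method also clears the connection map via teardown.)
    fn reset(&self, epoch: u32) {
        self.next_connection_id.store(0, SeqCst);
        self.epoch.store(epoch, SeqCst);
    }
}

fn main() {
    let peer = Peer::new(1);
    let before = peer.issue_connection_id();
    peer.reset(2);
    let after = peer.issue_connection_id();
    assert_eq!((before.epoch, before.id), (1, 0));
    assert_eq!((after.epoch, after.id), (2, 0));
    assert_ne!(before, after); // same numeric id, different epoch
}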
b/crates/collab/.env.toml @@ -2,6 +2,7 @@ DATABASE_URL = "postgres://postgres@localhost/zed" HTTP_PORT = 8080 API_TOKEN = "secret" INVITE_LINK_PREFIX = "http://localhost:3000/invites/" +ZED_ENVIRONMENT = "development" LIVE_KIT_SERVER = "http://localhost:7880" LIVE_KIT_KEY = "devkey" LIVE_KIT_SECRET = "secret" diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index fc048be48e17b7e8bf544fc8612c8560d793731b..4ae6f54e372fd16ea0916e6b6e5dc5fed8859eae 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -44,7 +44,7 @@ CREATE TABLE "projects" ( "room_id" INTEGER REFERENCES rooms (id) NOT NULL, "host_user_id" INTEGER REFERENCES users (id) NOT NULL, "host_connection_id" INTEGER NOT NULL, - "host_connection_epoch" INTEGER NOT NULL, + "host_connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE, "unregistered" BOOLEAN NOT NULL DEFAULT FALSE ); CREATE INDEX "index_projects_on_host_connection_epoch" ON "projects" ("host_connection_epoch"); @@ -103,7 +103,7 @@ CREATE TABLE "project_collaborators" ( "id" INTEGER PRIMARY KEY AUTOINCREMENT, "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, "connection_id" INTEGER NOT NULL, - "connection_epoch" INTEGER NOT NULL, + "connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE, "user_id" INTEGER NOT NULL, "replica_id" INTEGER NOT NULL, "is_host" BOOLEAN NOT NULL @@ -119,14 +119,14 @@ CREATE TABLE "room_participants" ( "room_id" INTEGER NOT NULL REFERENCES rooms (id), "user_id" INTEGER NOT NULL REFERENCES users (id), "answering_connection_id" INTEGER, - "answering_connection_epoch" INTEGER, + "answering_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE CASCADE, "answering_connection_lost" BOOLEAN NOT NULL, "location_kind" INTEGER, "location_project_id" INTEGER, "initial_project_id" INTEGER, "calling_user_id" INTEGER NOT NULL REFERENCES users (id), "calling_connection_id" INTEGER NOT NULL, - "calling_connection_epoch" INTEGER NOT NULL + "calling_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE SET NULL ); CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id"); CREATE INDEX "index_room_participants_on_room_id" ON "room_participants" ("room_id"); @@ -136,6 +136,6 @@ CREATE INDEX "index_room_participants_on_answering_connection_id" ON "room_parti CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_epoch" ON "room_participants" ("answering_connection_id", "answering_connection_epoch"); CREATE TABLE "servers" ( - "epoch" INTEGER PRIMARY KEY AUTOINCREMENT, + "id" INTEGER PRIMARY KEY AUTOINCREMENT, "environment" VARCHAR NOT NULL ); diff --git a/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql b/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql index addd5451e80bb403ad4fc1e65e1268a14338eb46..e9878a96e24d17a5249a7952be7b87a00243274c 100644 --- a/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql +++ b/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql @@ -1,14 +1,22 @@ -ALTER TABLE "projects" - ALTER COLUMN "host_connection_epoch" TYPE INTEGER USING -1; +CREATE TABLE servers ( + id SERIAL PRIMARY KEY, + environment VARCHAR NOT NULL +); -ALTER TABLE "project_collaborators" - ALTER COLUMN 
"connection_epoch" TYPE INTEGER USING -1; +DELETE FROM projects; +ALTER TABLE projects + DROP COLUMN host_connection_epoch, + ADD COLUMN host_connection_server_id INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE; -ALTER TABLE "room_participants" - ALTER COLUMN "answering_connection_epoch" TYPE INTEGER USING -1, - ALTER COLUMN "calling_connection_epoch" TYPE INTEGER USING -1; +DELETE FROM project_collaborators; +ALTER TABLE project_collaborators + DROP COLUMN connection_epoch, + ADD COLUMN connection_server_id INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE; + +DELETE FROM room_participants; +ALTER TABLE room_participants + DROP COLUMN answering_connection_epoch, + DROP COLUMN calling_connection_epoch, + ADD COLUMN answering_connection_server_id INTEGER REFERENCES servers (id) ON DELETE CASCADE, + ADD COLUMN calling_connection_server_id INTEGER REFERENCES servers (id) ON DELETE SET NULL; -CREATE TABLE "servers" ( - "epoch" SERIAL PRIMARY KEY, - "environment" VARCHAR NOT NULL -); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 46ed2ce9f72fc163779faf655c414104ccc31ba4..71faf16b4518a523f5929fcc771138ed3eb3fa0f 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -110,7 +110,7 @@ impl Database { Ok(new_migrations) } - pub async fn create_server(&self, environment: &str) -> Result { + pub async fn create_server(&self, environment: &str) -> Result { self.transaction(|tx| async move { let server = server::ActiveModel { environment: ActiveValue::set(environment.into()), @@ -118,30 +118,29 @@ impl Database { } .insert(&*tx) .await?; - Ok(server.epoch) + Ok(server.id) }) .await } pub async fn delete_stale_projects( &self, - new_epoch: ServerEpoch, + new_epoch: ServerId, environment: &str, ) -> Result<()> { self.transaction(|tx| async move { - let stale_server_epochs = self - .stale_server_epochs(environment, new_epoch, &tx) - .await?; + let stale_server_epochs = self.stale_server_ids(environment, new_epoch, &tx).await?; project_collaborator::Entity::delete_many() .filter( - project_collaborator::Column::ConnectionEpoch + project_collaborator::Column::ConnectionServerId .is_in(stale_server_epochs.iter().copied()), ) .exec(&*tx) .await?; project::Entity::delete_many() .filter( - project::Column::HostConnectionEpoch.is_in(stale_server_epochs.iter().copied()), + project::Column::HostConnectionServerId + .is_in(stale_server_epochs.iter().copied()), ) .exec(&*tx) .await?; @@ -152,7 +151,7 @@ impl Database { pub async fn stale_room_ids( &self, - new_epoch: ServerEpoch, + new_epoch: ServerId, environment: &str, ) -> Result> { self.transaction(|tx| async move { @@ -161,15 +160,14 @@ impl Database { RoomId, } - let stale_server_epochs = self - .stale_server_epochs(environment, new_epoch, &tx) - .await?; + let stale_server_epochs = self.stale_server_ids(environment, new_epoch, &tx).await?; Ok(room_participant::Entity::find() .select_only() .column(room_participant::Column::RoomId) .distinct() .filter( - room_participant::Column::AnsweringConnectionEpoch.is_in(stale_server_epochs), + room_participant::Column::AnsweringConnectionServerId + .is_in(stale_server_epochs), ) .into_values::<_, QueryAs>() .all(&*tx) @@ -181,13 +179,13 @@ impl Database { pub async fn refresh_room( &self, room_id: RoomId, - new_epoch: ServerEpoch, + new_epoch: ServerId, ) -> Result> { self.room_transaction(|tx| async move { let stale_participant_filter = Condition::all() .add(room_participant::Column::RoomId.eq(room_id)) 
.add(room_participant::Column::AnsweringConnectionId.is_not_null()) - .add(room_participant::Column::AnsweringConnectionEpoch.ne(new_epoch)); + .add(room_participant::Column::AnsweringConnectionServerId.ne(new_epoch)); let stale_participant_user_ids = room_participant::Entity::find() .filter(stale_participant_filter.clone()) @@ -231,19 +229,13 @@ impl Database { .await } - pub async fn delete_stale_servers( - &self, - new_epoch: ServerEpoch, - environment: &str, - ) -> Result<()> { + pub async fn delete_stale_servers(&self, new_epoch: ServerId, environment: &str) -> Result<()> { self.transaction(|tx| async move { server::Entity::delete_many() .filter( - Condition::all().add( - server::Column::Environment - .eq(environment) - .add(server::Column::Epoch.ne(new_epoch)), - ), + Condition::all() + .add(server::Column::Environment.eq(environment)) + .add(server::Column::Id.ne(new_epoch)), ) .exec(&*tx) .await?; @@ -252,26 +244,21 @@ impl Database { .await } - async fn stale_server_epochs( + async fn stale_server_ids( &self, environment: &str, - new_epoch: ServerEpoch, + new_epoch: ServerId, tx: &DatabaseTransaction, - ) -> Result> { + ) -> Result> { let stale_servers = server::Entity::find() .filter( - Condition::all().add( - server::Column::Environment - .eq(environment) - .add(server::Column::Epoch.ne(new_epoch)), - ), + Condition::all() + .add(server::Column::Environment.eq(environment)) + .add(server::Column::Id.ne(new_epoch)), ) .all(&*tx) .await?; - Ok(stale_servers - .into_iter() - .map(|server| server.epoch) - .collect()) + Ok(stale_servers.into_iter().map(|server| server.id).collect()) } // users @@ -1167,11 +1154,15 @@ impl Database { room_id: ActiveValue::set(room_id), user_id: ActiveValue::set(user_id), answering_connection_id: ActiveValue::set(Some(connection.id as i32)), - answering_connection_epoch: ActiveValue::set(Some(connection.epoch as i32)), + answering_connection_server_id: ActiveValue::set(Some(ServerId( + connection.epoch as i32, + ))), answering_connection_lost: ActiveValue::set(false), calling_user_id: ActiveValue::set(user_id), calling_connection_id: ActiveValue::set(connection.id as i32), - calling_connection_epoch: ActiveValue::set(connection.epoch as i32), + calling_connection_server_id: ActiveValue::set(Some(ServerId( + connection.epoch as i32, + ))), ..Default::default() } .insert(&*tx) @@ -1198,7 +1189,9 @@ impl Database { answering_connection_lost: ActiveValue::set(false), calling_user_id: ActiveValue::set(calling_user_id), calling_connection_id: ActiveValue::set(calling_connection.id as i32), - calling_connection_epoch: ActiveValue::set(calling_connection.epoch as i32), + calling_connection_server_id: ActiveValue::set(Some(ServerId( + calling_connection.epoch as i32, + ))), initial_project_id: ActiveValue::set(initial_project_id), ..Default::default() } @@ -1280,7 +1273,7 @@ impl Database { .eq(calling_connection.id as i32), ) .add( - room_participant::Column::CallingConnectionEpoch + room_participant::Column::CallingConnectionServerId .eq(calling_connection.epoch as i32), ) .add(room_participant::Column::AnsweringConnectionId.is_null()), @@ -1320,14 +1313,16 @@ impl Database { .add(room_participant::Column::AnsweringConnectionId.is_null()) .add(room_participant::Column::AnsweringConnectionLost.eq(true)) .add( - room_participant::Column::AnsweringConnectionEpoch + room_participant::Column::AnsweringConnectionServerId .ne(connection.epoch as i32), ), ), ) .set(room_participant::ActiveModel { answering_connection_id: ActiveValue::set(Some(connection.id as i32)), 
- answering_connection_epoch: ActiveValue::set(Some(connection.epoch as i32)), + answering_connection_server_id: ActiveValue::set(Some(ServerId( + connection.epoch as i32, + ))), answering_connection_lost: ActiveValue::set(false), ..Default::default() }) @@ -1353,7 +1348,7 @@ impl Database { .eq(connection.id as i32), ) .add( - room_participant::Column::AnsweringConnectionEpoch + room_participant::Column::AnsweringConnectionServerId .eq(connection.epoch as i32), ), ) @@ -1376,7 +1371,7 @@ impl Database { .eq(connection.id as i32), ) .add( - room_participant::Column::CallingConnectionEpoch + room_participant::Column::CallingConnectionServerId .eq(connection.epoch as i32), ) .add(room_participant::Column::AnsweringConnectionId.is_null()), @@ -1412,7 +1407,7 @@ impl Database { project_collaborator::Column::ConnectionId.eq(connection.id as i32), ) .add( - project_collaborator::Column::ConnectionEpoch + project_collaborator::Column::ConnectionServerId .eq(connection.epoch as i32), ), ) @@ -1437,7 +1432,7 @@ impl Database { }); let collaborator_connection_id = ConnectionId { - epoch: collaborator.connection_epoch as u32, + epoch: collaborator.connection_server_id.0 as u32, id: collaborator.connection_id as u32, }; if collaborator_connection_id != connection { @@ -1459,7 +1454,7 @@ impl Database { project_collaborator::Column::ConnectionId.eq(connection.id as i32), ) .add( - project_collaborator::Column::ConnectionEpoch + project_collaborator::Column::ConnectionServerId .eq(connection.epoch as i32), ), ) @@ -1472,7 +1467,9 @@ impl Database { Condition::all() .add(project::Column::RoomId.eq(room_id)) .add(project::Column::HostConnectionId.eq(connection.id as i32)) - .add(project::Column::HostConnectionEpoch.eq(connection.epoch as i32)), + .add( + project::Column::HostConnectionServerId.eq(connection.epoch as i32), + ), ) .exec(&*tx) .await?; @@ -1538,7 +1535,7 @@ impl Database { .eq(connection.id as i32), ) .add( - room_participant::Column::AnsweringConnectionEpoch + room_participant::Column::AnsweringConnectionServerId .eq(connection.epoch as i32), ), ) @@ -1573,7 +1570,7 @@ impl Database { .eq(connection.id as i32), ) .add( - room_participant::Column::AnsweringConnectionEpoch + room_participant::Column::AnsweringConnectionServerId .eq(connection.epoch as i32), ), ) @@ -1595,7 +1592,7 @@ impl Database { Condition::all() .add(project_collaborator::Column::ConnectionId.eq(connection.id as i32)) .add( - project_collaborator::Column::ConnectionEpoch + project_collaborator::Column::ConnectionServerId .eq(connection.epoch as i32), ), ) @@ -1606,7 +1603,7 @@ impl Database { Condition::all() .add(project_collaborator::Column::ConnectionId.eq(connection.id as i32)) .add( - project_collaborator::Column::ConnectionEpoch + project_collaborator::Column::ConnectionServerId .eq(connection.epoch as i32), ), ) @@ -1624,7 +1621,7 @@ impl Database { .into_iter() .map(|collaborator| ConnectionId { id: collaborator.connection_id as u32, - epoch: collaborator.connection_epoch as u32, + epoch: collaborator.connection_server_id.0 as u32, }) .collect(); @@ -1633,7 +1630,7 @@ impl Database { host_user_id: project.host_user_id, host_connection_id: ConnectionId { id: project.host_connection_id as u32, - epoch: project.host_connection_epoch as u32, + epoch: project.host_connection_server_id.0 as u32, }, connection_ids, }); @@ -1644,7 +1641,7 @@ impl Database { .filter( Condition::all() .add(project::Column::HostConnectionId.eq(connection.id as i32)) - .add(project::Column::HostConnectionEpoch.eq(connection.epoch as i32)), + 
.add(project::Column::HostConnectionServerId.eq(connection.epoch as i32)), ) .exec(&*tx) .await?; @@ -1696,9 +1693,9 @@ impl Database { let mut pending_participants = Vec::new(); while let Some(db_participant) = db_participants.next().await { let db_participant = db_participant?; - if let Some((answering_connection_id, answering_connection_epoch)) = db_participant + if let Some((answering_connection_id, answering_connection_server_id)) = db_participant .answering_connection_id - .zip(db_participant.answering_connection_epoch) + .zip(db_participant.answering_connection_server_id) { let location = match ( db_participant.location_kind, @@ -1720,7 +1717,7 @@ impl Database { }; let answering_connection = ConnectionId { - epoch: answering_connection_epoch as u32, + epoch: answering_connection_server_id.0 as u32, id: answering_connection_id as u32, }; participants.insert( @@ -1751,7 +1748,7 @@ impl Database { while let Some(row) = db_projects.next().await { let (db_project, db_worktree) = row?; let host_connection = ConnectionId { - epoch: db_project.host_connection_epoch as u32, + epoch: db_project.host_connection_server_id.0 as u32, id: db_project.host_connection_id as u32, }; if let Some(participant) = participants.get_mut(&host_connection) { @@ -1820,7 +1817,7 @@ impl Database { .eq(connection.id as i32), ) .add( - room_participant::Column::AnsweringConnectionEpoch + room_participant::Column::AnsweringConnectionServerId .eq(connection.epoch as i32), ), ) @@ -1835,7 +1832,7 @@ impl Database { room_id: ActiveValue::set(participant.room_id), host_user_id: ActiveValue::set(participant.user_id), host_connection_id: ActiveValue::set(connection.id as i32), - host_connection_epoch: ActiveValue::set(connection.epoch as i32), + host_connection_server_id: ActiveValue::set(ServerId(connection.epoch as i32)), ..Default::default() } .insert(&*tx) @@ -1860,7 +1857,7 @@ impl Database { project_collaborator::ActiveModel { project_id: ActiveValue::set(project.id), connection_id: ActiveValue::set(connection.id as i32), - connection_epoch: ActiveValue::set(connection.epoch as i32), + connection_server_id: ActiveValue::set(ServerId(connection.epoch as i32)), user_id: ActiveValue::set(participant.user_id), replica_id: ActiveValue::set(ReplicaId(0)), is_host: ActiveValue::set(true), @@ -1888,7 +1885,7 @@ impl Database { .await? .ok_or_else(|| anyhow!("project not found"))?; let host_connection = ConnectionId { - epoch: project.host_connection_epoch as u32, + epoch: project.host_connection_server_id.0 as u32, id: project.host_connection_id as u32, }; if host_connection == connection { @@ -1916,7 +1913,7 @@ impl Database { .filter( Condition::all() .add(project::Column::HostConnectionId.eq(connection.id as i32)) - .add(project::Column::HostConnectionEpoch.eq(connection.epoch as i32)), + .add(project::Column::HostConnectionServerId.eq(connection.epoch as i32)), ) .one(&*tx) .await? @@ -1974,7 +1971,7 @@ impl Database { .filter( Condition::all() .add(project::Column::HostConnectionId.eq(connection.id as i32)) - .add(project::Column::HostConnectionEpoch.eq(connection.epoch as i32)), + .add(project::Column::HostConnectionServerId.eq(connection.epoch as i32)), ) .one(&*tx) .await? @@ -2071,7 +2068,7 @@ impl Database { .await? 
.ok_or_else(|| anyhow!("no such project"))?; let host_connection = ConnectionId { - epoch: project.host_connection_epoch as u32, + epoch: project.host_connection_server_id.0 as u32, id: project.host_connection_id as u32, }; if host_connection != connection { @@ -2128,7 +2125,7 @@ impl Database { .await? .ok_or_else(|| anyhow!("no such project"))?; let host_connection = ConnectionId { - epoch: project.host_connection_epoch as u32, + epoch: project.host_connection_server_id.0 as u32, id: project.host_connection_id as u32, }; if host_connection != connection { @@ -2173,7 +2170,7 @@ impl Database { .eq(connection.id as i32), ) .add( - room_participant::Column::AnsweringConnectionEpoch + room_participant::Column::AnsweringConnectionServerId .eq(connection.epoch as i32), ), ) @@ -2204,7 +2201,7 @@ impl Database { let new_collaborator = project_collaborator::ActiveModel { project_id: ActiveValue::set(project_id), connection_id: ActiveValue::set(connection.id as i32), - connection_epoch: ActiveValue::set(connection.epoch as i32), + connection_server_id: ActiveValue::set(ServerId(connection.epoch as i32)), user_id: ActiveValue::set(participant.user_id), replica_id: ActiveValue::set(replica_id), is_host: ActiveValue::set(false), @@ -2315,7 +2312,7 @@ impl Database { .add(project_collaborator::Column::ProjectId.eq(project_id)) .add(project_collaborator::Column::ConnectionId.eq(connection.id as i32)) .add( - project_collaborator::Column::ConnectionEpoch + project_collaborator::Column::ConnectionServerId .eq(connection.epoch as i32), ), ) @@ -2336,7 +2333,7 @@ impl Database { let connection_ids = collaborators .into_iter() .map(|collaborator| ConnectionId { - epoch: collaborator.connection_epoch as u32, + epoch: collaborator.connection_server_id.0 as u32, id: collaborator.connection_id as u32, }) .collect(); @@ -2345,7 +2342,7 @@ impl Database { id: project_id, host_user_id: project.host_user_id, host_connection_id: ConnectionId { - epoch: project.host_connection_epoch as u32, + epoch: project.host_connection_server_id.0 as u32, id: project.host_connection_id as u32, }, connection_ids, @@ -2372,7 +2369,7 @@ impl Database { if collaborators.iter().any(|collaborator| { let collaborator_connection = ConnectionId { - epoch: collaborator.connection_epoch as u32, + epoch: collaborator.connection_server_id.0 as u32, id: collaborator.connection_id as u32, }; collaborator_connection == connection @@ -2404,7 +2401,7 @@ impl Database { while let Some(participant) = participants.next().await { let participant = participant?; connection_ids.insert(ConnectionId { - epoch: participant.connection_epoch as u32, + epoch: participant.connection_server_id.0 as u32, id: participant.connection_id as u32, }); } @@ -2436,7 +2433,7 @@ impl Database { while let Some(participant) = participants.next().await { let participant = participant?; guest_connection_ids.push(ConnectionId { - epoch: participant.connection_epoch as u32, + epoch: participant.connection_server_id.0 as u32, id: participant.connection_id as u32, }); } @@ -2817,7 +2814,7 @@ id_type!(RoomParticipantId); id_type!(ProjectId); id_type!(ProjectCollaboratorId); id_type!(ReplicaId); -id_type!(ServerEpoch); +id_type!(ServerId); id_type!(SignupId); id_type!(UserId); diff --git a/crates/collab/src/db/project.rs b/crates/collab/src/db/project.rs index b8cf321e51e465aae6a011cb6159cd1ec1aa39ce..1279d7d4494de3bfeb382ac0322b2de0354ceaaf 100644 --- a/crates/collab/src/db/project.rs +++ b/crates/collab/src/db/project.rs @@ -1,4 +1,4 @@ -use super::{ProjectId, RoomId, UserId}; 
+use super::{ProjectId, RoomId, ServerId, UserId}; use sea_orm::entity::prelude::*; #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] @@ -9,7 +9,7 @@ pub struct Model { pub room_id: RoomId, pub host_user_id: UserId, pub host_connection_id: i32, - pub host_connection_epoch: i32, + pub host_connection_server_id: ServerId, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/db/project_collaborator.rs b/crates/collab/src/db/project_collaborator.rs index e12b113ff9999bdede1b5ff0e71905b112926eac..a1a99d1170ae5cfb254fb1c85ce50034cf03d13b 100644 --- a/crates/collab/src/db/project_collaborator.rs +++ b/crates/collab/src/db/project_collaborator.rs @@ -1,4 +1,4 @@ -use super::{ProjectCollaboratorId, ProjectId, ReplicaId, UserId}; +use super::{ProjectCollaboratorId, ProjectId, ReplicaId, ServerId, UserId}; use sea_orm::entity::prelude::*; #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] @@ -8,7 +8,7 @@ pub struct Model { pub id: ProjectCollaboratorId, pub project_id: ProjectId, pub connection_id: i32, - pub connection_epoch: i32, + pub connection_server_id: ServerId, pub user_id: UserId, pub replica_id: ReplicaId, pub is_host: bool, diff --git a/crates/collab/src/db/room_participant.rs b/crates/collab/src/db/room_participant.rs index 265febd5454241015f88a9e36a582a6fa508e9a3..f939a3bfb8709132b1aa138ea43e21a25b0f61af 100644 --- a/crates/collab/src/db/room_participant.rs +++ b/crates/collab/src/db/room_participant.rs @@ -1,4 +1,4 @@ -use super::{ProjectId, RoomId, RoomParticipantId, UserId}; +use super::{ProjectId, RoomId, RoomParticipantId, ServerId, UserId}; use sea_orm::entity::prelude::*; #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] @@ -9,14 +9,14 @@ pub struct Model { pub room_id: RoomId, pub user_id: UserId, pub answering_connection_id: Option, - pub answering_connection_epoch: Option, + pub answering_connection_server_id: Option, pub answering_connection_lost: bool, pub location_kind: Option, pub location_project_id: Option, pub initial_project_id: Option, pub calling_user_id: UserId, pub calling_connection_id: i32, - pub calling_connection_epoch: i32, + pub calling_connection_server_id: Option, } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/db/server.rs b/crates/collab/src/db/server.rs index 6735770a0f0e7e8fbafe4c99bcd3e11e207c2b89..e3905f244892e8befd97f52afc119341106cb566 100644 --- a/crates/collab/src/db/server.rs +++ b/crates/collab/src/db/server.rs @@ -1,11 +1,11 @@ -use super::ServerEpoch; +use super::ServerId; use sea_orm::entity::prelude::*; #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] #[sea_orm(table_name = "servers")] pub struct Model { #[sea_orm(primary_key)] - pub epoch: ServerEpoch, + pub id: ServerId, pub environment: String, } diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index b6b8a780a7ec2fc9bc930cec0c146c0601280d5a..c229f0a6172b68c361e31c47080b1503db40a519 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -410,6 +410,8 @@ test_both_dbs!( test_project_count_sqlite, db, { + let epoch = db.create_server("test").await.unwrap().0 as u32; + let user1 = db .create_user( &format!("admin@example.com"), @@ -436,7 +438,7 @@ test_both_dbs!( .unwrap(); let room_id = RoomId::from_proto( - db.create_room(user1.user_id, ConnectionId { epoch: 0, id: 0 }, "") + db.create_room(user1.user_id, ConnectionId { epoch, id: 0 }, "") .await .unwrap() .id, @@ -444,36 +446,34 @@ test_both_dbs!( db.call( room_id, user1.user_id, - 
ConnectionId { epoch: 0, id: 0 }, + ConnectionId { epoch, id: 0 }, user2.user_id, None, ) .await .unwrap(); - db.join_room(room_id, user2.user_id, ConnectionId { epoch: 0, id: 1 }) + db.join_room(room_id, user2.user_id, ConnectionId { epoch, id: 1 }) .await .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 0); - db.share_project(room_id, ConnectionId { epoch: 0, id: 1 }, &[]) + db.share_project(room_id, ConnectionId { epoch, id: 1 }, &[]) .await .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 1); - db.share_project(room_id, ConnectionId { epoch: 0, id: 1 }, &[]) + db.share_project(room_id, ConnectionId { epoch, id: 1 }, &[]) .await .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 2); // Projects shared by admins aren't counted. - db.share_project(room_id, ConnectionId { epoch: 0, id: 0 }, &[]) + db.share_project(room_id, ConnectionId { epoch, id: 0 }, &[]) .await .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 2); - db.leave_room(ConnectionId { epoch: 0, id: 1 }) - .await - .unwrap(); + db.leave_room(ConnectionId { epoch, id: 1 }).await.unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 0); } ); diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index d3a1c0ebf13e0a194f254ac8e044bf5ed05be1a7..9ed50e802c4ebe5388be5c05ad86afaf136c17c9 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -2,7 +2,7 @@ mod connection_pool; use crate::{ auth, - db::{self, Database, ProjectId, RoomId, ServerEpoch, User, UserId}, + db::{self, Database, ProjectId, RoomId, ServerId, User, UserId}, executor::Executor, AppState, Result, }; @@ -138,7 +138,7 @@ impl Deref for DbHandle { } pub struct Server { - epoch: parking_lot::Mutex, + epoch: parking_lot::Mutex, peer: Arc, pub(crate) connection_pool: Arc>, app_state: Arc, @@ -169,7 +169,7 @@ where } impl Server { - pub fn new(epoch: ServerEpoch, app_state: Arc, executor: Executor) -> Arc { + pub fn new(epoch: ServerId, app_state: Arc, executor: Executor) -> Arc { let mut server = Self { epoch: parking_lot::Mutex::new(epoch), peer: Peer::new(epoch.0 as u32), @@ -370,7 +370,7 @@ impl Server { } #[cfg(test)] - pub fn reset(&self, epoch: ServerEpoch) { + pub fn reset(&self, epoch: ServerId) { self.teardown(); *self.epoch.lock() = epoch; self.peer.reset(epoch.0 as u32); @@ -1156,7 +1156,7 @@ async fn join_project( .iter() .map(|collaborator| { let peer_id = proto::PeerId { - epoch: collaborator.connection_epoch as u32, + epoch: collaborator.connection_server_id.0 as u32, id: collaborator.connection_id as u32, }; proto::Collaborator { @@ -1412,7 +1412,7 @@ where .find(|collaborator| collaborator.is_host) .ok_or_else(|| anyhow!("host not found"))?; ConnectionId { - epoch: host.connection_epoch as u32, + epoch: host.connection_server_id.0 as u32, id: host.connection_id as u32, } }; @@ -1443,7 +1443,7 @@ async fn save_buffer( .find(|collaborator| collaborator.is_host) .ok_or_else(|| anyhow!("host not found"))?; ConnectionId { - epoch: host.connection_epoch as u32, + epoch: host.connection_server_id.0 as u32, id: host.connection_id as u32, } }; @@ -1459,13 +1459,13 @@ async fn save_buffer( .await?; collaborators.retain(|collaborator| { let collaborator_connection = ConnectionId { - epoch: collaborator.connection_epoch as u32, + epoch: collaborator.connection_server_id.0 as u32, id: collaborator.connection_id as u32, }; collaborator_connection != session.connection_id }); let project_connection_ids = 
collaborators.iter().map(|collaborator| ConnectionId { - epoch: collaborator.connection_epoch as u32, + epoch: collaborator.connection_server_id.0 as u32, id: collaborator.connection_id as u32, }); broadcast(host_connection_id, project_connection_ids, |conn_id| { From af77f1188a1c7d227abc6151d402a197e69a290a Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 15 Dec 2022 09:58:25 +0100 Subject: [PATCH 22/29] Re-add server_id indices for room_participants/project_collaborators --- .../migrations.sqlite/20221109000000_test_schema.sql | 10 +++++----- ...0221214144346_change_epoch_from_uuid_to_integer.sql | 6 +++++- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 4ae6f54e372fd16ea0916e6b6e5dc5fed8859eae..16f2ffd9566f4c007d6409d9a0fdc700a23f18e7 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -110,9 +110,9 @@ CREATE TABLE "project_collaborators" ( ); CREATE INDEX "index_project_collaborators_on_project_id" ON "project_collaborators" ("project_id"); CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_and_replica_id" ON "project_collaborators" ("project_id", "replica_id"); -CREATE INDEX "index_project_collaborators_on_connection_epoch" ON "project_collaborators" ("connection_epoch"); +CREATE INDEX "index_project_collaborators_on_connection_server_id" ON "project_collaborators" ("connection_server_id"); CREATE INDEX "index_project_collaborators_on_connection_id" ON "project_collaborators" ("connection_id"); -CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_epoch" ON "project_collaborators" ("project_id", "connection_id", "connection_epoch"); +CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_server_id" ON "project_collaborators" ("project_id", "connection_id", "connection_server_id"); CREATE TABLE "room_participants" ( "id" INTEGER PRIMARY KEY AUTOINCREMENT, @@ -130,10 +130,10 @@ CREATE TABLE "room_participants" ( ); CREATE UNIQUE INDEX "index_room_participants_on_user_id" ON "room_participants" ("user_id"); CREATE INDEX "index_room_participants_on_room_id" ON "room_participants" ("room_id"); -CREATE INDEX "index_room_participants_on_answering_connection_epoch" ON "room_participants" ("answering_connection_epoch"); -CREATE INDEX "index_room_participants_on_calling_connection_epoch" ON "room_participants" ("calling_connection_epoch"); +CREATE INDEX "index_room_participants_on_answering_connection_server_id" ON "room_participants" ("answering_connection_server_id"); +CREATE INDEX "index_room_participants_on_calling_connection_server_id" ON "room_participants" ("calling_connection_server_id"); CREATE INDEX "index_room_participants_on_answering_connection_id" ON "room_participants" ("answering_connection_id"); -CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_epoch" ON "room_participants" ("answering_connection_id", "answering_connection_epoch"); +CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_server_id" ON "room_participants" ("answering_connection_id", "answering_connection_server_id"); CREATE TABLE "servers" ( "id" INTEGER PRIMARY KEY AUTOINCREMENT, diff --git a/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql 
b/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql index e9878a96e24d17a5249a7952be7b87a00243274c..5a05103be106fb43478e824885895f88b88e5f05 100644 --- a/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql +++ b/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql @@ -12,6 +12,8 @@ DELETE FROM project_collaborators; ALTER TABLE project_collaborators DROP COLUMN connection_epoch, ADD COLUMN connection_server_id INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE; +CREATE INDEX "index_project_collaborators_on_connection_server_id" ON "project_collaborators" ("connection_server_id"); +CREATE UNIQUE INDEX "index_project_collaborators_on_project_id_connection_id_and_server_id" ON "project_collaborators" ("project_id", "connection_id", "connection_server_id"); DELETE FROM room_participants; ALTER TABLE room_participants @@ -19,4 +21,6 @@ ALTER TABLE room_participants DROP COLUMN calling_connection_epoch, ADD COLUMN answering_connection_server_id INTEGER REFERENCES servers (id) ON DELETE CASCADE, ADD COLUMN calling_connection_server_id INTEGER REFERENCES servers (id) ON DELETE SET NULL; - +CREATE INDEX "index_room_participants_on_answering_connection_server_id" ON "room_participants" ("answering_connection_server_id"); +CREATE INDEX "index_room_participants_on_calling_connection_server_id" ON "room_participants" ("calling_connection_server_id"); +CREATE UNIQUE INDEX "index_room_participants_on_answering_connection_id_and_answering_connection_server_id" ON "room_participants" ("answering_connection_id", "answering_connection_server_id"); From 688f179256768926ec0c3ce35b3e695ff4179bb6 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 15 Dec 2022 10:15:59 +0100 Subject: [PATCH 23/29] Use "id" nomenclature more consistently --- crates/collab/src/db.rs | 109 ++++++++++++++----------- crates/collab/src/db/tests.rs | 18 ++-- crates/collab/src/integration_tests.rs | 10 ++- crates/collab/src/rpc.rs | 39 +++++---- crates/rpc/proto/zed.proto | 2 +- crates/rpc/src/macros.rs | 2 +- crates/rpc/src/peer.rs | 10 +-- crates/rpc/src/proto.rs | 29 ++++--- 8 files changed, 126 insertions(+), 93 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 71faf16b4518a523f5929fcc771138ed3eb3fa0f..461fe06085b63a3b6b4dc6bc94b2ea0fe3fd0969 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -125,11 +125,13 @@ impl Database { pub async fn delete_stale_projects( &self, - new_epoch: ServerId, environment: &str, + new_server_id: ServerId, ) -> Result<()> { self.transaction(|tx| async move { - let stale_server_epochs = self.stale_server_ids(environment, new_epoch, &tx).await?; + let stale_server_epochs = self + .stale_server_ids(environment, new_server_id, &tx) + .await?; project_collaborator::Entity::delete_many() .filter( project_collaborator::Column::ConnectionServerId @@ -151,8 +153,8 @@ impl Database { pub async fn stale_room_ids( &self, - new_epoch: ServerId, environment: &str, + new_server_id: ServerId, ) -> Result> { self.transaction(|tx| async move { #[derive(Copy, Clone, Debug, EnumIter, DeriveColumn)] @@ -160,7 +162,9 @@ impl Database { RoomId, } - let stale_server_epochs = self.stale_server_ids(environment, new_epoch, &tx).await?; + let stale_server_epochs = self + .stale_server_ids(environment, new_server_id, &tx) + .await?; Ok(room_participant::Entity::find() .select_only() .column(room_participant::Column::RoomId) @@ -179,13 +183,13 @@ impl Database { pub async fn refresh_room( 
&self, room_id: RoomId, - new_epoch: ServerId, + new_server_id: ServerId, ) -> Result> { self.room_transaction(|tx| async move { let stale_participant_filter = Condition::all() .add(room_participant::Column::RoomId.eq(room_id)) .add(room_participant::Column::AnsweringConnectionId.is_not_null()) - .add(room_participant::Column::AnsweringConnectionServerId.ne(new_epoch)); + .add(room_participant::Column::AnsweringConnectionServerId.ne(new_server_id)); let stale_participant_user_ids = room_participant::Entity::find() .filter(stale_participant_filter.clone()) @@ -229,13 +233,17 @@ impl Database { .await } - pub async fn delete_stale_servers(&self, new_epoch: ServerId, environment: &str) -> Result<()> { + pub async fn delete_stale_servers( + &self, + new_server_id: ServerId, + environment: &str, + ) -> Result<()> { self.transaction(|tx| async move { server::Entity::delete_many() .filter( Condition::all() .add(server::Column::Environment.eq(environment)) - .add(server::Column::Id.ne(new_epoch)), + .add(server::Column::Id.ne(new_server_id)), ) .exec(&*tx) .await?; @@ -247,14 +255,14 @@ impl Database { async fn stale_server_ids( &self, environment: &str, - new_epoch: ServerId, + new_server_id: ServerId, tx: &DatabaseTransaction, ) -> Result> { let stale_servers = server::Entity::find() .filter( Condition::all() .add(server::Column::Environment.eq(environment)) - .add(server::Column::Id.ne(new_epoch)), + .add(server::Column::Id.ne(new_server_id)), ) .all(&*tx) .await?; @@ -1155,13 +1163,13 @@ impl Database { user_id: ActiveValue::set(user_id), answering_connection_id: ActiveValue::set(Some(connection.id as i32)), answering_connection_server_id: ActiveValue::set(Some(ServerId( - connection.epoch as i32, + connection.owner_id as i32, ))), answering_connection_lost: ActiveValue::set(false), calling_user_id: ActiveValue::set(user_id), calling_connection_id: ActiveValue::set(connection.id as i32), calling_connection_server_id: ActiveValue::set(Some(ServerId( - connection.epoch as i32, + connection.owner_id as i32, ))), ..Default::default() } @@ -1190,7 +1198,7 @@ impl Database { calling_user_id: ActiveValue::set(calling_user_id), calling_connection_id: ActiveValue::set(calling_connection.id as i32), calling_connection_server_id: ActiveValue::set(Some(ServerId( - calling_connection.epoch as i32, + calling_connection.owner_id as i32, ))), initial_project_id: ActiveValue::set(initial_project_id), ..Default::default() @@ -1274,7 +1282,7 @@ impl Database { ) .add( room_participant::Column::CallingConnectionServerId - .eq(calling_connection.epoch as i32), + .eq(calling_connection.owner_id as i32), ) .add(room_participant::Column::AnsweringConnectionId.is_null()), ) @@ -1314,14 +1322,14 @@ impl Database { .add(room_participant::Column::AnsweringConnectionLost.eq(true)) .add( room_participant::Column::AnsweringConnectionServerId - .ne(connection.epoch as i32), + .ne(connection.owner_id as i32), ), ), ) .set(room_participant::ActiveModel { answering_connection_id: ActiveValue::set(Some(connection.id as i32)), answering_connection_server_id: ActiveValue::set(Some(ServerId( - connection.epoch as i32, + connection.owner_id as i32, ))), answering_connection_lost: ActiveValue::set(false), ..Default::default() @@ -1349,7 +1357,7 @@ impl Database { ) .add( room_participant::Column::AnsweringConnectionServerId - .eq(connection.epoch as i32), + .eq(connection.owner_id as i32), ), ) .one(&*tx) @@ -1372,7 +1380,7 @@ impl Database { ) .add( room_participant::Column::CallingConnectionServerId - .eq(connection.epoch as i32), 
+ .eq(connection.owner_id as i32), ) .add(room_participant::Column::AnsweringConnectionId.is_null()), ) @@ -1408,7 +1416,7 @@ impl Database { ) .add( project_collaborator::Column::ConnectionServerId - .eq(connection.epoch as i32), + .eq(connection.owner_id as i32), ), ) .into_values::<_, QueryProjectIds>() @@ -1432,7 +1440,7 @@ impl Database { }); let collaborator_connection_id = ConnectionId { - epoch: collaborator.connection_server_id.0 as u32, + owner_id: collaborator.connection_server_id.0 as u32, id: collaborator.connection_id as u32, }; if collaborator_connection_id != connection { @@ -1455,7 +1463,7 @@ impl Database { ) .add( project_collaborator::Column::ConnectionServerId - .eq(connection.epoch as i32), + .eq(connection.owner_id as i32), ), ) .exec(&*tx) @@ -1468,7 +1476,8 @@ impl Database { .add(project::Column::RoomId.eq(room_id)) .add(project::Column::HostConnectionId.eq(connection.id as i32)) .add( - project::Column::HostConnectionServerId.eq(connection.epoch as i32), + project::Column::HostConnectionServerId + .eq(connection.owner_id as i32), ), ) .exec(&*tx) @@ -1536,7 +1545,7 @@ impl Database { ) .add( room_participant::Column::AnsweringConnectionServerId - .eq(connection.epoch as i32), + .eq(connection.owner_id as i32), ), ) .set(room_participant::ActiveModel { @@ -1571,7 +1580,7 @@ impl Database { ) .add( room_participant::Column::AnsweringConnectionServerId - .eq(connection.epoch as i32), + .eq(connection.owner_id as i32), ), ) .one(&*tx) @@ -1593,7 +1602,7 @@ impl Database { .add(project_collaborator::Column::ConnectionId.eq(connection.id as i32)) .add( project_collaborator::Column::ConnectionServerId - .eq(connection.epoch as i32), + .eq(connection.owner_id as i32), ), ) .all(&*tx) @@ -1604,7 +1613,7 @@ impl Database { .add(project_collaborator::Column::ConnectionId.eq(connection.id as i32)) .add( project_collaborator::Column::ConnectionServerId - .eq(connection.epoch as i32), + .eq(connection.owner_id as i32), ), ) .exec(&*tx) @@ -1621,7 +1630,7 @@ impl Database { .into_iter() .map(|collaborator| ConnectionId { id: collaborator.connection_id as u32, - epoch: collaborator.connection_server_id.0 as u32, + owner_id: collaborator.connection_server_id.0 as u32, }) .collect(); @@ -1630,7 +1639,7 @@ impl Database { host_user_id: project.host_user_id, host_connection_id: ConnectionId { id: project.host_connection_id as u32, - epoch: project.host_connection_server_id.0 as u32, + owner_id: project.host_connection_server_id.0 as u32, }, connection_ids, }); @@ -1641,7 +1650,9 @@ impl Database { .filter( Condition::all() .add(project::Column::HostConnectionId.eq(connection.id as i32)) - .add(project::Column::HostConnectionServerId.eq(connection.epoch as i32)), + .add( + project::Column::HostConnectionServerId.eq(connection.owner_id as i32), + ), ) .exec(&*tx) .await?; @@ -1717,7 +1728,7 @@ impl Database { }; let answering_connection = ConnectionId { - epoch: answering_connection_server_id.0 as u32, + owner_id: answering_connection_server_id.0 as u32, id: answering_connection_id as u32, }; participants.insert( @@ -1748,7 +1759,7 @@ impl Database { while let Some(row) = db_projects.next().await { let (db_project, db_worktree) = row?; let host_connection = ConnectionId { - epoch: db_project.host_connection_server_id.0 as u32, + owner_id: db_project.host_connection_server_id.0 as u32, id: db_project.host_connection_id as u32, }; if let Some(participant) = participants.get_mut(&host_connection) { @@ -1818,7 +1829,7 @@ impl Database { ) .add( 
room_participant::Column::AnsweringConnectionServerId - .eq(connection.epoch as i32), + .eq(connection.owner_id as i32), ), ) .one(&*tx) @@ -1832,7 +1843,7 @@ impl Database { room_id: ActiveValue::set(participant.room_id), host_user_id: ActiveValue::set(participant.user_id), host_connection_id: ActiveValue::set(connection.id as i32), - host_connection_server_id: ActiveValue::set(ServerId(connection.epoch as i32)), + host_connection_server_id: ActiveValue::set(ServerId(connection.owner_id as i32)), ..Default::default() } .insert(&*tx) @@ -1857,7 +1868,7 @@ impl Database { project_collaborator::ActiveModel { project_id: ActiveValue::set(project.id), connection_id: ActiveValue::set(connection.id as i32), - connection_server_id: ActiveValue::set(ServerId(connection.epoch as i32)), + connection_server_id: ActiveValue::set(ServerId(connection.owner_id as i32)), user_id: ActiveValue::set(participant.user_id), replica_id: ActiveValue::set(ReplicaId(0)), is_host: ActiveValue::set(true), @@ -1885,7 +1896,7 @@ impl Database { .await? .ok_or_else(|| anyhow!("project not found"))?; let host_connection = ConnectionId { - epoch: project.host_connection_server_id.0 as u32, + owner_id: project.host_connection_server_id.0 as u32, id: project.host_connection_id as u32, }; if host_connection == connection { @@ -1913,7 +1924,9 @@ impl Database { .filter( Condition::all() .add(project::Column::HostConnectionId.eq(connection.id as i32)) - .add(project::Column::HostConnectionServerId.eq(connection.epoch as i32)), + .add( + project::Column::HostConnectionServerId.eq(connection.owner_id as i32), + ), ) .one(&*tx) .await? @@ -1971,7 +1984,9 @@ impl Database { .filter( Condition::all() .add(project::Column::HostConnectionId.eq(connection.id as i32)) - .add(project::Column::HostConnectionServerId.eq(connection.epoch as i32)), + .add( + project::Column::HostConnectionServerId.eq(connection.owner_id as i32), + ), ) .one(&*tx) .await? @@ -2068,7 +2083,7 @@ impl Database { .await? .ok_or_else(|| anyhow!("no such project"))?; let host_connection = ConnectionId { - epoch: project.host_connection_server_id.0 as u32, + owner_id: project.host_connection_server_id.0 as u32, id: project.host_connection_id as u32, }; if host_connection != connection { @@ -2125,7 +2140,7 @@ impl Database { .await? 
.ok_or_else(|| anyhow!("no such project"))?; let host_connection = ConnectionId { - epoch: project.host_connection_server_id.0 as u32, + owner_id: project.host_connection_server_id.0 as u32, id: project.host_connection_id as u32, }; if host_connection != connection { @@ -2171,7 +2186,7 @@ impl Database { ) .add( room_participant::Column::AnsweringConnectionServerId - .eq(connection.epoch as i32), + .eq(connection.owner_id as i32), ), ) .one(&*tx) @@ -2201,7 +2216,7 @@ impl Database { let new_collaborator = project_collaborator::ActiveModel { project_id: ActiveValue::set(project_id), connection_id: ActiveValue::set(connection.id as i32), - connection_server_id: ActiveValue::set(ServerId(connection.epoch as i32)), + connection_server_id: ActiveValue::set(ServerId(connection.owner_id as i32)), user_id: ActiveValue::set(participant.user_id), replica_id: ActiveValue::set(replica_id), is_host: ActiveValue::set(false), @@ -2313,7 +2328,7 @@ impl Database { .add(project_collaborator::Column::ConnectionId.eq(connection.id as i32)) .add( project_collaborator::Column::ConnectionServerId - .eq(connection.epoch as i32), + .eq(connection.owner_id as i32), ), ) .exec(&*tx) @@ -2333,7 +2348,7 @@ impl Database { let connection_ids = collaborators .into_iter() .map(|collaborator| ConnectionId { - epoch: collaborator.connection_server_id.0 as u32, + owner_id: collaborator.connection_server_id.0 as u32, id: collaborator.connection_id as u32, }) .collect(); @@ -2342,7 +2357,7 @@ impl Database { id: project_id, host_user_id: project.host_user_id, host_connection_id: ConnectionId { - epoch: project.host_connection_server_id.0 as u32, + owner_id: project.host_connection_server_id.0 as u32, id: project.host_connection_id as u32, }, connection_ids, @@ -2369,7 +2384,7 @@ impl Database { if collaborators.iter().any(|collaborator| { let collaborator_connection = ConnectionId { - epoch: collaborator.connection_server_id.0 as u32, + owner_id: collaborator.connection_server_id.0 as u32, id: collaborator.connection_id as u32, }; collaborator_connection == connection @@ -2401,7 +2416,7 @@ impl Database { while let Some(participant) = participants.next().await { let participant = participant?; connection_ids.insert(ConnectionId { - epoch: participant.connection_server_id.0 as u32, + owner_id: participant.connection_server_id.0 as u32, id: participant.connection_id as u32, }); } @@ -2433,7 +2448,7 @@ impl Database { while let Some(participant) = participants.next().await { let participant = participant?; guest_connection_ids.push(ConnectionId { - epoch: participant.connection_server_id.0 as u32, + owner_id: participant.connection_server_id.0 as u32, id: participant.connection_id as u32, }); } diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index c229f0a6172b68c361e31c47080b1503db40a519..9d42c11f8bba88bd164eaab443857d910b97230d 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -410,7 +410,7 @@ test_both_dbs!( test_project_count_sqlite, db, { - let epoch = db.create_server("test").await.unwrap().0 as u32; + let owner_id = db.create_server("test").await.unwrap().0 as u32; let user1 = db .create_user( @@ -438,7 +438,7 @@ test_both_dbs!( .unwrap(); let room_id = RoomId::from_proto( - db.create_room(user1.user_id, ConnectionId { epoch, id: 0 }, "") + db.create_room(user1.user_id, ConnectionId { owner_id, id: 0 }, "") .await .unwrap() .id, @@ -446,34 +446,36 @@ test_both_dbs!( db.call( room_id, user1.user_id, - ConnectionId { epoch, id: 0 }, + ConnectionId { owner_id, id: 0 
}, user2.user_id, None, ) .await .unwrap(); - db.join_room(room_id, user2.user_id, ConnectionId { epoch, id: 1 }) + db.join_room(room_id, user2.user_id, ConnectionId { owner_id, id: 1 }) .await .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 0); - db.share_project(room_id, ConnectionId { epoch, id: 1 }, &[]) + db.share_project(room_id, ConnectionId { owner_id, id: 1 }, &[]) .await .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 1); - db.share_project(room_id, ConnectionId { epoch, id: 1 }, &[]) + db.share_project(room_id, ConnectionId { owner_id, id: 1 }, &[]) .await .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 2); // Projects shared by admins aren't counted. - db.share_project(room_id, ConnectionId { epoch, id: 0 }, &[]) + db.share_project(room_id, ConnectionId { owner_id, id: 0 }, &[]) .await .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 2); - db.leave_room(ConnectionId { epoch, id: 1 }).await.unwrap(); + db.leave_room(ConnectionId { owner_id, id: 1 }) + .await + .unwrap(); assert_eq!(db.project_count_excluding_admins().await.unwrap(), 0); } ); diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index dd18d5aef20f82bd070b6165c2cabf892c477d57..e52b7f6a1de78a1024e4c0d61216353fb81904cb 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -6123,9 +6123,17 @@ async fn test_random_collaboration( 30..=34 => { log::info!("Simulating server restart"); server.reset().await; - deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); + deterministic.advance_clock(RECEIVE_TIMEOUT); server.start().await.unwrap(); deterministic.advance_clock(CLEANUP_TIMEOUT); + let environment = &server.app_state.config.zed_environment; + let stale_room_ids = server + .app_state + .db + .stale_room_ids(environment, server.id()) + .await + .unwrap(); + assert_eq!(stale_room_ids, vec![]); } _ if !op_start_signals.is_empty() => { while operations < max_operations && rng.lock().gen_bool(0.7) { diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 9ed50e802c4ebe5388be5c05ad86afaf136c17c9..d83aa65a6b76a2de1e5b5e0f4592214e6452f3f2 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -138,7 +138,7 @@ impl Deref for DbHandle { } pub struct Server { - epoch: parking_lot::Mutex, + id: parking_lot::Mutex, peer: Arc, pub(crate) connection_pool: Arc>, app_state: Arc, @@ -169,10 +169,10 @@ where } impl Server { - pub fn new(epoch: ServerId, app_state: Arc, executor: Executor) -> Arc { + pub fn new(id: ServerId, app_state: Arc, executor: Executor) -> Arc { let mut server = Self { - epoch: parking_lot::Mutex::new(epoch), - peer: Peer::new(epoch.0 as u32), + id: parking_lot::Mutex::new(id), + peer: Peer::new(id.0 as u32), app_state, executor, connection_pool: Default::default(), @@ -241,7 +241,7 @@ impl Server { } pub async fn start(&self) -> Result<()> { - let epoch = *self.epoch.lock(); + let server_id = *self.id.lock(); let app_state = self.app_state.clone(); let peer = self.peer.clone(); let timeout = self.executor.sleep(CLEANUP_TIMEOUT); @@ -254,7 +254,7 @@ impl Server { tracing::info!("begin deleting stale projects"); app_state .db - .delete_stale_projects(epoch, &app_state.config.zed_environment) + .delete_stale_projects(&app_state.config.zed_environment, server_id) .await?; tracing::info!("finish deleting stale projects"); @@ -266,7 +266,7 @@ impl Server { tracing::info!("cleanup timeout 
expired, retrieving stale rooms"); if let Some(room_ids) = app_state .db - .stale_room_ids(epoch, &app_state.config.zed_environment) + .stale_room_ids(&app_state.config.zed_environment, server_id) .await .trace_err() { @@ -278,7 +278,7 @@ impl Server { let mut delete_live_kit_room = false; if let Ok(mut refreshed_room) = - app_state.db.refresh_room(room_id, epoch).await + app_state.db.refresh_room(room_id, server_id).await { tracing::info!( room_id = room_id.0, @@ -354,7 +354,7 @@ impl Server { app_state .db - .delete_stale_servers(epoch, &app_state.config.zed_environment) + .delete_stale_servers(server_id, &app_state.config.zed_environment) .await .trace_err(); } @@ -370,10 +370,15 @@ impl Server { } #[cfg(test)] - pub fn reset(&self, epoch: ServerId) { + pub fn reset(&self, id: ServerId) { self.teardown(); - *self.epoch.lock() = epoch; - self.peer.reset(epoch.0 as u32); + *self.id.lock() = id; + self.peer.reset(id.0 as u32); + } + + #[cfg(test)] + pub fn id(&self) -> ServerId { + *self.id.lock() } fn add_handler(&mut self, handler: F) -> &mut Self @@ -1156,7 +1161,7 @@ async fn join_project( .iter() .map(|collaborator| { let peer_id = proto::PeerId { - epoch: collaborator.connection_server_id.0 as u32, + owner_id: collaborator.connection_server_id.0 as u32, id: collaborator.connection_id as u32, }; proto::Collaborator { @@ -1412,7 +1417,7 @@ where .find(|collaborator| collaborator.is_host) .ok_or_else(|| anyhow!("host not found"))?; ConnectionId { - epoch: host.connection_server_id.0 as u32, + owner_id: host.connection_server_id.0 as u32, id: host.connection_id as u32, } }; @@ -1443,7 +1448,7 @@ async fn save_buffer( .find(|collaborator| collaborator.is_host) .ok_or_else(|| anyhow!("host not found"))?; ConnectionId { - epoch: host.connection_server_id.0 as u32, + owner_id: host.connection_server_id.0 as u32, id: host.connection_id as u32, } }; @@ -1459,13 +1464,13 @@ async fn save_buffer( .await?; collaborators.retain(|collaborator| { let collaborator_connection = ConnectionId { - epoch: collaborator.connection_server_id.0 as u32, + owner_id: collaborator.connection_server_id.0 as u32, id: collaborator.connection_id as u32, }; collaborator_connection != session.connection_id }); let project_connection_ids = collaborators.iter().map(|collaborator| ConnectionId { - epoch: collaborator.connection_server_id.0 as u32, + owner_id: collaborator.connection_server_id.0 as u32, id: collaborator.connection_id as u32, }); broadcast(host_connection_id, project_connection_ids, |conn_id| { diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index 064bf6f50a4cd96dc40389c842fabce91497abb3..a91b191d57fe7a8c07648e9af7cd35a178326dcf 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package zed.messages; message PeerId { - uint32 epoch = 1; + uint32 owner_id = 1; uint32 id = 2; } diff --git a/crates/rpc/src/macros.rs b/crates/rpc/src/macros.rs index ebed976968e14590a472efffb3caf9d2eb25ed44..89e605540da1157f5530ad7236b23358dc127c1a 100644 --- a/crates/rpc/src/macros.rs +++ b/crates/rpc/src/macros.rs @@ -7,7 +7,7 @@ macro_rules! 
messages { Some(Box::new(TypedEnvelope { sender_id, original_sender_id: envelope.original_sender_id.map(|original_sender| PeerId { - epoch: original_sender.epoch, + owner_id: original_sender.owner_id, id: original_sender.id }), message_id: envelope.id, diff --git a/crates/rpc/src/peer.rs b/crates/rpc/src/peer.rs index fc6712e04fe5544c07141ce0dc3cff17a675eca4..59a30a0f4747107fb55a0ec3be456c9e6640958b 100644 --- a/crates/rpc/src/peer.rs +++ b/crates/rpc/src/peer.rs @@ -25,14 +25,14 @@ use tracing::instrument; #[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Serialize)] pub struct ConnectionId { - pub epoch: u32, + pub owner_id: u32, pub id: u32, } impl Into for ConnectionId { fn into(self) -> PeerId { PeerId { - epoch: self.epoch, + owner_id: self.owner_id, id: self.id, } } @@ -41,7 +41,7 @@ impl Into for ConnectionId { impl From for ConnectionId { fn from(peer_id: PeerId) -> Self { Self { - epoch: peer_id.epoch, + owner_id: peer_id.owner_id, id: peer_id.id, } } @@ -49,7 +49,7 @@ impl From for ConnectionId { impl fmt::Display for ConnectionId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}/{}", self.epoch, self.id) + write!(f, "{}/{}", self.owner_id, self.id) } } @@ -157,7 +157,7 @@ impl Peer { let (outgoing_tx, mut outgoing_rx) = mpsc::unbounded(); let connection_id = ConnectionId { - epoch: self.epoch.load(SeqCst), + owner_id: self.epoch.load(SeqCst), id: self.next_connection_id.fetch_add(1, SeqCst), }; let connection_state = ConnectionState { diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 77d82ec3081514e5ab406ad68ac0e2940949e376..07c15e0f16d4a6fb8744d4fd431d351414f2582b 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -78,13 +78,13 @@ impl AnyTypedEnvelope for TypedEnvelope { impl PeerId { pub fn from_u64(peer_id: u64) -> Self { - let epoch = (peer_id >> 32) as u32; + let owner_id = (peer_id >> 32) as u32; let id = peer_id as u32; - Self { epoch, id } + Self { owner_id, id } } pub fn as_u64(self) -> u64 { - ((self.epoch as u64) << 32) | (self.id as u64) + ((self.owner_id as u64) << 32) | (self.id as u64) } } @@ -94,8 +94,8 @@ impl Eq for PeerId {} impl Ord for PeerId { fn cmp(&self, other: &Self) -> cmp::Ordering { - self.epoch - .cmp(&other.epoch) + self.owner_id + .cmp(&other.owner_id) .then_with(|| self.id.cmp(&other.id)) } } @@ -108,14 +108,14 @@ impl PartialOrd for PeerId { impl std::hash::Hash for PeerId { fn hash(&self, state: &mut H) { - self.epoch.hash(state); + self.owner_id.hash(state); self.id.hash(state); } } impl fmt::Display for PeerId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}/{}", self.epoch, self.id) + write!(f, "{}/{}", self.owner_id, self.id) } } @@ -124,7 +124,7 @@ impl FromStr for PeerId { fn from_str(s: &str) -> Result { let mut components = s.split('/'); - let epoch = components + let owner_id = components .next() .ok_or_else(|| anyhow!("invalid peer id {:?}", s))? .parse()?; @@ -132,7 +132,7 @@ impl FromStr for PeerId { .next() .ok_or_else(|| anyhow!("invalid peer id {:?}", s))? 
.parse()?; - Ok(PeerId { epoch, id }) + Ok(PeerId { owner_id, id }) } } @@ -542,20 +542,23 @@ mod tests { #[gpui::test] fn test_converting_peer_id_from_and_to_u64() { - let peer_id = PeerId { epoch: 10, id: 3 }; + let peer_id = PeerId { + owner_id: 10, + id: 3, + }; assert_eq!(PeerId::from_u64(peer_id.as_u64()), peer_id); let peer_id = PeerId { - epoch: u32::MAX, + owner_id: u32::MAX, id: 3, }; assert_eq!(PeerId::from_u64(peer_id.as_u64()), peer_id); let peer_id = PeerId { - epoch: 10, + owner_id: 10, id: u32::MAX, }; assert_eq!(PeerId::from_u64(peer_id.as_u64()), peer_id); let peer_id = PeerId { - epoch: u32::MAX, + owner_id: u32::MAX, id: u32::MAX, }; assert_eq!(PeerId::from_u64(peer_id.as_u64()), peer_id); From 067a19c971ee97794a215201bb3b9c26f673b1ca Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 15 Dec 2022 10:45:03 +0100 Subject: [PATCH 24/29] Avoid logging an error when user who hasn't joined any room disconnects --- crates/collab/src/db.rs | 91 +++++++++++++++++++++++++++------------- crates/collab/src/rpc.rs | 12 +++--- 2 files changed, 70 insertions(+), 33 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 461fe06085b63a3b6b4dc6bc94b2ea0fe3fd0969..1b559a48d5049d0f057ec96ad04f36ffa9b514bb 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1238,36 +1238,41 @@ impl Database { &self, expected_room_id: Option, user_id: UserId, - ) -> Result> { - self.room_transaction(|tx| async move { + ) -> Result>> { + self.optional_room_transaction(|tx| async move { + let mut filter = Condition::all() + .add(room_participant::Column::UserId.eq(user_id)) + .add(room_participant::Column::AnsweringConnectionId.is_null()); + if let Some(room_id) = expected_room_id { + filter = filter.add(room_participant::Column::RoomId.eq(room_id)); + } let participant = room_participant::Entity::find() - .filter( - room_participant::Column::UserId - .eq(user_id) - .and(room_participant::Column::AnsweringConnectionId.is_null()), - ) + .filter(filter) .one(&*tx) - .await? - .ok_or_else(|| anyhow!("could not decline call"))?; - let room_id = participant.room_id; + .await?; - if expected_room_id.map_or(false, |expected_room_id| expected_room_id != room_id) { - return Err(anyhow!("declining call on unexpected room"))?; - } + let participant = if let Some(participant) = participant { + participant + } else if expected_room_id.is_some() { + return Err(anyhow!("could not find call to decline"))?; + } else { + return Ok(None); + }; + let room_id = participant.room_id; room_participant::Entity::delete(participant.into_active_model()) .exec(&*tx) .await?; let room = self.get_room(room_id, &tx).await?; - Ok((room_id, room)) + Ok(Some((room_id, room))) }) .await } pub async fn cancel_call( &self, - expected_room_id: Option, + room_id: RoomId, calling_connection: ConnectionId, called_user_id: UserId, ) -> Result> { @@ -1276,6 +1281,7 @@ impl Database { .filter( Condition::all() .add(room_participant::Column::UserId.eq(called_user_id)) + .add(room_participant::Column::RoomId.eq(room_id)) .add( room_participant::Column::CallingConnectionId .eq(calling_connection.id as i32), @@ -1288,11 +1294,8 @@ impl Database { ) .one(&*tx) .await? 
- .ok_or_else(|| anyhow!("could not cancel call"))?; + .ok_or_else(|| anyhow!("no call to cancel"))?; let room_id = participant.room_id; - if expected_room_id.map_or(false, |expected_room_id| expected_room_id != room_id) { - return Err(anyhow!("canceling call on unexpected room"))?; - } room_participant::Entity::delete(participant.into_active_model()) .exec(&*tx) @@ -1346,8 +1349,11 @@ impl Database { .await } - pub async fn leave_room(&self, connection: ConnectionId) -> Result> { - self.room_transaction(|tx| async move { + pub async fn leave_room( + &self, + connection: ConnectionId, + ) -> Result>> { + self.optional_room_transaction(|tx| async move { let leaving_participant = room_participant::Entity::find() .filter( Condition::all() @@ -1498,9 +1504,9 @@ impl Database { self.rooms.remove(&room_id); } - Ok((room_id, left_room)) + Ok(Some((room_id, left_room))) } else { - Err(anyhow!("could not leave room"))? + Ok(None) } }) .await @@ -2549,26 +2555,38 @@ impl Database { self.run(body).await } - async fn room_transaction(&self, f: F) -> Result> + async fn optional_room_transaction(&self, f: F) -> Result>> where F: Send + Fn(TransactionHandle) -> Fut, - Fut: Send + Future>, + Fut: Send + Future>>, { let body = async { loop { let (tx, result) = self.with_transaction(&f).await?; match result { - Ok((room_id, data)) => { + Ok(Some((room_id, data))) => { let lock = self.rooms.entry(room_id).or_default().clone(); let _guard = lock.lock_owned().await; match tx.commit().await.map_err(Into::into) { Ok(()) => { - return Ok(RoomGuard { + return Ok(Some(RoomGuard { data, _guard, _not_send: PhantomData, - }); + })); + } + Err(error) => { + if is_serialization_error(&error) { + // Retry (don't break the loop) + } else { + return Err(error); + } } + } + } + Ok(None) => { + match tx.commit().await.map_err(Into::into) { + Ok(()) => return Ok(None), Err(error) => { if is_serialization_error(&error) { // Retry (don't break the loop) @@ -2593,6 +2611,23 @@ impl Database { self.run(body).await } + async fn room_transaction(&self, f: F) -> Result> + where + F: Send + Fn(TransactionHandle) -> Fut, + Fut: Send + Future>, + { + let data = self + .optional_room_transaction(move |tx| { + let future = f(tx); + async { + let data = future.await?; + Ok(Some(data)) + } + }) + .await?; + Ok(data.unwrap()) + } + async fn with_transaction(&self, f: &F) -> Result<(DatabaseTransaction, Result)> where F: Send + Fn(TransactionHandle) -> Fut, diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index d83aa65a6b76a2de1e5b5e0f4592214e6452f3f2..74c221b82d3950dd8273c7db6718ccb16d081a19 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -820,7 +820,7 @@ async fn sign_out( .is_user_online(session.user_id) { let db = session.db().await; - if let Some(room) = db.decline_call(None, session.user_id).await.trace_err() { + if let Some(room) = db.decline_call(None, session.user_id).await.trace_err().flatten() { room_updated(&room, &session.peer); } } @@ -1024,7 +1024,7 @@ async fn cancel_call( let room = session .db() .await - .cancel_call(Some(room_id), session.connection_id, called_user_id) + .cancel_call(room_id, session.connection_id, called_user_id) .await?; room_updated(&room, &session.peer); } @@ -1057,7 +1057,8 @@ async fn decline_call(message: proto::DeclineCall, session: Session) -> Result<( .db() .await .decline_call(Some(room_id), session.user_id) - .await?; + .await? 
+ .ok_or_else(|| anyhow!("failed to decline call"))?; room_updated(&room, &session.peer); } @@ -2026,8 +2027,7 @@ async fn leave_room_for_session(session: &Session) -> Result<()> { let canceled_calls_to_user_ids; let live_kit_room; let delete_live_kit_room; - { - let mut left_room = session.db().await.leave_room(session.connection_id).await?; + if let Some(mut left_room) = session.db().await.leave_room(session.connection_id).await? { contacts_to_update.insert(session.user_id); for project in left_room.left_projects.values() { @@ -2039,6 +2039,8 @@ async fn leave_room_for_session(session: &Session) -> Result<()> { canceled_calls_to_user_ids = mem::take(&mut left_room.canceled_calls_to_user_ids); live_kit_room = mem::take(&mut left_room.room.live_kit_room); delete_live_kit_room = left_room.room.participants.is_empty(); + } else { + return Ok(()); } { From aadd7f2886dceaed499bce4556577f6fd7e89085 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 15 Dec 2022 10:53:17 +0100 Subject: [PATCH 25/29] collab 0.3.13 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ad67b1e4572712f90b48726c4b2f0ae00cdc3b91..38b4697470c748ab0c1f7b5a7cbc0fc7cd820bbf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1131,7 +1131,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.3.12" +version = "0.3.13" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index d82171a7bdbd1ee676b5bc2a67ee06bb0eb381f7..aefdc1c1707d06e20c04a26055f22cc0789325be 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.3.12" +version = "0.3.13" [[bin]] name = "collab" From 86e5ae1f2e378ab8e0161d205ef3fa10d84aca27 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 15 Dec 2022 11:27:44 +0100 Subject: [PATCH 26/29] Allow nulls in `projects.host_connection_{id,server_id}` The server version on stable won't be able to fill values for those columns when we deploy the migration to preview. With this commit we're also dropping the unused `worktree_extensions` and `project_activity_periods` tables. The last version of the server on stable (0.2.6) doesn't contain any code that accesses those tables. 
--- .../20221109000000_test_schema.sql | 7 ++-- ...4346_change_epoch_from_uuid_to_integer.sql | 8 +++- crates/collab/src/db.rs | 39 +++++-------------- crates/collab/src/db/project.rs | 23 +++++++++-- crates/collab/src/rpc.rs | 1 - 5 files changed, 40 insertions(+), 38 deletions(-) diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 16f2ffd9566f4c007d6409d9a0fdc700a23f18e7..d002c8a135caec2590be6cf76efda340bcfd04f3 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -43,11 +43,12 @@ CREATE TABLE "projects" ( "id" INTEGER PRIMARY KEY AUTOINCREMENT, "room_id" INTEGER REFERENCES rooms (id) NOT NULL, "host_user_id" INTEGER REFERENCES users (id) NOT NULL, - "host_connection_id" INTEGER NOT NULL, - "host_connection_server_id" INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE, + "host_connection_id" INTEGER, + "host_connection_server_id" INTEGER REFERENCES servers (id) ON DELETE CASCADE, "unregistered" BOOLEAN NOT NULL DEFAULT FALSE ); -CREATE INDEX "index_projects_on_host_connection_epoch" ON "projects" ("host_connection_epoch"); +CREATE INDEX "index_projects_on_host_connection_server_id" ON "projects" ("host_connection_server_id"); +CREATE INDEX "index_projects_on_host_connection_id_and_host_connection_server_id" ON "projects" ("host_connection_id", "host_connection_server_id"); CREATE TABLE "worktrees" ( "project_id" INTEGER NOT NULL REFERENCES projects (id) ON DELETE CASCADE, diff --git a/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql b/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql index 5a05103be106fb43478e824885895f88b88e5f05..5e02f76ce25d59d799d5e5d9719e4e038d1bac02 100644 --- a/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql +++ b/crates/collab/migrations/20221214144346_change_epoch_from_uuid_to_integer.sql @@ -3,10 +3,14 @@ CREATE TABLE servers ( environment VARCHAR NOT NULL ); -DELETE FROM projects; +DROP TABLE worktree_extensions; +DROP TABLE project_activity_periods; +DELETE from projects; ALTER TABLE projects DROP COLUMN host_connection_epoch, - ADD COLUMN host_connection_server_id INTEGER NOT NULL REFERENCES servers (id) ON DELETE CASCADE; + ADD COLUMN host_connection_server_id INTEGER REFERENCES servers (id) ON DELETE CASCADE; +CREATE INDEX "index_projects_on_host_connection_server_id" ON "projects" ("host_connection_server_id"); +CREATE INDEX "index_projects_on_host_connection_id_and_host_connection_server_id" ON "projects" ("host_connection_id", "host_connection_server_id"); DELETE FROM project_collaborators; ALTER TABLE project_collaborators diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 1b559a48d5049d0f057ec96ad04f36ffa9b514bb..b1cbddc77ea398b26d5a9b00bb0216b097a45adc 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1643,10 +1643,7 @@ impl Database { left_projects.push(LeftProject { id: project.id, host_user_id: project.host_user_id, - host_connection_id: ConnectionId { - id: project.host_connection_id as u32, - owner_id: project.host_connection_server_id.0 as u32, - }, + host_connection_id: project.host_connection()?, connection_ids, }); } @@ -1764,10 +1761,7 @@ impl Database { while let Some(row) = db_projects.next().await { let (db_project, db_worktree) = row?; - let host_connection = ConnectionId { - owner_id: 
db_project.host_connection_server_id.0 as u32, - id: db_project.host_connection_id as u32, - }; + let host_connection = db_project.host_connection()?; if let Some(participant) = participants.get_mut(&host_connection) { let project = if let Some(project) = participant .projects @@ -1848,8 +1842,10 @@ impl Database { let project = project::ActiveModel { room_id: ActiveValue::set(participant.room_id), host_user_id: ActiveValue::set(participant.user_id), - host_connection_id: ActiveValue::set(connection.id as i32), - host_connection_server_id: ActiveValue::set(ServerId(connection.owner_id as i32)), + host_connection_id: ActiveValue::set(Some(connection.id as i32)), + host_connection_server_id: ActiveValue::set(Some(ServerId( + connection.owner_id as i32, + ))), ..Default::default() } .insert(&*tx) @@ -1901,11 +1897,7 @@ impl Database { .one(&*tx) .await? .ok_or_else(|| anyhow!("project not found"))?; - let host_connection = ConnectionId { - owner_id: project.host_connection_server_id.0 as u32, - id: project.host_connection_id as u32, - }; - if host_connection == connection { + if project.host_connection()? == connection { let room_id = project.room_id; project::Entity::delete(project.into_active_model()) .exec(&*tx) @@ -2088,11 +2080,7 @@ impl Database { .one(&*tx) .await? .ok_or_else(|| anyhow!("no such project"))?; - let host_connection = ConnectionId { - owner_id: project.host_connection_server_id.0 as u32, - id: project.host_connection_id as u32, - }; - if host_connection != connection { + if project.host_connection()? != connection { return Err(anyhow!("can't update a project hosted by someone else"))?; } @@ -2145,11 +2133,7 @@ impl Database { .one(&*tx) .await? .ok_or_else(|| anyhow!("no such project"))?; - let host_connection = ConnectionId { - owner_id: project.host_connection_server_id.0 as u32, - id: project.host_connection_id as u32, - }; - if host_connection != connection { + if project.host_connection()? 
!= connection { return Err(anyhow!("can't update a project hosted by someone else"))?; } @@ -2362,10 +2346,7 @@ impl Database { let left_project = LeftProject { id: project_id, host_user_id: project.host_user_id, - host_connection_id: ConnectionId { - owner_id: project.host_connection_server_id.0 as u32, - id: project.host_connection_id as u32, - }, + host_connection_id: project.host_connection()?, connection_ids, }; Ok((project.room_id, left_project)) diff --git a/crates/collab/src/db/project.rs b/crates/collab/src/db/project.rs index 1279d7d4494de3bfeb382ac0322b2de0354ceaaf..5b1f9f8467853344e1c57004e875ba434117195b 100644 --- a/crates/collab/src/db/project.rs +++ b/crates/collab/src/db/project.rs @@ -1,4 +1,6 @@ -use super::{ProjectId, RoomId, ServerId, UserId}; +use super::{ProjectId, Result, RoomId, ServerId, UserId}; +use anyhow::anyhow; +use rpc::ConnectionId; use sea_orm::entity::prelude::*; #[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] @@ -8,8 +10,23 @@ pub struct Model { pub id: ProjectId, pub room_id: RoomId, pub host_user_id: UserId, - pub host_connection_id: i32, - pub host_connection_server_id: ServerId, + pub host_connection_id: Option, + pub host_connection_server_id: Option, +} + +impl Model { + pub fn host_connection(&self) -> Result { + let host_connection_server_id = self + .host_connection_server_id + .ok_or_else(|| anyhow!("empty host_connection_server_id"))?; + let host_connection_id = self + .host_connection_id + .ok_or_else(|| anyhow!("empty host_connection_id"))?; + Ok(ConnectionId { + owner_id: host_connection_server_id.0 as u32, + id: host_connection_id as u32, + }) + } } #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 74c221b82d3950dd8273c7db6718ccb16d081a19..8b16a3a05b274cdb8b81b89c123bd873a3ddf2f9 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1506,7 +1506,6 @@ async fn update_buffer( .project_connection_ids(project_id, session.connection_id) .await?; - dbg!(session.connection_id, &*project_connection_ids); broadcast( session.connection_id, project_connection_ids.iter().copied(), From 5fb522a9b117ea48d1041dcd7ac1d8dc11fbe4ef Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 15 Dec 2022 11:31:51 +0100 Subject: [PATCH 27/29] collab 0.3.14 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38b4697470c748ab0c1f7b5a7cbc0fc7cd820bbf..c5b1023687a261f2b3fffdfd21a313583a05b7fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1131,7 +1131,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.3.13" +version = "0.3.14" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index aefdc1c1707d06e20c04a26055f22cc0789325be..4f2b83d7ecc4ecc01b9962ac58fea2b1c34d751a 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.3.13" +version = "0.3.14" [[bin]] name = "collab" From 5a334622ea5008dea9bd1bfad7f378e30cf73f98 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 15 Dec 2022 16:34:59 +0100 Subject: [PATCH 28/29] :lipstick: --- crates/collab/src/rpc.rs | 16 ++++----- crates/rpc/proto/zed.proto | 2 +- crates/rpc/src/peer.rs | 59 +++++++++++++++---------------- crates/rpc/src/proto.rs | 2 +- crates/workspace/src/workspace.rs | 2 +- 5 files changed, 40 insertions(+), 
41 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 8b16a3a05b274cdb8b81b89c123bd873a3ddf2f9..6a8ae61ed0b7bff3ad44741a2a21528dbb32a5b7 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -488,9 +488,9 @@ impl Server { move |duration| executor.sleep(duration) }); - tracing::info!(%user_id, %login, ?connection_id, %address, "connection opened"); + tracing::info!(%user_id, %login, %connection_id, %address, "connection opened"); this.peer.send(connection_id, proto::Hello { peer_id: Some(connection_id.into()) })?; - tracing::info!(%user_id, %login, ?connection_id, %address, "sent hello message"); + tracing::info!(%user_id, %login, %connection_id, %address, "sent hello message"); if let Some(send_connection_id) = send_connection_id.take() { let _ = send_connection_id.send(connection_id); @@ -552,7 +552,7 @@ impl Server { _ = teardown.changed().fuse() => return Ok(()), result = handle_io => { if let Err(error) = result { - tracing::error!(?error, %user_id, %login, ?connection_id, %address, "error handling I/O"); + tracing::error!(?error, %user_id, %login, %connection_id, %address, "error handling I/O"); } break; } @@ -560,7 +560,7 @@ impl Server { message = next_message => { if let Some(message) = message { let type_name = message.payload_type_name(); - let span = tracing::info_span!("receive message", %user_id, %login, ?connection_id, %address, type_name); + let span = tracing::info_span!("receive message", %user_id, %login, %connection_id, %address, type_name); let span_enter = span.enter(); if let Some(handler) = this.handlers.get(&message.payload_type_id()) { let is_background = message.is_background(); @@ -574,10 +574,10 @@ impl Server { foreground_message_handlers.push(handle_message); } } else { - tracing::error!(%user_id, %login, ?connection_id, %address, "no message handler"); + tracing::error!(%user_id, %login, %connection_id, %address, "no message handler"); } } else { - tracing::info!(%user_id, %login, ?connection_id, %address, "connection closed"); + tracing::info!(%user_id, %login, %connection_id, %address, "connection closed"); break; } } @@ -585,9 +585,9 @@ impl Server { } drop(foreground_message_handlers); - tracing::info!(%user_id, %login, ?connection_id, %address, "signing out"); + tracing::info!(%user_id, %login, %connection_id, %address, "signing out"); if let Err(error) = sign_out(session, teardown, executor).await { - tracing::error!(%user_id, %login, ?connection_id, %address, ?error, "error signing out"); + tracing::error!(%user_id, %login, %connection_id, %address, ?error, "error signing out"); } Ok(()) diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index 0bd181862b2a9e29b932264826813a1690bce76e..9528bd10b7de96ad1e05063c6c445786fc3d8f5c 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -9,7 +9,7 @@ message PeerId { message Envelope { uint32 id = 1; optional uint32 responding_to = 2; - PeerId original_sender_id = 3; + optional PeerId original_sender_id = 3; oneof payload { Hello hello = 4; Ack ack = 5; diff --git a/crates/rpc/src/peer.rs b/crates/rpc/src/peer.rs index 59a30a0f4747107fb55a0ec3be456c9e6640958b..d2a4e6e0804119cbdbcb316861c27caeab57850a 100644 --- a/crates/rpc/src/peer.rs +++ b/crates/rpc/src/peer.rs @@ -81,7 +81,6 @@ pub struct TypedEnvelope { impl TypedEnvelope { pub fn original_sender_id(&self) -> Result { self.original_sender_id - .clone() .ok_or_else(|| anyhow!("missing original_sender_id")) } } @@ -171,12 +170,12 @@ impl Peer { let this = self.clone(); 
let response_channels = connection_state.response_channels.clone(); let handle_io = async move { - tracing::debug!(?connection_id, "handle io future: start"); + tracing::debug!(%connection_id, "handle io future: start"); let _end_connection = util::defer(|| { response_channels.lock().take(); this.connections.write().remove(&connection_id); - tracing::debug!(?connection_id, "handle io future: end"); + tracing::debug!(%connection_id, "handle io future: end"); }); // Send messages on this frequency so the connection isn't closed. @@ -188,68 +187,68 @@ impl Peer { futures::pin_mut!(receive_timeout); loop { - tracing::debug!(?connection_id, "outer loop iteration start"); + tracing::debug!(%connection_id, "outer loop iteration start"); let read_message = reader.read().fuse(); futures::pin_mut!(read_message); loop { - tracing::debug!(?connection_id, "inner loop iteration start"); + tracing::debug!(%connection_id, "inner loop iteration start"); futures::select_biased! { outgoing = outgoing_rx.next().fuse() => match outgoing { Some(outgoing) => { - tracing::debug!(?connection_id, "outgoing rpc message: writing"); + tracing::debug!(%connection_id, "outgoing rpc message: writing"); futures::select_biased! { result = writer.write(outgoing).fuse() => { - tracing::debug!(?connection_id, "outgoing rpc message: done writing"); + tracing::debug!(%connection_id, "outgoing rpc message: done writing"); result.context("failed to write RPC message")?; - tracing::debug!(?connection_id, "keepalive interval: resetting after sending message"); + tracing::debug!(%connection_id, "keepalive interval: resetting after sending message"); keepalive_timer.set(create_timer(KEEPALIVE_INTERVAL).fuse()); } _ = create_timer(WRITE_TIMEOUT).fuse() => { - tracing::debug!(?connection_id, "outgoing rpc message: writing timed out"); + tracing::debug!(%connection_id, "outgoing rpc message: writing timed out"); Err(anyhow!("timed out writing message"))?; } } } None => { - tracing::debug!(?connection_id, "outgoing rpc message: channel closed"); + tracing::debug!(%connection_id, "outgoing rpc message: channel closed"); return Ok(()) }, }, _ = keepalive_timer => { - tracing::debug!(?connection_id, "keepalive interval: pinging"); + tracing::debug!(%connection_id, "keepalive interval: pinging"); futures::select_biased! 
{ result = writer.write(proto::Message::Ping).fuse() => { - tracing::debug!(?connection_id, "keepalive interval: done pinging"); + tracing::debug!(%connection_id, "keepalive interval: done pinging"); result.context("failed to send keepalive")?; - tracing::debug!(?connection_id, "keepalive interval: resetting after pinging"); + tracing::debug!(%connection_id, "keepalive interval: resetting after pinging"); keepalive_timer.set(create_timer(KEEPALIVE_INTERVAL).fuse()); } _ = create_timer(WRITE_TIMEOUT).fuse() => { - tracing::debug!(?connection_id, "keepalive interval: pinging timed out"); + tracing::debug!(%connection_id, "keepalive interval: pinging timed out"); Err(anyhow!("timed out sending keepalive"))?; } } } incoming = read_message => { let incoming = incoming.context("error reading rpc message from socket")?; - tracing::debug!(?connection_id, "incoming rpc message: received"); - tracing::debug!(?connection_id, "receive timeout: resetting"); + tracing::debug!(%connection_id, "incoming rpc message: received"); + tracing::debug!(%connection_id, "receive timeout: resetting"); receive_timeout.set(create_timer(RECEIVE_TIMEOUT).fuse()); if let proto::Message::Envelope(incoming) = incoming { - tracing::debug!(?connection_id, "incoming rpc message: processing"); + tracing::debug!(%connection_id, "incoming rpc message: processing"); futures::select_biased! { result = incoming_tx.send(incoming).fuse() => match result { Ok(_) => { - tracing::debug!(?connection_id, "incoming rpc message: processed"); + tracing::debug!(%connection_id, "incoming rpc message: processed"); } Err(_) => { - tracing::debug!(?connection_id, "incoming rpc message: channel closed"); + tracing::debug!(%connection_id, "incoming rpc message: channel closed"); return Ok(()) } }, _ = create_timer(WRITE_TIMEOUT).fuse() => { - tracing::debug!(?connection_id, "incoming rpc message: processing timed out"); + tracing::debug!(%connection_id, "incoming rpc message: processing timed out"); Err(anyhow!("timed out processing incoming message"))? } } @@ -257,7 +256,7 @@ impl Peer { break; }, _ = receive_timeout => { - tracing::debug!(?connection_id, "receive timeout: delay between messages too long"); + tracing::debug!(%connection_id, "receive timeout: delay between messages too long"); Err(anyhow!("delay between messages too long"))? 
} } @@ -276,12 +275,12 @@ impl Peer { let message_id = incoming.id; tracing::debug!(?incoming, "incoming message future: start"); let _end = util::defer(move || { - tracing::debug!(?connection_id, message_id, "incoming message future: end"); + tracing::debug!(%connection_id, message_id, "incoming message future: end"); }); if let Some(responding_to) = incoming.responding_to { tracing::debug!( - ?connection_id, + %connection_id, message_id, responding_to, "incoming response: received" @@ -291,7 +290,7 @@ impl Peer { let requester_resumed = oneshot::channel(); if let Err(error) = tx.send((incoming, requester_resumed.0)) { tracing::debug!( - ?connection_id, + %connection_id, message_id, responding_to = responding_to, ?error, @@ -300,21 +299,21 @@ impl Peer { } tracing::debug!( - ?connection_id, + %connection_id, message_id, responding_to, "incoming response: waiting to resume requester" ); let _ = requester_resumed.1.await; tracing::debug!( - ?connection_id, + %connection_id, message_id, responding_to, "incoming response: requester resumed" ); } else { tracing::warn!( - ?connection_id, + %connection_id, message_id, responding_to, "incoming response: unknown request" @@ -323,10 +322,10 @@ impl Peer { None } else { - tracing::debug!(?connection_id, message_id, "incoming message: received"); + tracing::debug!(%connection_id, message_id, "incoming message: received"); proto::build_typed_envelope(connection_id, incoming).or_else(|| { tracing::error!( - ?connection_id, + %connection_id, message_id, "unable to construct a typed envelope" ); @@ -499,7 +498,7 @@ impl Peer { let connections = self.connections.read(); let connection = connections .get(&connection_id) - .ok_or_else(|| anyhow!("no such connection: {:?}", connection_id))?; + .ok_or_else(|| anyhow!("no such connection: {}", connection_id))?; Ok(connection.clone()) } } diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 07c15e0f16d4a6fb8744d4fd431d351414f2582b..385caf3565f639b973007a67669378f59584ea55 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -72,7 +72,7 @@ impl AnyTypedEnvelope for TypedEnvelope { } fn original_sender_id(&self) -> Option { - self.original_sender_id.clone() + self.original_sender_id } } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 2aee12d31448f2e1df85643a817d596fa9cb1890..c30dc2ea2997ff25a9dfe7287f40cb2d70a85a90 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2050,7 +2050,7 @@ impl Workspace { .get(&leader_id) .map(|c| c.replica_id) }) - .ok_or_else(|| anyhow!("no such collaborator {:?}", leader_id))?; + .ok_or_else(|| anyhow!("no such collaborator {}", leader_id))?; let item_builders = cx.update(|cx| { cx.default_global::() From 2679e245a5e2bcf394a50c53745225eb43cacbb0 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 15 Dec 2022 16:40:16 +0100 Subject: [PATCH 29/29] Minor stylistic change --- crates/collab/src/main.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 26af6442d9fd6d32f34bb4894c34464bf38cee49..0f783c13e58d3b64c0e034c18ea6173d63be4036 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -77,8 +77,7 @@ async fn main() -> Result<()> { .expect("failed to listen for interrupt signal"); let sigterm = sigterm.recv(); let sigint = sigint.recv(); - futures::pin_mut!(sigterm); - futures::pin_mut!(sigint); + futures::pin_mut!(sigterm, sigint); futures::future::select(sigterm, 
sigint).await; tracing::info!("Received interrupt signal"); rpc_server.teardown();