From b74b1977d4634b78b4ed1be5d9c7e188c90e9827 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Mon, 26 Jan 2026 20:28:09 +0100 Subject: [PATCH] collab: Extract tests into integration tests crate (#47668) Reduces time needed to build collab tests from 48s to 38s. Release Notes: - N/A --- Cargo.lock | 1 + Cargo.toml | 1 + crates/collab/Cargo.toml | 9 + crates/collab/src/auth.rs | 183 +- crates/collab/src/db.rs | 25 +- crates/collab/src/db/queries/channels.rs | 10 +- crates/collab/src/db/queries/contributors.rs | 2 +- crates/collab/src/db/tests.rs | 217 -- crates/collab/src/executor.rs | 8 +- crates/collab/src/lib.rs | 5 +- crates/collab/src/rpc.rs | 16 +- crates/collab/src/rpc/connection_pool.rs | 2 +- crates/collab/src/tests.rs | 51 - crates/collab/src/tests/debug_panel_tests.rs | 2449 ----------------- .../integration}/agent_sharing_tests.rs | 2 +- .../integration}/channel_buffer_tests.rs | 6 +- .../integration}/channel_guest_tests.rs | 4 +- .../integration}/channel_tests.rs | 10 +- .../collab/tests/integration/collab_tests.rs | 230 ++ crates/collab/tests/integration/db_tests.rs | 219 ++ .../integration/db_tests}/buffer_tests.rs | 1 + .../integration/db_tests}/channel_tests.rs | 14 +- .../integration/db_tests}/db_tests.rs | 16 +- .../integration/db_tests}/extension_tests.rs | 12 +- .../integration/db_tests}/migrations.rs | 0 .../integration}/editor_tests.rs | 3 +- .../integration}/following_tests.rs | 4 +- .../tests => tests/integration}/git_tests.rs | 2 +- .../integration}/integration_tests.rs | 8 +- .../integration}/notification_tests.rs | 2 +- .../random_channel_buffer_tests.rs | 7 +- .../random_project_collaboration_tests.rs | 9 +- .../integration}/randomized_test_helpers.rs | 6 +- .../remote_editing_collaboration_tests.rs | 2 +- .../integration}/test_server.rs | 14 +- 35 files changed, 552 insertions(+), 2998 deletions(-) delete mode 100644 crates/collab/src/tests.rs delete mode 100644 
crates/collab/src/tests/debug_panel_tests.rs rename crates/collab/{src/tests => tests/integration}/agent_sharing_tests.rs (99%) rename crates/collab/{src/tests => tests/integration}/channel_buffer_tests.rs (99%) rename crates/collab/{src/tests => tests/integration}/channel_guest_tests.rs (99%) rename crates/collab/{src/tests => tests/integration}/channel_tests.rs (99%) create mode 100644 crates/collab/tests/integration/collab_tests.rs create mode 100644 crates/collab/tests/integration/db_tests.rs rename crates/collab/{src/db/tests => tests/integration/db_tests}/buffer_tests.rs (99%) rename crates/collab/{src/db/tests => tests/integration/db_tests}/channel_tests.rs (98%) rename crates/collab/{src/db/tests => tests/integration/db_tests}/db_tests.rs (98%) rename crates/collab/{src/db/tests => tests/integration/db_tests}/extension_tests.rs (98%) rename crates/collab/{src/db/tests => tests/integration/db_tests}/migrations.rs (100%) rename crates/collab/{src/tests => tests/integration}/editor_tests.rs (99%) rename crates/collab/{src/tests => tests/integration}/following_tests.rs (99%) rename crates/collab/{src/tests => tests/integration}/git_tests.rs (99%) rename crates/collab/{src/tests => tests/integration}/integration_tests.rs (99%) rename crates/collab/{src/tests => tests/integration}/notification_tests.rs (99%) rename crates/collab/{src/tests => tests/integration}/random_channel_buffer_tests.rs (98%) rename crates/collab/{src/tests => tests/integration}/random_project_collaboration_tests.rs (99%) rename crates/collab/{src/tests => tests/integration}/randomized_test_helpers.rs (99%) rename crates/collab/{src/tests => tests/integration}/remote_editing_collaboration_tests.rs (99%) rename crates/collab/{src/tests => tests/integration}/test_server.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index 52d64f47647b358fbbfd89ec4678f635b1195ee5..7a3b4663422ff45b3a253ec47886dc6a1eb1e141 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3293,6 +3293,7 @@ dependencies = [ "chrono", 
"client", "clock", + "collab", "collab_ui", "collections", "command_palette_hooks", diff --git a/Cargo.toml b/Cargo.toml index a847a2328e109afc2b016acf7fc6f6b7ab92a623..d9ee65970f1424f83a68245c14e88b8afc9dc141 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -278,6 +278,7 @@ clock = { path = "crates/clock" } cloud_api_client = { path = "crates/cloud_api_client" } cloud_api_types = { path = "crates/cloud_api_types" } cloud_llm_client = { path = "crates/cloud_llm_client" } +collab = { path = "crates/collab" } collab_ui = { path = "crates/collab_ui" } collections = { path = "crates/collections", version = "0.1.0" } command_palette = { path = "crates/command_palette" } diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 4b486698b53b7772335e93d4ea4f481e37779e9c..4a91c31cd77f99cbb241e0a4734b438448800090 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -17,6 +17,14 @@ name = "collab" sqlite = ["sea-orm/sqlx-sqlite", "sqlx/sqlite"] test-support = ["sqlite"] +[lib] +test = false + +[[test]] +name = "collab_tests" +required-features = ["test-support"] +path = "tests/integration/collab_tests.rs" + [dependencies] anyhow.workspace = true async-trait.workspace = true @@ -81,6 +89,7 @@ buffer_diff.workspace = true call = { workspace = true, features = ["test-support"] } channel.workspace = true client = { workspace = true, features = ["test-support"] } +collab = { workspace = true, features = ["test-support"] } collab_ui = { workspace = true, features = ["test-support"] } collections = { workspace = true, features = ["test-support"] } command_palette_hooks.workspace = true diff --git a/crates/collab/src/auth.rs b/crates/collab/src/auth.rs index 13296b79ae8b3df97753e7adf4f2078990c187b0..3134b6dff694ba6fcfbcfcbb6f4d73391d14b91c 100644 --- a/crates/collab/src/auth.rs +++ b/crates/collab/src/auth.rs @@ -108,13 +108,13 @@ pub async fn validate_header(mut req: Request, next: Next) -> impl Into )) } -const MAX_ACCESS_TOKENS_TO_STORE: usize = 8; +pub 
const MAX_ACCESS_TOKENS_TO_STORE: usize = 8; #[derive(Serialize, Deserialize)] -struct AccessTokenJson { - version: usize, - id: AccessTokenId, - token: String, +pub struct AccessTokenJson { + pub version: usize, + pub id: AccessTokenId, + pub token: String, } /// Creates a new access token to identify the given user. before returning it, you should @@ -224,176 +224,3 @@ pub async fn verify_access_token( }, }) } - -#[cfg(test)] -mod test { - use rand::prelude::*; - use scrypt::password_hash::{PasswordHasher, SaltString}; - use sea_orm::EntityTrait; - - use super::*; - use crate::db::{NewUserParams, access_token}; - - #[gpui::test] - async fn test_verify_access_token(cx: &mut gpui::TestAppContext) { - let test_db = crate::db::TestDb::sqlite(cx.executor()); - let db = test_db.db(); - - let user = db - .create_user( - "example@example.com", - None, - false, - NewUserParams { - github_login: "example".into(), - github_user_id: 1, - }, - ) - .await - .unwrap(); - - let token = create_access_token(db, user.user_id, None).await.unwrap(); - assert!(matches!( - verify_access_token(&token, user.user_id, db).await.unwrap(), - VerifyAccessTokenResult { - is_valid: true, - impersonator_id: None, - } - )); - - let old_token = create_previous_access_token(user.user_id, None, db) - .await - .unwrap(); - - let old_token_id = serde_json::from_str::(&old_token) - .unwrap() - .id; - - let hash = db - .transaction(|tx| async move { - Ok(access_token::Entity::find_by_id(old_token_id) - .one(&*tx) - .await?) - }) - .await - .unwrap() - .unwrap() - .hash; - assert!(hash.starts_with("$scrypt$")); - - assert!(matches!( - verify_access_token(&old_token, user.user_id, db) - .await - .unwrap(), - VerifyAccessTokenResult { - is_valid: true, - impersonator_id: None, - } - )); - - let hash = db - .transaction(|tx| async move { - Ok(access_token::Entity::find_by_id(old_token_id) - .one(&*tx) - .await?) 
- }) - .await - .unwrap() - .unwrap() - .hash; - assert!(hash.starts_with("$sha256$")); - - assert!(matches!( - verify_access_token(&old_token, user.user_id, db) - .await - .unwrap(), - VerifyAccessTokenResult { - is_valid: true, - impersonator_id: None, - } - )); - - assert!(matches!( - verify_access_token(&token, user.user_id, db).await.unwrap(), - VerifyAccessTokenResult { - is_valid: true, - impersonator_id: None, - } - )); - } - - async fn create_previous_access_token( - user_id: UserId, - impersonated_user_id: Option, - db: &Database, - ) -> Result { - let access_token = rpc::auth::random_token(); - let access_token_hash = previous_hash_access_token(&access_token)?; - let id = db - .create_access_token( - user_id, - impersonated_user_id, - &access_token_hash, - MAX_ACCESS_TOKENS_TO_STORE, - ) - .await?; - Ok(serde_json::to_string(&AccessTokenJson { - version: 1, - id, - token: access_token, - })?) - } - - fn previous_hash_access_token(token: &str) -> Result { - // Avoid slow hashing in debug mode. - let params = if cfg!(debug_assertions) { - scrypt::Params::new(1, 1, 1, scrypt::Params::RECOMMENDED_LEN).unwrap() - } else { - scrypt::Params::new(14, 8, 1, scrypt::Params::RECOMMENDED_LEN).unwrap() - }; - - Ok(Scrypt - .hash_password_customized( - token.as_bytes(), - None, - None, - params, - &SaltString::generate(PasswordHashRngCompat::new()), - ) - .map_err(anyhow::Error::new)? - .to_string()) - } - - // TODO: remove once we password_hash v0.6 is released. 
- struct PasswordHashRngCompat(rand::rngs::ThreadRng); - - impl PasswordHashRngCompat { - fn new() -> Self { - Self(rand::rng()) - } - } - - impl scrypt::password_hash::rand_core::RngCore for PasswordHashRngCompat { - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest); - } - - fn try_fill_bytes( - &mut self, - dest: &mut [u8], - ) -> Result<(), scrypt::password_hash::rand_core::Error> { - self.fill_bytes(dest); - Ok(()) - } - } - - impl scrypt::password_hash::rand_core::CryptoRng for PasswordHashRngCompat {} -} diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index a36e54b82d96657e9f5c41550555c98d6ca1692b..165d5fc3cbe569b6a2b8963f2d70b6de0cc28b48 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1,8 +1,6 @@ mod ids; -mod queries; +pub mod queries; mod tables; -#[cfg(test)] -pub mod tests; use crate::{Error, Result}; use anyhow::{Context as _, anyhow}; @@ -37,15 +35,12 @@ use tokio::sync::{Mutex, OwnedMutexGuard}; use util::paths::PathStyle; use worktree_settings_file::LocalSettingsKind; -#[cfg(test)] -pub use tests::TestDb; - pub use ids::*; pub use sea_orm::ConnectOptions; pub use tables::user::Model as User; pub use tables::*; -#[cfg(test)] +#[cfg(feature = "test-support")] pub struct DatabaseTestOptions { pub executor: gpui::BackgroundExecutor, pub runtime: tokio::runtime::Runtime, @@ -55,14 +50,14 @@ pub struct DatabaseTestOptions { /// Database gives you a handle that lets you access the database. /// It handles pooling internally. 
pub struct Database { - options: ConnectOptions, - pool: DatabaseConnection, + pub options: ConnectOptions, + pub pool: DatabaseConnection, rooms: DashMap>>, projects: DashMap>>, notification_kinds_by_id: HashMap, notification_kinds_by_name: HashMap, - #[cfg(test)] - test_options: Option, + #[cfg(feature = "test-support")] + pub test_options: Option, } // The `Database` type has so many methods that its impl blocks are split into @@ -78,7 +73,7 @@ impl Database { projects: DashMap::with_capacity(16384), notification_kinds_by_id: HashMap::default(), notification_kinds_by_name: HashMap::default(), - #[cfg(test)] + #[cfg(feature = "test-support")] test_options: None, }) } @@ -87,7 +82,7 @@ impl Database { &self.options } - #[cfg(test)] + #[cfg(feature = "test-support")] pub fn reset(&self) { self.rooms.clear(); self.projects.clear(); @@ -248,7 +243,7 @@ impl Database { where F: Future>, { - #[cfg(test)] + #[cfg(feature = "test-support")] { let test_options = self.test_options.as_ref().unwrap(); test_options.executor.simulate_random_delay().await; @@ -260,7 +255,7 @@ impl Database { test_options.runtime.block_on(future) } - #[cfg(not(test))] + #[cfg(not(feature = "test-support"))] { future.await } diff --git a/crates/collab/src/db/queries/channels.rs b/crates/collab/src/db/queries/channels.rs index 3f5c8b6a885f4fa47bf868e3a2c564cc2067428e..8e783f42a86f3810a811d6d4495b237b5285ded0 100644 --- a/crates/collab/src/db/queries/channels.rs +++ b/crates/collab/src/db/queries/channels.rs @@ -7,7 +7,7 @@ use rpc::{ use sea_orm::{ActiveValue, DbBackend, TryGetableMany}; impl Database { - #[cfg(test)] + #[cfg(feature = "test-support")] pub async fn all_channels(&self) -> Result> { self.transaction(move |tx| async move { let mut channels = Vec::new(); @@ -21,12 +21,12 @@ impl Database { .await } - #[cfg(test)] + #[cfg(feature = "test-support")] pub async fn create_root_channel(&self, name: &str, creator_id: UserId) -> Result { Ok(self.create_channel(name, None, 
creator_id).await?.0.id) } - #[cfg(test)] + #[cfg(feature = "test-support")] pub async fn create_sub_channel( &self, name: &str, @@ -226,7 +226,7 @@ impl Database { .await } - #[cfg(test)] + #[cfg(feature = "test-support")] pub async fn set_channel_requires_zed_cla( &self, channel_id: ChannelId, @@ -885,7 +885,7 @@ impl Database { .await } - pub(crate) async fn get_channel_internal( + pub async fn get_channel_internal( &self, channel_id: ChannelId, tx: &DatabaseTransaction, diff --git a/crates/collab/src/db/queries/contributors.rs b/crates/collab/src/db/queries/contributors.rs index ed20b05352b4a7afa20c56391ba8a3eb7b12ec32..d5cc03e69d5bde55b7d89e5a6c389211d32266a9 100644 --- a/crates/collab/src/db/queries/contributors.rs +++ b/crates/collab/src/db/queries/contributors.rs @@ -2,7 +2,7 @@ use super::*; impl Database { /// Records that a given user has signed the CLA. - #[cfg(test)] + #[cfg(feature = "test-support")] pub async fn add_contributor( &self, github_login: &str, diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index 30825d55baad9a287b1dd7c8822931f3428eb350..8b137891791fe96927ad78e64b0aad7bded08bdc 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -1,218 +1 @@ -mod buffer_tests; -mod channel_tests; -mod db_tests; -mod extension_tests; -mod migrations; -use std::sync::Arc; -use std::sync::atomic::{AtomicI32, Ordering::SeqCst}; -use std::time::Duration; - -use gpui::BackgroundExecutor; -use parking_lot::Mutex; -use rand::prelude::*; -use sea_orm::ConnectionTrait; -use sqlx::migrate::MigrateDatabase; - -use self::migrations::run_database_migrations; - -use super::*; - -pub struct TestDb { - pub db: Option>, - pub connection: Option, -} - -impl TestDb { - pub fn sqlite(executor: BackgroundExecutor) -> Self { - let url = "sqlite::memory:"; - let runtime = tokio::runtime::Builder::new_current_thread() - .enable_io() - .enable_time() - .build() - .unwrap(); - - let mut db = runtime.block_on(async { - let mut 
options = ConnectOptions::new(url); - options.max_connections(5); - let mut db = Database::new(options).await.unwrap(); - let sql = include_str!(concat!( - env!("CARGO_MANIFEST_DIR"), - "/migrations.sqlite/20221109000000_test_schema.sql" - )); - db.pool - .execute(sea_orm::Statement::from_string( - db.pool.get_database_backend(), - sql, - )) - .await - .unwrap(); - db.initialize_notification_kinds().await.unwrap(); - db - }); - - db.test_options = Some(DatabaseTestOptions { - executor, - runtime, - query_failure_probability: parking_lot::Mutex::new(0.0), - }); - - Self { - db: Some(Arc::new(db)), - connection: None, - } - } - - pub fn postgres(executor: BackgroundExecutor) -> Self { - static LOCK: Mutex<()> = Mutex::new(()); - - let _guard = LOCK.lock(); - let mut rng = StdRng::from_os_rng(); - let url = format!( - "postgres://postgres@localhost/zed-test-{}", - rng.random::() - ); - let runtime = tokio::runtime::Builder::new_current_thread() - .enable_io() - .enable_time() - .build() - .unwrap(); - - let mut db = runtime.block_on(async { - sqlx::Postgres::create_database(&url) - .await - .expect("failed to create test db"); - let mut options = ConnectOptions::new(url); - options - .max_connections(5) - .idle_timeout(Duration::from_secs(0)); - let mut db = Database::new(options).await.unwrap(); - let migrations_path = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations"); - run_database_migrations(db.options(), migrations_path) - .await - .unwrap(); - db.initialize_notification_kinds().await.unwrap(); - db - }); - - db.test_options = Some(DatabaseTestOptions { - executor, - runtime, - query_failure_probability: parking_lot::Mutex::new(0.0), - }); - - Self { - db: Some(Arc::new(db)), - connection: None, - } - } - - pub fn db(&self) -> &Arc { - self.db.as_ref().unwrap() - } - - pub fn set_query_failure_probability(&self, probability: f64) { - let database = self.db.as_ref().unwrap(); - let test_options = database.test_options.as_ref().unwrap(); - 
*test_options.query_failure_probability.lock() = probability; - } -} - -#[macro_export] -macro_rules! test_both_dbs { - ($test_name:ident, $postgres_test_name:ident, $sqlite_test_name:ident) => { - #[cfg(target_os = "macos")] - #[gpui::test] - async fn $postgres_test_name(cx: &mut gpui::TestAppContext) { - let test_db = $crate::db::TestDb::postgres(cx.executor().clone()); - $test_name(test_db.db()).await; - } - - #[gpui::test] - async fn $sqlite_test_name(cx: &mut gpui::TestAppContext) { - let test_db = $crate::db::TestDb::sqlite(cx.executor().clone()); - $test_name(test_db.db()).await; - } - }; -} - -impl Drop for TestDb { - fn drop(&mut self) { - let db = self.db.take().unwrap(); - if let sea_orm::DatabaseBackend::Postgres = db.pool.get_database_backend() { - db.test_options.as_ref().unwrap().runtime.block_on(async { - use util::ResultExt; - let query = " - SELECT pg_terminate_backend(pg_stat_activity.pid) - FROM pg_stat_activity - WHERE - pg_stat_activity.datname = current_database() AND - pid <> pg_backend_pid(); - "; - db.pool - .execute(sea_orm::Statement::from_string( - db.pool.get_database_backend(), - query, - )) - .await - .log_err(); - sqlx::Postgres::drop_database(db.options.get_url()) - .await - .log_err(); - }) - } - } -} - -#[track_caller] -fn assert_channel_tree_matches(actual: Vec, expected: Vec) { - let expected_channels = expected.into_iter().collect::>(); - let actual_channels = actual.into_iter().collect::>(); - pretty_assertions::assert_eq!(expected_channels, actual_channels); -} - -fn channel_tree(channels: &[(ChannelId, &[ChannelId], &'static str)]) -> Vec { - use std::collections::HashMap; - - let mut result = Vec::new(); - let mut order_by_parent: HashMap, i32> = HashMap::new(); - - for (id, parent_path, name) in channels { - let parent_key = parent_path.to_vec(); - let order = if parent_key.is_empty() { - 1 - } else { - *order_by_parent - .entry(parent_key.clone()) - .and_modify(|e| *e += 1) - .or_insert(1) - }; - - result.push(Channel { 
- id: *id, - name: (*name).to_owned(), - visibility: ChannelVisibility::Members, - parent_path: parent_key, - channel_order: order, - }); - } - - result -} - -static GITHUB_USER_ID: AtomicI32 = AtomicI32::new(5); - -async fn new_test_user(db: &Arc, email: &str) -> UserId { - db.create_user( - email, - None, - false, - NewUserParams { - github_login: email[0..email.find('@').unwrap()].to_string(), - github_user_id: GITHUB_USER_ID.fetch_add(1, SeqCst), - }, - ) - .await - .unwrap() - .user_id -} diff --git a/crates/collab/src/executor.rs b/crates/collab/src/executor.rs index e69aed363b128be99daad4cb85598a0aa555dfb0..97597193eb2a9a70f4cfd2f877fe305e3d95a077 100644 --- a/crates/collab/src/executor.rs +++ b/crates/collab/src/executor.rs @@ -1,12 +1,12 @@ use std::{future::Future, time::Duration}; -#[cfg(test)] +#[cfg(feature = "test-support")] use gpui::BackgroundExecutor; #[derive(Clone)] pub enum Executor { Production, - #[cfg(test)] + #[cfg(feature = "test-support")] Deterministic(BackgroundExecutor), } @@ -19,7 +19,7 @@ impl Executor { Executor::Production => { tokio::spawn(future); } - #[cfg(test)] + #[cfg(feature = "test-support")] Executor::Deterministic(background) => { background.spawn(future).detach(); } @@ -31,7 +31,7 @@ impl Executor { async move { match this { Executor::Production => tokio::time::sleep(duration).await, - #[cfg(test)] + #[cfg(feature = "test-support")] Executor::Deterministic(background) => background.timer(duration).await, } } diff --git a/crates/collab/src/lib.rs b/crates/collab/src/lib.rs index 08f7e61c020ca9ea23be62636e381f9abedf7cf0..84c8fb0c2f63999662f792a0bc6798b37a2fd7bc 100644 --- a/crates/collab/src/lib.rs +++ b/crates/collab/src/lib.rs @@ -6,9 +6,6 @@ pub mod executor; pub mod rpc; pub mod seed; -#[cfg(test)] -mod tests; - use anyhow::Context as _; use aws_config::{BehaviorVersion, Region}; use axum::{ @@ -169,7 +166,7 @@ impl Config { } } - #[cfg(test)] + #[cfg(feature = "test-support")] pub fn test() -> Self { Self { http_port: 
0, diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 3b665fdd23205c08a98f01e649708c00cc68175a..78206f128779c5e74bd7065ae5cd33c434d4ab6d 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -204,16 +204,16 @@ struct Session { impl Session { async fn db(&self) -> tokio::sync::MutexGuard<'_, DbHandle> { - #[cfg(test)] + #[cfg(feature = "test-support")] tokio::task::yield_now().await; let guard = self.db.lock().await; - #[cfg(test)] + #[cfg(feature = "test-support")] tokio::task::yield_now().await; guard } async fn connection_pool(&self) -> ConnectionPoolGuard<'_> { - #[cfg(test)] + #[cfg(feature = "test-support")] tokio::task::yield_now().await; let guard = self.connection_pool.lock(); ConnectionPoolGuard { @@ -267,13 +267,13 @@ impl Deref for DbHandle { pub struct Server { id: parking_lot::Mutex, peer: Arc, - pub(crate) connection_pool: Arc>, + pub connection_pool: Arc>, app_state: Arc, handlers: HashMap, teardown: watch::Sender, } -pub(crate) struct ConnectionPoolGuard<'a> { +struct ConnectionPoolGuard<'a> { guard: parking_lot::MutexGuard<'a, ConnectionPool>, _not_send: PhantomData>, } @@ -651,7 +651,7 @@ impl Server { let _ = self.teardown.send(true); } - #[cfg(test)] + #[cfg(feature = "test-support")] pub fn reset(&self, id: ServerId) { self.teardown(); *self.id.lock() = id; @@ -659,7 +659,7 @@ impl Server { let _ = self.teardown.send(false); } - #[cfg(test)] + #[cfg(feature = "test-support")] pub fn id(&self) -> ServerId { *self.id.lock() } @@ -1013,7 +1013,7 @@ impl DerefMut for ConnectionPoolGuard<'_> { impl Drop for ConnectionPoolGuard<'_> { fn drop(&mut self) { - #[cfg(test)] + #[cfg(feature = "test-support")] self.check_invariants(); } } diff --git a/crates/collab/src/rpc/connection_pool.rs b/crates/collab/src/rpc/connection_pool.rs index b1193239163fe34a0cb5802aa398abc37d1cca42..171dea37526e81cd1e487a60b9c1d99856773b14 100644 --- a/crates/collab/src/rpc/connection_pool.rs +++ b/crates/collab/src/rpc/connection_pool.rs 
@@ -168,7 +168,7 @@ impl ConnectionPool { .is_empty() } - #[cfg(test)] + #[cfg(feature = "test-support")] pub fn check_invariants(&self) { for (connection_id, connection) in &self.connections { assert!( diff --git a/crates/collab/src/tests.rs b/crates/collab/src/tests.rs deleted file mode 100644 index 361eece2e554a0f487c7a4a918d117a06f2a0ea2..0000000000000000000000000000000000000000 --- a/crates/collab/src/tests.rs +++ /dev/null @@ -1,51 +0,0 @@ -use call::Room; -use client::ChannelId; -use gpui::{Entity, TestAppContext}; - -mod agent_sharing_tests; -mod channel_buffer_tests; -mod channel_guest_tests; -mod channel_tests; -mod editor_tests; -mod following_tests; -mod git_tests; -mod integration_tests; -mod notification_tests; -mod random_channel_buffer_tests; -mod random_project_collaboration_tests; -mod randomized_test_helpers; -mod remote_editing_collaboration_tests; -mod test_server; - -pub use randomized_test_helpers::{ - RandomizedTest, TestError, UserTestPlan, run_randomized_test, save_randomized_test_plan, -}; -pub use test_server::{TestClient, TestServer}; - -#[derive(Debug, Eq, PartialEq)] -struct RoomParticipants { - remote: Vec, - pending: Vec, -} - -fn room_participants(room: &Entity, cx: &mut TestAppContext) -> RoomParticipants { - room.read_with(cx, |room, _| { - let mut remote = room - .remote_participants() - .values() - .map(|participant| participant.user.github_login.clone().to_string()) - .collect::>(); - let mut pending = room - .pending_participants() - .iter() - .map(|user| user.github_login.clone().to_string()) - .collect::>(); - remote.sort(); - pending.sort(); - RoomParticipants { remote, pending } - }) -} - -fn channel_id(room: &Entity, cx: &mut TestAppContext) -> Option { - cx.read(|cx| room.read(cx).channel_id()) -} diff --git a/crates/collab/src/tests/debug_panel_tests.rs b/crates/collab/src/tests/debug_panel_tests.rs deleted file mode 100644 index 32856ee19a69051c2da1c31f0d8e70cdaac90ab8..0000000000000000000000000000000000000000 --- 
a/crates/collab/src/tests/debug_panel_tests.rs +++ /dev/null @@ -1,2449 +0,0 @@ -use call::ActiveCall; -use dap::DebugRequestType; -use dap::requests::{Initialize, Launch, StackTrace}; -use dap::{SourceBreakpoint, requests::SetBreakpoints}; -use debugger_ui::debugger_panel::DebugPanel; -use debugger_ui::session::DebugSession; -use editor::Editor; -use gpui::{Entity, TestAppContext, VisualTestContext}; -use project::{Project, ProjectPath, WorktreeId}; -use serde_json::json; -use std::sync::Arc; -use std::{ - path::Path, - sync::atomic::{AtomicBool, Ordering}, -}; -use workspace::{Workspace, dock::Panel}; - -use super::{TestClient, TestServer}; - -pub fn init_test(cx: &mut gpui::TestAppContext) { - zlog::init_test(); - - cx.update(|cx| { - theme::init(theme::LoadThemes::JustBase, cx); - command_palette_hooks::init(cx); - debugger_ui::init(cx); - editor::init(cx); - }); -} - -async fn add_debugger_panel(workspace: &Entity, cx: &mut VisualTestContext) { - let debugger_panel = workspace - .update_in(cx, |_workspace, window, cx| { - cx.spawn_in(window, DebugPanel::load) - }) - .await - .unwrap(); - - workspace.update_in(cx, |workspace, window, cx| { - workspace.add_panel(debugger_panel, window, cx); - }); -} - -pub fn _active_session( - workspace: Entity, - cx: &mut VisualTestContext, -) -> Entity { - workspace.update_in(cx, |workspace, _window, cx| { - let debug_panel = workspace.panel::(cx).unwrap(); - debug_panel - .update(cx, |this, cx| this.active_session(cx)) - .unwrap() - }) -} - -struct ZedInstance<'a> { - client: TestClient, - project: Option>, - active_call: Entity, - cx: &'a mut TestAppContext, -} - -impl<'a> ZedInstance<'a> { - fn new(client: TestClient, cx: &'a mut TestAppContext) -> Self { - ZedInstance { - project: None, - client, - active_call: cx.read(ActiveCall::global), - cx, - } - } - - async fn host_project( - &mut self, - project_files: Option, - ) -> (u64, WorktreeId) { - let (project, worktree_id) = self.client.build_local_project("/project", 
self.cx).await; - self.active_call - .update(self.cx, |call, cx| call.set_location(Some(&project), cx)) - .await - .unwrap(); - - if let Some(tree) = project_files { - self.client.fs().insert_tree("/project", tree).await; - } - - self.project = Some(project.clone()); - - let project_id = self - .active_call - .update(self.cx, |call, cx| call.share_project(project, cx)) - .await - .unwrap(); - - (project_id, worktree_id) - } - - async fn join_project(&mut self, project_id: u64) { - let remote_project = self.client.join_remote_project(project_id, self.cx).await; - self.project = Some(remote_project); - - self.active_call - .update(self.cx, |call, cx| { - call.set_location(self.project.as_ref(), cx) - }) - .await - .unwrap(); - } - - async fn expand( - &'a mut self, - ) -> ( - &'a TestClient, - Entity, - Entity, - &'a mut VisualTestContext, - ) { - let (workspace, cx) = self.client.build_workspace( - self.project - .as_ref() - .expect("Project should be hosted or built before expanding"), - self.cx, - ); - add_debugger_panel(&workspace, cx).await; - (&self.client, workspace, self.project.clone().unwrap(), cx) - } -} - -async fn _setup_three_member_test<'a, 'b, 'c>( - server: &mut TestServer, - host_cx: &'a mut TestAppContext, - first_remote_cx: &'b mut TestAppContext, - second_remote_cx: &'c mut TestAppContext, -) -> (ZedInstance<'a>, ZedInstance<'b>, ZedInstance<'c>) { - let host_client = server.create_client(host_cx, "user_host").await; - let first_remote_client = server.create_client(first_remote_cx, "user_remote_1").await; - let second_remote_client = server - .create_client(second_remote_cx, "user_remote_2") - .await; - - init_test(host_cx); - init_test(first_remote_cx); - init_test(second_remote_cx); - - server - .create_room(&mut [ - (&host_client, host_cx), - (&first_remote_client, first_remote_cx), - (&second_remote_client, second_remote_cx), - ]) - .await; - - let host_zed = ZedInstance::new(host_client, host_cx); - let first_remote_zed = 
ZedInstance::new(first_remote_client, first_remote_cx); - let second_remote_zed = ZedInstance::new(second_remote_client, second_remote_cx); - - (host_zed, first_remote_zed, second_remote_zed) -} - -async fn setup_two_member_test<'a, 'b>( - server: &mut TestServer, - host_cx: &'a mut TestAppContext, - remote_cx: &'b mut TestAppContext, -) -> (ZedInstance<'a>, ZedInstance<'b>) { - let host_client = server.create_client(host_cx, "user_host").await; - let remote_client = server.create_client(remote_cx, "user_remote").await; - - init_test(host_cx); - init_test(remote_cx); - - server - .create_room(&mut [(&host_client, host_cx), (&remote_client, remote_cx)]) - .await; - - let host_zed = ZedInstance::new(host_client, host_cx); - let remote_zed = ZedInstance::new(remote_client, remote_cx); - - (host_zed, remote_zed) -} - -#[gpui::test] -async fn test_debug_panel_item_opens_on_remote( - host_cx: &mut TestAppContext, - remote_cx: &mut TestAppContext, -) { - let executor = host_cx.executor(); - let mut server = TestServer::start(executor).await; - - let (mut host_zed, mut remote_zed) = - setup_two_member_test(&mut server, host_cx, remote_cx).await; - - let (host_project_id, _) = host_zed.host_project(None).await; - remote_zed.join_project(host_project_id).await; - - let (_client_host, _host_workspace, host_project, host_cx) = host_zed.expand().await; - let (_client_remote, remote_workspace, _remote_project, remote_cx) = remote_zed.expand().await; - - remote_cx.run_until_parked(); - - let task = host_project.update(host_cx, |project, cx| { - project.start_debug_session(dap::test_config(DebugRequestType::Launch, None, None), cx) - }); - - let session = task.await.unwrap(); - let client = session.read_with(host_cx, |project, _| project.adapter_client().unwrap()); - - client - .on_request::(move |_, _| { - Ok(dap::Capabilities { - supports_step_back: Some(false), - ..Default::default() - }) - }) - .await; - - client.on_request::(move |_, _| Ok(())).await; - - client - 
.on_request::(move |_, _| { - Ok(dap::StackTraceResponse { - stack_frames: Vec::default(), - total_frames: None, - }) - }) - .await; - - client - .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - reason: dap::StoppedEventReason::Pause, - description: None, - thread_id: Some(1), - preserve_focus_hint: None, - text: None, - all_threads_stopped: None, - hit_breakpoint_ids: None, - })) - .await; - - host_cx.run_until_parked(); - remote_cx.run_until_parked(); - - remote_workspace.update(remote_cx, |workspace, cx| { - let debug_panel = workspace.panel::(cx).unwrap(); - let _active_session = debug_panel - .update(cx, |this, cx| this.active_session(cx)) - .unwrap(); - - assert_eq!( - 1, - debug_panel.update(cx, |this, cx| this.pane().unwrap().read(cx).items_len()) - ); - // assert_eq!(client.id(), active_session.read(cx).()); - // assert_eq!(1, active_session.read(cx).thread_id().0); - // todo(debugger) check selected thread id - }); - - let shutdown_client = host_project.update(host_cx, |project, cx| { - project.dap_store().update(cx, |dap_store, cx| { - dap_store.shutdown_session(session.read(cx).session_id(), cx) - }) - }); - - shutdown_client.await.unwrap(); -} - -#[gpui::test] -async fn test_active_debug_panel_item_set_on_join_project( - host_cx: &mut TestAppContext, - remote_cx: &mut TestAppContext, -) { - let executor = host_cx.executor(); - let mut server = TestServer::start(executor).await; - - let (mut host_zed, mut remote_zed) = - setup_two_member_test(&mut server, host_cx, remote_cx).await; - - let (host_project_id, _) = host_zed.host_project(None).await; - - let (_client_host, _host_workspace, host_project, host_cx) = host_zed.expand().await; - - host_cx.run_until_parked(); - - let task = host_project.update(host_cx, |project, cx| { - project.start_debug_session(dap::test_config(DebugRequestType::Launch, None, None), cx) - }); - - let session = task.await.unwrap(); - let client = session.read_with(host_cx, |project, _| 
project.adapter_client().unwrap()); - - client - .on_request::(move |_, _| { - Ok(dap::Capabilities { - supports_step_back: Some(false), - ..Default::default() - }) - }) - .await; - - client.on_request::(move |_, _| Ok(())).await; - - client - .on_request::(move |_, _| { - Ok(dap::StackTraceResponse { - stack_frames: Vec::default(), - total_frames: None, - }) - }) - .await; - - client - .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - reason: dap::StoppedEventReason::Pause, - description: None, - thread_id: Some(1), - preserve_focus_hint: None, - text: None, - all_threads_stopped: None, - hit_breakpoint_ids: None, - })) - .await; - - // Give host_client time to send a debug panel item to collab server - host_cx.run_until_parked(); - - remote_zed.join_project(host_project_id).await; - let (_client_remote, remote_workspace, _remote_project, remote_cx) = remote_zed.expand().await; - - host_cx.run_until_parked(); - remote_cx.run_until_parked(); - - remote_workspace.update(remote_cx, |workspace, cx| { - let debug_panel = workspace.panel::(cx).unwrap(); - let _active_session = debug_panel - .update(cx, |this, cx| this.active_session(cx)) - .unwrap(); - - assert_eq!( - 1, - debug_panel.update(cx, |this, cx| this.pane().unwrap().read(cx).items_len()) - ); - // assert_eq!(cl, active_session.read(cx).client_id()); - // assert_eq!(1, active_session.read(cx).thread_id().0); - // todo(debugger) - }); - - let shutdown_client = host_project.update(host_cx, |project, cx| { - project.dap_store().update(cx, |dap_store, cx| { - dap_store.shutdown_session(session.read(cx).session_id(), cx) - }) - }); - - shutdown_client.await.unwrap(); - - remote_cx.run_until_parked(); - - // assert we don't have a debug panel item anymore because the client shutdown - remote_workspace.update(remote_cx, |workspace, cx| { - let debug_panel = workspace.panel::(cx).unwrap(); - - debug_panel.update(cx, |this, cx| { - assert!(this.active_session(cx).is_none()); - assert_eq!(0, 
this.pane().unwrap().read(cx).items_len()); - }); - }); -} - -#[gpui::test] -async fn test_debug_panel_remote_button_presses( - _host_cx: &mut TestAppContext, - _remote_cx: &mut TestAppContext, -) { - unimplemented!("Collab is still being refactored"); - // let executor = host_cx.executor(); - // let mut server = TestServer::start(executor).await; - - // let (mut host_zed, mut remote_zed) = - // setup_two_member_test(&mut server, host_cx, remote_cx).await; - - // let (host_project_id, _) = host_zed.host_project(None).await; - // remote_zed.join_project(host_project_id).await; - - // let (_client_host, host_workspace, host_project, host_cx) = host_zed.expand().await; - // let (_client_remote, remote_workspace, _remote_project, remote_cx) = remote_zed.expand().await; - - // let task = host_project.update(host_cx, |project, cx| { - // project.start_debug_session(dap::test_config(None), cx) - // }); - - // let session = task.await.unwrap(); - // let client = session.read_with(host_cx, |project, _| project.adapter_client().unwrap()); - - // client - // .on_request::(move |_, _| { - // Ok(dap::Capabilities { - // supports_step_back: Some(true), - // ..Default::default() - // }) - // }) - // .await; - - // client.on_request::(move |_, _| Ok(())).await; - - // client - // .on_request::(move |_, _| { - // Ok(dap::StackTraceResponse { - // stack_frames: Vec::default(), - // total_frames: None, - // }) - // }) - // .await; - - // client - // .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - // reason: dap::StoppedEventReason::Pause, - // description: None, - // thread_id: Some(1), - // preserve_focus_hint: None, - // text: None, - // all_threads_stopped: None, - // hit_breakpoint_ids: None, - // })) - // .await; - - // client - // .on_request::(move |_, _| { - // Ok(dap::ContinueResponse { - // all_threads_continued: Some(true), - // }) - // }) - // .await; - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // let remote_debug_item = 
remote_workspace.update(remote_cx, |workspace, cx| { - // let debug_panel = workspace.panel::(cx).unwrap(); - // let active_session = debug_panel - // .update(cx, |this, cx| this.active_session(cx)) - // .unwrap(); - - // assert_eq!( - // 1, - // debug_panel.update(cx, |this, cx| this.pane().unwrap().read(cx).items_len()) - // ); - // // assert_eq!(client.id(), active_session.read(cx).client_id()); - // // assert_eq!(1, active_session.read(cx).thread_id().0); - // // todo(debugger) - // active_session - // }); - - // let local_debug_item = host_workspace.update(host_cx, |workspace, cx| { - // let debug_panel = workspace.panel::(cx).unwrap(); - // let active_session = debug_panel - // .update(cx, |this, cx| this.active_session(cx)) - // .unwrap(); - - // assert_eq!( - // 1, - // debug_panel.update(cx, |this, cx| this.pane().unwrap().read(cx).items_len()) - // ); - // // assert_eq!(client.id(), active_session.read(cx).client_id()); - // // assert_eq!(1, active_session.read(cx).thread_id().0); - // // todo(debugger) - // active_session - // }); - - // remote_debug_item.update(remote_cx, |this, cx| { - // this.continue_thread(cx); - // }); - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // local_debug_item.update(host_cx, |debug_panel_item, cx| { - // assert_eq!( - // debugger_ui::debugger_panel::ThreadStatus::Running, - // debug_panel_item.thread_state().read(cx).status, - // ); - // }); - - // remote_debug_item.update(remote_cx, |debug_panel_item, cx| { - // assert_eq!( - // debugger_ui::debugger_panel::ThreadStatus::Running, - // debug_panel_item.thread_state().read(cx).status, - // ); - // }); - - // client - // .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - // reason: dap::StoppedEventReason::Pause, - // description: None, - // thread_id: Some(1), - // preserve_focus_hint: None, - // text: None, - // all_threads_stopped: None, - // hit_breakpoint_ids: None, - // })) - // .await; - - // client - // .on_request::(move |_, 
_| { - // Ok(dap::StackTraceResponse { - // stack_frames: Vec::default(), - // total_frames: None, - // }) - // }) - // .await; - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // local_debug_item.update(host_cx, |debug_panel_item, cx| { - // assert_eq!( - // debugger_ui::debugger_panel::ThreadStatus::Stopped, - // debug_panel_item.thread_state().read(cx).status, - // ); - // }); - - // remote_debug_item.update(remote_cx, |debug_panel_item, cx| { - // assert_eq!( - // debugger_ui::debugger_panel::ThreadStatus::Stopped, - // debug_panel_item.thread_state().read(cx).status, - // ); - // }); - - // client - // .on_request::(move |_, _| { - // Ok(dap::ContinueResponse { - // all_threads_continued: Some(true), - // }) - // }) - // .await; - - // local_debug_item.update(host_cx, |this, cx| { - // this.continue_thread(cx); - // }); - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // local_debug_item.update(host_cx, |debug_panel_item, cx| { - // assert_eq!( - // debugger_ui::debugger_panel::ThreadStatus::Running, - // debug_panel_item.thread_state().read(cx).status, - // ); - // }); - - // remote_debug_item.update(remote_cx, |debug_panel_item, cx| { - // assert_eq!( - // debugger_ui::debugger_panel::ThreadStatus::Running, - // debug_panel_item.thread_state().read(cx).status, - // ); - // }); - - // client - // .on_request::(move |_, _| Ok(())) - // .await; - - // client - // .on_request::(move |_, _| { - // Ok(dap::StackTraceResponse { - // stack_frames: Vec::default(), - // total_frames: None, - // }) - // }) - // .await; - - // client - // .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - // reason: dap::StoppedEventReason::Pause, - // description: None, - // thread_id: Some(1), - // preserve_focus_hint: None, - // text: None, - // all_threads_stopped: None, - // hit_breakpoint_ids: None, - // })) - // .await; - - // remote_debug_item.update(remote_cx, |this, cx| { - // this.pause_thread(cx); - // }); - - 
// remote_cx.run_until_parked(); - // host_cx.run_until_parked(); - - // client - // .on_request::(move |_, _| Ok(())) - // .await; - - // remote_debug_item.update(remote_cx, |this, cx| { - // this.step_out(cx); - // }); - - // client - // .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - // reason: dap::StoppedEventReason::Pause, - // description: None, - // thread_id: Some(1), - // preserve_focus_hint: None, - // text: None, - // all_threads_stopped: None, - // hit_breakpoint_ids: None, - // })) - // .await; - - // remote_cx.run_until_parked(); - // host_cx.run_until_parked(); - - // client - // .on_request::(move |_, _| Ok(())) - // .await; - - // remote_debug_item.update(remote_cx, |this, cx| { - // this.step_over(cx); - // }); - - // client - // .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - // reason: dap::StoppedEventReason::Pause, - // description: None, - // thread_id: Some(1), - // preserve_focus_hint: None, - // text: None, - // all_threads_stopped: None, - // hit_breakpoint_ids: None, - // })) - // .await; - - // remote_cx.run_until_parked(); - // host_cx.run_until_parked(); - - // client - // .on_request::(move |_, _| Ok(())) - // .await; - - // remote_debug_item.update(remote_cx, |this, cx| { - // this.step_in(cx); - // }); - - // client - // .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - // reason: dap::StoppedEventReason::Pause, - // description: None, - // thread_id: Some(1), - // preserve_focus_hint: None, - // text: None, - // all_threads_stopped: None, - // hit_breakpoint_ids: None, - // })) - // .await; - - // remote_cx.run_until_parked(); - // host_cx.run_until_parked(); - - // client - // .on_request::(move |_, _| Ok(())) - // .await; - - // remote_debug_item.update(remote_cx, |this, cx| { - // this.step_back(cx); - // }); - - // client - // .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - // reason: dap::StoppedEventReason::Pause, - // description: None, - // thread_id: 
Some(1), - // preserve_focus_hint: None, - // text: None, - // all_threads_stopped: None, - // hit_breakpoint_ids: None, - // })) - // .await; - - // remote_cx.run_until_parked(); - // host_cx.run_until_parked(); - - // remote_debug_item.update(remote_cx, |this, cx| { - // this.stop_thread(cx); - // }); - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // // assert we don't have a debug panel item anymore because the client shutdown - // remote_workspace.update(remote_cx, |workspace, cx| { - // let debug_panel = workspace.panel::(cx).unwrap(); - - // debug_panel.update(cx, |this, cx| { - // assert!(this.active_session(cx).is_none()); - // assert_eq!(0, this.pane().unwrap().read(cx).items_len()); - // }); - // }); -} - -#[gpui::test] -async fn test_restart_stack_frame(_host_cx: &mut TestAppContext, _remote_cx: &mut TestAppContext) { - unimplemented!("Collab is still being refactored"); - // let executor = host_cx.executor(); - // let mut server = TestServer::start(executor).await; - - // let (mut host_zed, mut remote_zed) = - // setup_two_member_test(&mut server, host_cx, remote_cx).await; - - // let (host_project_id, _) = host_zed.host_project(None).await; - // remote_zed.join_project(host_project_id).await; - - // let (_client_host, _host_workspace, host_project, host_cx) = host_zed.expand().await; - // let (_client_remote, remote_workspace, _remote_project, remote_cx) = remote_zed.expand().await; - - // let called_restart_frame = Arc::new(AtomicBool::new(false)); - - // let task = host_project.update(host_cx, |project, cx| { - // project.start_debug_session(dap::test_config(None), cx) - // }); - - // let session = task.await.unwrap(); - // let client = session.read(cx).adapter_client().unwrap(); - - // client - // .on_request::(move |_, _| { - // Ok(dap::Capabilities { - // supports_restart_frame: Some(true), - // ..Default::default() - // }) - // }) - // .await; - - // client.on_request::(move |_, _| Ok(())).await; - - // let stack_frames 
= vec![StackFrame { - // id: 1, - // name: "Stack Frame 1".into(), - // source: Some(dap::Source { - // name: Some("test.js".into()), - // path: Some("/project/src/test.js".into()), - // source_reference: None, - // presentation_hint: None, - // origin: None, - // sources: None, - // adapter_data: None, - // checksums: None, - // }), - // line: 3, - // column: 1, - // end_line: None, - // end_column: None, - // can_restart: None, - // instruction_pointer_reference: None, - // module_id: None, - // presentation_hint: None, - // }]; - - // client - // .on_request::({ - // let stack_frames = Arc::new(stack_frames.clone()); - // move |_, args| { - // assert_eq!(1, args.thread_id); - - // Ok(dap::StackTraceResponse { - // stack_frames: (*stack_frames).clone(), - // total_frames: None, - // }) - // } - // }) - // .await; - - // client - // .on_request::({ - // let called_restart_frame = called_restart_frame.clone(); - // move |_, args| { - // assert_eq!(1, args.frame_id); - - // called_restart_frame.store(true, Ordering::SeqCst); - - // Ok(()) - // } - // }) - // .await; - - // client - // .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - // reason: dap::StoppedEventReason::Pause, - // description: None, - // thread_id: Some(1), - // preserve_focus_hint: None, - // text: None, - // all_threads_stopped: None, - // hit_breakpoint_ids: None, - // })) - // .await; - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // // try to restart stack frame 1 from the guest side - // remote_workspace.update(remote_cx, |workspace, cx| { - // let debug_panel = workspace.panel::(cx).unwrap(); - // let active_session = debug_panel - // .update(cx, |this, cx| this.active_session(cx)) - // .unwrap(); - - // active_session.update(cx, |debug_panel_item, cx| { - // debug_panel_item - // .stack_frame_list() - // .update(cx, |stack_frame_list, cx| { - // stack_frame_list.restart_stack_frame(1, cx); - // }); - // }); - // }); - - // host_cx.run_until_parked(); 
- // remote_cx.run_until_parked(); - - // assert!( - // called_restart_frame.load(std::sync::atomic::Ordering::SeqCst), - // "Restart stack frame was not called" - // ); - - // let shutdown_client = host_project.update(host_cx, |project, cx| { - // project.dap_store().update(cx, |dap_store, cx| { - // dap_store.shutdown_session(&session.read(cx).session_id(), cx) - // }) - // }); - - // shutdown_client.await.unwrap(); -} - -#[gpui::test] -async fn test_updated_breakpoints_send_to_dap( - host_cx: &mut TestAppContext, - remote_cx: &mut TestAppContext, -) { - let executor = host_cx.executor(); - let mut server = TestServer::start(executor).await; - - let (mut host_zed, mut remote_zed) = - setup_two_member_test(&mut server, host_cx, remote_cx).await; - - let (host_project_id, worktree_id) = host_zed - .host_project(Some(json!({"test.txt": "one\ntwo\nthree\nfour\nfive"}))) - .await; - - remote_zed.join_project(host_project_id).await; - - let (_client_host, host_workspace, host_project, host_cx) = host_zed.expand().await; - let (_client_remote, remote_workspace, _remote_project, remote_cx) = remote_zed.expand().await; - - let project_path = ProjectPath { - worktree_id, - path: Arc::from(Path::new(&"test.txt")), - }; - - let task = host_project.update(host_cx, |project, cx| { - project.start_debug_session(dap::test_config(DebugRequestType::Launch, None, None), cx) - }); - - let session = task.await.unwrap(); - let client = session.read_with(host_cx, |project, _| project.adapter_client().unwrap()); - - client - .on_request::(move |_, _| { - Ok(dap::Capabilities { - supports_restart_frame: Some(true), - ..Default::default() - }) - }) - .await; - - client.on_request::(move |_, _| Ok(())).await; - client - .on_request::(move |_, _| { - Ok(dap::StackTraceResponse { - stack_frames: Vec::default(), - total_frames: None, - }) - }) - .await; - - let called_set_breakpoints = Arc::new(AtomicBool::new(false)); - client - .on_request::({ - let called_set_breakpoints = 
called_set_breakpoints.clone(); - move |_, args| { - assert_eq!("/project/test.txt", args.source.path.unwrap()); - assert_eq!( - vec![SourceBreakpoint { - line: 3, - column: None, - condition: None, - hit_condition: None, - log_message: None, - mode: None - }], - args.breakpoints.unwrap() - ); - // assert!(!args.source_modified.unwrap()); - // todo(debugger): Implement source_modified handling - - called_set_breakpoints.store(true, Ordering::SeqCst); - - Ok(dap::SetBreakpointsResponse { - breakpoints: Vec::default(), - }) - } - }) - .await; - - client - .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - reason: dap::StoppedEventReason::Pause, - description: None, - thread_id: Some(1), - preserve_focus_hint: None, - text: None, - all_threads_stopped: None, - hit_breakpoint_ids: None, - })) - .await; - - host_cx.run_until_parked(); - remote_cx.run_until_parked(); - - // Client B opens an editor. - let editor_b = remote_workspace - .update_in(remote_cx, |workspace, window, cx| { - workspace.open_path(project_path.clone(), None, true, window, cx) - }) - .await - .unwrap() - .downcast::() - .unwrap(); - - editor_b.update_in(remote_cx, |editor, window, cx| { - editor.move_down(&zed_actions::editor::MoveDown, window, cx); - editor.move_down(&zed_actions::editor::MoveDown, window, cx); - editor.toggle_breakpoint(&editor::actions::ToggleBreakpoint, window, cx); - }); - - // Client A opens an editor. 
- let editor_a = host_workspace - .update_in(host_cx, |workspace, window, cx| { - workspace.open_path(project_path.clone(), None, true, window, cx) - }) - .await - .unwrap() - .downcast::() - .unwrap(); - - host_cx.run_until_parked(); - remote_cx.run_until_parked(); - - let called_set_breakpoints = Arc::new(AtomicBool::new(false)); - client - .on_request::({ - let called_set_breakpoints = called_set_breakpoints.clone(); - move |_, args| { - assert_eq!("/project/test.txt", args.source.path.unwrap()); - assert!(args.breakpoints.unwrap().is_empty()); - // assert!(!args.source_modified.unwrap()); - // todo(debugger) Implement source modified support - - called_set_breakpoints.store(true, Ordering::SeqCst); - - Ok(dap::SetBreakpointsResponse { - breakpoints: Vec::default(), - }) - } - }) - .await; - - // remove the breakpoint that client B added - editor_a.update_in(host_cx, |editor, window, cx| { - editor.move_down(&zed_actions::editor::MoveDown, window, cx); - editor.move_down(&zed_actions::editor::MoveDown, window, cx); - editor.toggle_breakpoint(&editor::actions::ToggleBreakpoint, window, cx); - }); - - host_cx.run_until_parked(); - remote_cx.run_until_parked(); - - assert!( - called_set_breakpoints.load(std::sync::atomic::Ordering::SeqCst), - "SetBreakpoint request must be called" - ); - - let called_set_breakpoints = Arc::new(AtomicBool::new(false)); - client - .on_request::({ - let called_set_breakpoints = called_set_breakpoints.clone(); - move |_, args| { - assert_eq!("/project/test.txt", args.source.path.unwrap()); - let mut breakpoints = args.breakpoints.unwrap(); - breakpoints.sort_by_key(|b| b.line); - assert_eq!( - vec![ - SourceBreakpoint { - line: 2, - column: None, - condition: None, - hit_condition: None, - log_message: None, - mode: None - }, - SourceBreakpoint { - line: 3, - column: None, - condition: None, - hit_condition: None, - log_message: None, - mode: None - } - ], - breakpoints - ); - // assert!(!args.source_modified.unwrap()); - // 
todo(debugger) Implement source modified support - - called_set_breakpoints.store(true, Ordering::SeqCst); - - Ok(dap::SetBreakpointsResponse { - breakpoints: Vec::default(), - }) - } - }) - .await; - - // Add our own breakpoint now - editor_a.update_in(host_cx, |editor, window, cx| { - editor.toggle_breakpoint(&editor::actions::ToggleBreakpoint, window, cx); - editor.move_up(&zed_actions::editor::MoveUp, window, cx); - editor.toggle_breakpoint(&editor::actions::ToggleBreakpoint, window, cx); - }); - - host_cx.run_until_parked(); - remote_cx.run_until_parked(); - - assert!( - called_set_breakpoints.load(std::sync::atomic::Ordering::SeqCst), - "SetBreakpoint request must be called" - ); - - let shutdown_client = host_project.update(host_cx, |project, cx| { - project.dap_store().update(cx, |dap_store, cx| { - dap_store.shutdown_session(session.read(cx).session_id(), cx) - }) - }); - - shutdown_client.await.unwrap(); -} - -#[gpui::test] -async fn test_module_list( - _host_cx: &mut TestAppContext, - _remote_cx: &mut TestAppContext, - _late_join_cx: &mut TestAppContext, -) { - unimplemented!("Collab is still being refactored"); - // let executor = host_cx.executor(); - // let mut server = TestServer::start(executor).await; - - // let (mut host_zed, mut remote_zed, mut late_join_zed) = - // setup_three_member_test(&mut server, host_cx, remote_cx, late_join_cx).await; - - // let (host_project_id, _worktree_id) = host_zed.host_project(None).await; - - // remote_zed.join_project(host_project_id).await; - - // let (_client_host, host_workspace, host_project, host_cx) = host_zed.expand().await; - // let (_client_remote, remote_workspace, _remote_project, remote_cx) = remote_zed.expand().await; - - // let task = host_project.update(host_cx, |project, cx| { - // project.start_debug_session(dap::test_config(None), cx) - // }); - - // let session = task.await.unwrap(); - // let client = session.read_with(host_cx, |project, _| project.adapter_client().unwrap()); - - // let 
called_initialize = Arc::new(AtomicBool::new(false)); - - // client - // .on_request::({ - // let called_initialize = called_initialize.clone(); - // move |_, _| { - // called_initialize.store(true, Ordering::SeqCst); - // Ok(dap::Capabilities { - // supports_restart_frame: Some(true), - // supports_modules_request: Some(true), - // ..Default::default() - // }) - // } - // }) - // .await; - - // client.on_request::(move |_, _| Ok(())).await; - // client - // .on_request::(move |_, _| { - // Ok(dap::StackTraceResponse { - // stack_frames: Vec::default(), - // total_frames: None, - // }) - // }) - // .await; - - // let called_modules = Arc::new(AtomicBool::new(false)); - // let modules = vec![ - // dap::Module { - // id: dap::ModuleId::Number(1), - // name: "First Module".into(), - // address_range: None, - // date_time_stamp: None, - // path: None, - // symbol_file_path: None, - // symbol_status: None, - // version: None, - // is_optimized: None, - // is_user_code: None, - // }, - // dap::Module { - // id: dap::ModuleId::Number(2), - // name: "Second Module".into(), - // address_range: None, - // date_time_stamp: None, - // path: None, - // symbol_file_path: None, - // symbol_status: None, - // version: None, - // is_optimized: None, - // is_user_code: None, - // }, - // ]; - - // client - // .on_request::({ - // let called_modules = called_modules.clone(); - // let modules = modules.clone(); - // move |_, _| unsafe { - // static mut REQUEST_COUNT: i32 = 1; - // assert_eq!( - // 1, REQUEST_COUNT, - // "This request should only be called once from the host" - // ); - // REQUEST_COUNT += 1; - // called_modules.store(true, Ordering::SeqCst); - - // Ok(dap::ModulesResponse { - // modules: modules.clone(), - // total_modules: Some(2u64), - // }) - // } - // }) - // .await; - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // assert!( - // called_initialize.load(std::sync::atomic::Ordering::SeqCst), - // "Request Initialize must be called" - // ); 
- - // client - // .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - // reason: dap::StoppedEventReason::Pause, - // description: None, - // thread_id: Some(1), - // preserve_focus_hint: None, - // text: None, - // all_threads_stopped: None, - // hit_breakpoint_ids: None, - // })) - // .await; - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // assert!( - // called_modules.load(std::sync::atomic::Ordering::SeqCst), - // "Request Modules must be called" - // ); - - // host_workspace.update(host_cx, |workspace, cx| { - // let debug_panel = workspace.panel::(cx).unwrap(); - // let debug_panel_item = debug_panel - // .update(cx, |this, cx| this.active_session(cx)) - // .unwrap(); - - // debug_panel_item.update(cx, |item, cx| { - // assert_eq!( - // true, - // item.capabilities(cx).supports_modules_request.unwrap(), - // "Local supports modules request should be true" - // ); - - // let local_module_list = item.module_list().update(cx, |list, cx| list.modules(cx)); - - // assert_eq!( - // 2usize, - // local_module_list.len(), - // "Local module list should have two items in it" - // ); - // assert_eq!( - // modules.clone(), - // local_module_list, - // "Local module list should match module list from response" - // ); - // }) - // }); - - // remote_workspace.update(remote_cx, |workspace, cx| { - // let debug_panel = workspace.panel::(cx).unwrap(); - // let debug_panel_item = debug_panel - // .update(cx, |this, cx| this.active_session(cx)) - // .unwrap(); - - // debug_panel_item.update(cx, |item, cx| { - // assert_eq!( - // true, - // item.capabilities(cx).supports_modules_request.unwrap(), - // "Remote capabilities supports modules request should be true" - // ); - // let remote_module_list = item.module_list().update(cx, |list, cx| list.modules(cx)); - - // assert_eq!( - // 2usize, - // remote_module_list.len(), - // "Remote module list should have two items in it" - // ); - // assert_eq!( - // modules.clone(), - // 
remote_module_list, - // "Remote module list should match module list from response" - // ); - // }) - // }); - - // late_join_zed.join_project(host_project_id).await; - // let (_late_join_client, late_join_workspace, _late_join_project, late_join_cx) = - // late_join_zed.expand().await; - - // late_join_workspace.update(late_join_cx, |workspace, cx| { - // let debug_panel = workspace.panel::(cx).unwrap(); - // let debug_panel_item = debug_panel - // .update(cx, |this, cx| this.active_session(cx)) - // .unwrap(); - - // debug_panel_item.update(cx, |item, cx| { - // assert_eq!( - // true, - // item.capabilities(cx).supports_modules_request.unwrap(), - // "Remote (mid session join) capabilities supports modules request should be true" - // ); - // let remote_module_list = item.module_list().update(cx, |list, cx| list.modules(cx)); - - // assert_eq!( - // 2usize, - // remote_module_list.len(), - // "Remote (mid session join) module list should have two items in it" - // ); - // assert_eq!( - // modules.clone(), - // remote_module_list, - // "Remote (mid session join) module list should match module list from response" - // ); - // }) - // }); - - // let shutdown_client = host_project.update(host_cx, |project, cx| { - // project.dap_store().update(cx, |dap_store, cx| { - // dap_store.shutdown_session(&session.read(cx).id(), cx) - // }) - // }); - - // shutdown_client.await.unwrap(); -} - -// #[gpui::test] -// async fn test_variable_list( -// host_cx: &mut TestAppContext, -// remote_cx: &mut TestAppContext, -// late_join_cx: &mut TestAppContext, -// ) { -// let executor = host_cx.executor(); -// let mut server = TestServer::start(executor).await; - -// let (mut host_zed, mut remote_zed, mut late_join_zed) = -// setup_three_member_test(&mut server, host_cx, remote_cx, late_join_cx).await; - -// let (host_project_id, _worktree_id) = host_zed -// .host_project(Some(json!({"test.txt": "one\ntwo\nthree\nfour\nfive"}))) -// .await; - -// 
remote_zed.join_project(host_project_id).await; - -// let (_client_host, host_workspace, host_project, host_cx) = host_zed.expand().await; -// let (_client_remote, remote_workspace, _remote_project, remote_cx) = remote_zed.expand().await; - -// let task = host_project.update(host_cx, |project, cx| { -// project.start_debug_session( -// dap::DebugAdapterConfig { -// label: "test config".into(), -// kind: dap::DebugAdapterKind::Fake, -// request: dap::DebugRequestType::Launch, -// program: None, -// cwd: None, -// initialize_args: None, -// }, -// cx, -// ) -// }); - -// let (session, client) = task.await.unwrap(); - -// client -// .on_request::(move |_, _| { -// Ok(dap::Capabilities { -// supports_step_back: Some(true), -// ..Default::default() -// }) -// }) -// .await; - -// client.on_request::(move |_, _| Ok(())).await; - -// let stack_frames = vec![dap::StackFrame { -// id: 1, -// name: "Stack Frame 1".into(), -// source: Some(dap::Source { -// name: Some("test.js".into()), -// path: Some("/project/src/test.js".into()), -// source_reference: None, -// presentation_hint: None, -// origin: None, -// sources: None, -// adapter_data: None, -// checksums: None, -// }), -// line: 1, -// column: 1, -// end_line: None, -// end_column: None, -// can_restart: None, -// instruction_pointer_reference: None, -// module_id: None, -// presentation_hint: None, -// }]; - -// let scopes = vec![Scope { -// name: "Scope 1".into(), -// presentation_hint: None, -// variables_reference: 1, -// named_variables: None, -// indexed_variables: None, -// expensive: false, -// source: None, -// line: None, -// column: None, -// end_line: None, -// end_column: None, -// }]; - -// let variable_1 = Variable { -// name: "variable 1".into(), -// value: "1".into(), -// type_: None, -// presentation_hint: None, -// evaluate_name: None, -// variables_reference: 2, -// named_variables: None, -// indexed_variables: None, -// memory_reference: None, -// }; - -// let variable_2 = Variable { -// name: 
"variable 2".into(), -// value: "2".into(), -// type_: None, -// presentation_hint: None, -// evaluate_name: None, -// variables_reference: 3, -// named_variables: None, -// indexed_variables: None, -// memory_reference: None, -// }; - -// let variable_3 = Variable { -// name: "variable 3".into(), -// value: "hello world".into(), -// type_: None, -// presentation_hint: None, -// evaluate_name: None, -// variables_reference: 4, -// named_variables: None, -// indexed_variables: None, -// memory_reference: None, -// }; - -// let variable_4 = Variable { -// name: "variable 4".into(), -// value: "hello world this is the final variable".into(), -// type_: None, -// presentation_hint: None, -// evaluate_name: None, -// variables_reference: 0, -// named_variables: None, -// indexed_variables: None, -// memory_reference: None, -// }; - -// client -// .on_request::({ -// let stack_frames = std::sync::Arc::new(stack_frames.clone()); -// move |_, args| { -// assert_eq!(1, args.thread_id); - -// Ok(dap::StackTraceResponse { -// stack_frames: (*stack_frames).clone(), -// total_frames: None, -// }) -// } -// }) -// .await; - -// client -// .on_request::({ -// let scopes = Arc::new(scopes.clone()); -// move |_, args| { -// assert_eq!(1, args.frame_id); - -// Ok(dap::ScopesResponse { -// scopes: (*scopes).clone(), -// }) -// } -// }) -// .await; - -// let first_variable_request = vec![variable_1.clone(), variable_2.clone()]; - -// client -// .on_request::({ -// move |_, args| { -// assert_eq!(1, args.variables_reference); - -// Ok(dap::VariablesResponse { -// variables: first_variable_request.clone(), -// }) -// } -// }) -// .await; - -// client -// .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { -// reason: dap::StoppedEventReason::Pause, -// description: None, -// thread_id: Some(1), -// preserve_focus_hint: None, -// text: None, -// all_threads_stopped: None, -// hit_breakpoint_ids: None, -// })) -// .await; - -// host_cx.run_until_parked(); -// 
remote_cx.run_until_parked(); - -// let local_debug_item = host_workspace.update(host_cx, |workspace, cx| { -// let debug_panel = workspace.panel::(cx).unwrap(); -// let active_debug_panel_item = debug_panel -// .update(cx, |this, cx| this.active_debug_panel_item(cx)) -// .unwrap(); - -// assert_eq!( -// 1, -// debug_panel.update(cx, |this, cx| this.pane().unwrap().read(cx).items_len()) -// ); -// assert_eq!(client.id(), active_debug_panel_item.read(cx).client_id()); -// assert_eq!(1, active_debug_panel_item.read(cx).thread_id()); -// active_debug_panel_item -// }); - -// let remote_debug_item = remote_workspace.update(remote_cx, |workspace, cx| { -// let debug_panel = workspace.panel::(cx).unwrap(); -// let active_debug_panel_item = debug_panel -// .update(cx, |this, cx| this.active_debug_panel_item(cx)) -// .unwrap(); - -// assert_eq!( -// 1, -// debug_panel.update(cx, |this, cx| this.pane().unwrap().read(cx).items_len()) -// ); -// assert_eq!(client.id(), active_debug_panel_item.read(cx).client_id()); -// assert_eq!(1, active_debug_panel_item.read(cx).thread_id()); -// active_debug_panel_item -// }); - -// let first_visual_entries = vec!["v Scope 1", " > variable 1", " > variable 2"]; -// let first_variable_containers = vec![ -// VariableContainer { -// container_reference: scopes[0].variables_reference, -// variable: variable_1.clone(), -// depth: 1, -// }, -// VariableContainer { -// container_reference: scopes[0].variables_reference, -// variable: variable_2.clone(), -// depth: 1, -// }, -// ]; - -// local_debug_item -// .update(host_cx, |this, _| this.variable_list().clone()) -// .update(host_cx, |variable_list, cx| { -// assert_eq!(1, variable_list.scopes().len()); -// assert_eq!(scopes, variable_list.scopes().get(&1).unwrap().clone()); -// assert_eq!(&first_variable_containers, &variable_list.variables()); - -// variable_list.assert_visual_entries(first_visual_entries.clone(), cx); -// }); - -// client -// .on_request::({ -// let variables = 
Arc::new(vec![variable_3.clone()]); -// move |_, args| { -// assert_eq!(2, args.variables_reference); - -// Ok(dap::VariablesResponse { -// variables: (*variables).clone(), -// }) -// } -// }) -// .await; - -// remote_debug_item -// .update(remote_cx, |this, _| this.variable_list().clone()) -// .update(remote_cx, |variable_list, cx| { -// assert_eq!(1, variable_list.scopes().len()); -// assert_eq!(scopes, variable_list.scopes().get(&1).unwrap().clone()); -// assert_eq!(&first_variable_containers, &variable_list.variables()); - -// variable_list.assert_visual_entries(first_visual_entries.clone(), cx); - -// variable_list.toggle_variable(&scopes[0], &variable_1, 1, cx); -// }); - -// host_cx.run_until_parked(); -// remote_cx.run_until_parked(); - -// let second_req_variable_list = vec![ -// VariableContainer { -// container_reference: scopes[0].variables_reference, -// variable: variable_1.clone(), -// depth: 1, -// }, -// VariableContainer { -// container_reference: variable_1.variables_reference, -// variable: variable_3.clone(), -// depth: 2, -// }, -// VariableContainer { -// container_reference: scopes[0].variables_reference, -// variable: variable_2.clone(), -// depth: 1, -// }, -// ]; - -// remote_debug_item -// .update(remote_cx, |this, _| this.variable_list().clone()) -// .update(remote_cx, |variable_list, cx| { -// assert_eq!(1, variable_list.scopes().len()); -// assert_eq!(3, variable_list.variables().len()); -// assert_eq!(scopes, variable_list.scopes().get(&1).unwrap().clone()); -// assert_eq!(&second_req_variable_list, &variable_list.variables()); - -// variable_list.assert_visual_entries( -// vec![ -// "v Scope 1", -// " v variable 1", -// " > variable 3", -// " > variable 2", -// ], -// cx, -// ); -// }); - -// client -// .on_request::({ -// let variables = Arc::new(vec![variable_4.clone()]); -// move |_, args| { -// assert_eq!(3, args.variables_reference); - -// Ok(dap::VariablesResponse { -// variables: (*variables).clone(), -// }) -// } -// }) -// 
.await; - -// local_debug_item -// .update(host_cx, |this, _| this.variable_list().clone()) -// .update(host_cx, |variable_list, cx| { -// assert_eq!(1, variable_list.scopes().len()); -// assert_eq!(3, variable_list.variables().len()); -// assert_eq!(scopes, variable_list.scopes().get(&1).unwrap().clone()); -// assert_eq!(&second_req_variable_list, &variable_list.variables()); - -// variable_list.assert_visual_entries(first_visual_entries.clone(), cx); - -// variable_list.toggle_variable(&scopes[0], &variable_2.clone(), 1, cx); -// }); - -// host_cx.run_until_parked(); -// remote_cx.run_until_parked(); - -// let final_variable_containers: Vec = vec![ -// VariableContainer { -// container_reference: scopes[0].variables_reference, -// variable: variable_1.clone(), -// depth: 1, -// }, -// VariableContainer { -// container_reference: variable_1.variables_reference, -// variable: variable_3.clone(), -// depth: 2, -// }, -// VariableContainer { -// container_reference: scopes[0].variables_reference, -// variable: variable_2.clone(), -// depth: 1, -// }, -// VariableContainer { -// container_reference: variable_2.variables_reference, -// variable: variable_4.clone(), -// depth: 2, -// }, -// ]; - -// remote_debug_item -// .update(remote_cx, |this, _| this.variable_list().clone()) -// .update(remote_cx, |variable_list, cx| { -// assert_eq!(1, variable_list.scopes().len()); -// assert_eq!(4, variable_list.variables().len()); -// assert_eq!(scopes, variable_list.scopes().get(&1).unwrap().clone()); -// assert_eq!(&final_variable_containers, &variable_list.variables()); - -// variable_list.assert_visual_entries( -// vec![ -// "v Scope 1", -// " v variable 1", -// " > variable 3", -// " > variable 2", -// ], -// cx, -// ); -// }); - -// local_debug_item -// .update(host_cx, |this, _| this.variable_list().clone()) -// .update(host_cx, |variable_list, cx| { -// assert_eq!(1, variable_list.scopes().len()); -// assert_eq!(4, variable_list.variables().len()); -// assert_eq!(scopes, 
variable_list.scopes().get(&1).unwrap().clone()); -// assert_eq!(&final_variable_containers, &variable_list.variables()); - -// variable_list.assert_visual_entries( -// vec![ -// "v Scope 1", -// " > variable 1", -// " v variable 2", -// " > variable 4", -// ], -// cx, -// ); -// }); - -// late_join_zed.join_project(host_project_id).await; -// let (_late_join_client, late_join_workspace, _late_join_project, late_join_cx) = -// late_join_zed.expand().await; - -// late_join_cx.run_until_parked(); - -// let last_join_remote_item = late_join_workspace.update(late_join_cx, |workspace, cx| { -// let debug_panel = workspace.panel::(cx).unwrap(); -// let active_debug_panel_item = debug_panel -// .update(cx, |this, cx| this.active_debug_panel_item(cx)) -// .unwrap(); - -// assert_eq!( -// 1, -// debug_panel.update(cx, |this, cx| this.pane().unwrap().read(cx).items_len()) -// ); -// assert_eq!(client.id(), active_debug_panel_item.read(cx).client_id()); -// assert_eq!(1, active_debug_panel_item.read(cx).thread_id()); -// active_debug_panel_item -// }); - -// last_join_remote_item -// .update(late_join_cx, |this, _| this.variable_list().clone()) -// .update(late_join_cx, |variable_list, cx| { -// assert_eq!(1, variable_list.scopes().len()); -// assert_eq!(4, variable_list.variables().len()); -// assert_eq!(scopes, variable_list.scopes().get(&1).unwrap().clone()); -// assert_eq!(final_variable_containers, variable_list.variables()); - -// variable_list.assert_visual_entries(first_visual_entries, cx); -// }); - -// let shutdown_client = host_project.update(host_cx, |project, cx| { -// project.dap_store().update(cx, |dap_store, cx| { -// dap_store.shutdown_session(&session.read(cx).id(), cx) -// }) -// }); - -// shutdown_client.await.unwrap(); -// } - -#[gpui::test] -async fn test_ignore_breakpoints( - _host_cx: &mut TestAppContext, - _remote_cx: &mut TestAppContext, - _cx_c: &mut TestAppContext, -) { - unimplemented!("Collab is still being refactored"); - // let executor = 
host_cx.executor(); - // let mut server = TestServer::start(executor).await; - - // let (mut host_zed, mut remote_zed, mut late_join_zed) = - // setup_three_member_test(&mut server, host_cx, remote_cx, cx_c).await; - - // let (host_project_id, worktree_id) = host_zed - // .host_project(Some(json!({"test.txt": "one\ntwo\nthree\nfour\nfive"}))) - // .await; - - // remote_zed.join_project(host_project_id).await; - - // let (_client_host, host_workspace, host_project, host_cx) = host_zed.expand().await; - // let (_client_remote, remote_workspace, remote_project, remote_cx) = remote_zed.expand().await; - - // let project_path = ProjectPath { - // worktree_id, - // path: Arc::from(Path::new(&"test.txt")), - // }; - - // let local_editor = host_workspace - // .update_in(host_cx, |workspace, window, cx| { - // workspace.open_path(project_path.clone(), None, true, window, cx) - // }) - // .await - // .unwrap() - // .downcast::() - // .unwrap(); - - // local_editor.update_in(host_cx, |editor, window, cx| { - // editor.move_down(&zed_actions::editor::MoveDown, window, cx); - // editor.toggle_breakpoint(&editor::actions::ToggleBreakpoint, window, cx); // Line 2 - // editor.move_down(&zed_actions::editor::MoveDown, window, cx); - // editor.toggle_breakpoint(&editor::actions::ToggleBreakpoint, window, cx); - // // Line 3 - // }); - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // let task = host_project.update(host_cx, |project, cx| { - // project.start_debug_session(dap::test_config(None), cx) - // }); - - // let session = task.await.unwrap(); - // let client = session.read_with(host_cx, |project, _| project.adapter_client().unwrap()); - // let client_id = client.id(); - - // client - // .on_request::(move |_, _| { - // Ok(dap::Capabilities { - // supports_configuration_done_request: Some(true), - // ..Default::default() - // }) - // }) - // .await; - - // let called_set_breakpoints = Arc::new(AtomicBool::new(false)); - // client - // .on_request::({ - 
// let called_set_breakpoints = called_set_breakpoints.clone(); - // move |_, args| { - // assert_eq!("/project/test.txt", args.source.path.unwrap()); - - // let mut actual_breakpoints = args.breakpoints.unwrap(); - // actual_breakpoints.sort_by_key(|b| b.line); - - // let expected_breakpoints = vec![ - // SourceBreakpoint { - // line: 2, - // column: None, - // condition: None, - // hit_condition: None, - // log_message: None, - // mode: None, - // }, - // SourceBreakpoint { - // line: 3, - // column: None, - // condition: None, - // hit_condition: None, - // log_message: None, - // mode: None, - // }, - // ]; - - // assert_eq!(actual_breakpoints, expected_breakpoints); - - // called_set_breakpoints.store(true, Ordering::SeqCst); - - // Ok(dap::SetBreakpointsResponse { - // breakpoints: Vec::default(), - // }) - // } - // }) - // .await; - - // client.on_request::(move |_, _| Ok(())).await; - // client - // .on_request::(move |_, _| { - // Ok(dap::StackTraceResponse { - // stack_frames: Vec::default(), - // total_frames: None, - // }) - // }) - // .await; - - // client - // .fake_event(dap::messages::Events::Initialized(Some( - // dap::Capabilities { - // supports_configuration_done_request: Some(true), - // ..Default::default() - // }, - // ))) - // .await; - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // assert!( - // called_set_breakpoints.load(std::sync::atomic::Ordering::SeqCst), - // "SetBreakpoint request must be called when starting debug session" - // ); - - // client - // .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - // reason: dap::StoppedEventReason::Pause, - // description: None, - // thread_id: Some(1), - // preserve_focus_hint: None, - // text: None, - // all_threads_stopped: None, - // hit_breakpoint_ids: None, - // })) - // .await; - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // let remote_debug_item = remote_workspace.update(remote_cx, |workspace, cx| { - // let 
debug_panel = workspace.panel::(cx).unwrap(); - // let active_session = debug_panel - // .update(cx, |this, cx| this.active_session(cx)) - // .unwrap(); - - // assert_eq!( - // 1, - // debug_panel.update(cx, |this, cx| this.pane().unwrap().read(cx).items_len()) - // ); - - // let session_id = debug_panel.update(cx, |this, cx| { - // this.dap_store() - // .read(cx) - // .session_by_client_id(client.id()) - // .unwrap() - // .read(cx) - // .id() - // }); - - // let breakpoints_ignored = active_session.read(cx).are_breakpoints_ignored(cx); - - // assert_eq!(session_id, active_session.read(cx).session().read(cx).id()); - // assert_eq!(false, breakpoints_ignored); - // assert_eq!(client.id(), active_session.read(cx).client_id()); - // assert_eq!(1, active_session.read(cx).thread_id().0); - // active_session - // }); - - // called_set_breakpoints.store(false, Ordering::SeqCst); - - // client - // .on_request::({ - // let called_set_breakpoints = called_set_breakpoints.clone(); - // move |_, args| { - // assert_eq!("/project/test.txt", args.source.path.unwrap()); - // assert_eq!(args.breakpoints, Some(vec![])); - - // called_set_breakpoints.store(true, Ordering::SeqCst); - - // Ok(dap::SetBreakpointsResponse { - // breakpoints: Vec::default(), - // }) - // } - // }) - // .await; - - // let local_debug_item = host_workspace.update(host_cx, |workspace, cx| { - // let debug_panel = workspace.panel::(cx).unwrap(); - // let active_session = debug_panel - // .update(cx, |this, cx| this.active_session(cx)) - // .unwrap(); - - // assert_eq!( - // 1, - // debug_panel.update(cx, |this, cx| this.pane().unwrap().read(cx).items_len()) - // ); - - // assert_eq!(false, active_session.read(cx).are_breakpoints_ignored(cx)); - // assert_eq!(client.id(), active_session.read(cx).client_id()); - // assert_eq!(1, active_session.read(cx).thread_id().0); - - // active_session - // }); - - // local_debug_item.update(host_cx, |item, cx| { - // item.toggle_ignore_breakpoints(cx); // Set to true - 
// assert_eq!(true, item.are_breakpoints_ignored(cx)); - // }); - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // assert!( - // called_set_breakpoints.load(std::sync::atomic::Ordering::SeqCst), - // "SetBreakpoint request must be called to ignore breakpoints" - // ); - - // client - // .on_request::({ - // let called_set_breakpoints = called_set_breakpoints.clone(); - // move |_, _args| { - // called_set_breakpoints.store(true, Ordering::SeqCst); - - // Ok(dap::SetBreakpointsResponse { - // breakpoints: Vec::default(), - // }) - // } - // }) - // .await; - - // let remote_editor = remote_workspace - // .update_in(remote_cx, |workspace, window, cx| { - // workspace.open_path(project_path.clone(), None, true, window, cx) - // }) - // .await - // .unwrap() - // .downcast::() - // .unwrap(); - - // called_set_breakpoints.store(false, std::sync::atomic::Ordering::SeqCst); - - // remote_editor.update_in(remote_cx, |editor, window, cx| { - // // Line 1 - // editor.toggle_breakpoint(&editor::actions::ToggleBreakpoint, window, cx); - // }); - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // assert!( - // called_set_breakpoints.load(std::sync::atomic::Ordering::SeqCst), - // "SetBreakpoint request be called whenever breakpoints are toggled but with not breakpoints" - // ); - - // remote_debug_item.update(remote_cx, |debug_panel, cx| { - // let breakpoints_ignored = debug_panel.are_breakpoints_ignored(cx); - - // assert_eq!(true, breakpoints_ignored); - // assert_eq!(client.id(), debug_panel.client_id()); - // assert_eq!(1, debug_panel.thread_id().0); - // }); - - // client - // .on_request::({ - // let called_set_breakpoints = called_set_breakpoints.clone(); - // move |_, args| { - // assert_eq!("/project/test.txt", args.source.path.unwrap()); - - // let mut actual_breakpoints = args.breakpoints.unwrap(); - // actual_breakpoints.sort_by_key(|b| b.line); - - // let expected_breakpoints = vec![ - // SourceBreakpoint { - // 
line: 1, - // column: None, - // condition: None, - // hit_condition: None, - // log_message: None, - // mode: None, - // }, - // SourceBreakpoint { - // line: 2, - // column: None, - // condition: None, - // hit_condition: None, - // log_message: None, - // mode: None, - // }, - // SourceBreakpoint { - // line: 3, - // column: None, - // condition: None, - // hit_condition: None, - // log_message: None, - // mode: None, - // }, - // ]; - - // assert_eq!(actual_breakpoints, expected_breakpoints); - - // called_set_breakpoints.store(true, Ordering::SeqCst); - - // Ok(dap::SetBreakpointsResponse { - // breakpoints: Vec::default(), - // }) - // } - // }) - // .await; - - // late_join_zed.join_project(host_project_id).await; - // let (_late_join_client, late_join_workspace, late_join_project, late_join_cx) = - // late_join_zed.expand().await; - - // late_join_cx.run_until_parked(); - - // let last_join_remote_item = late_join_workspace.update(late_join_cx, |workspace, cx| { - // let debug_panel = workspace.panel::(cx).unwrap(); - // let active_session = debug_panel - // .update(cx, |this, cx| this.active_session(cx)) - // .unwrap(); - - // let breakpoints_ignored = active_session.read(cx).are_breakpoints_ignored(cx); - - // assert_eq!(true, breakpoints_ignored); - - // assert_eq!( - // 1, - // debug_panel.update(cx, |this, cx| this.pane().unwrap().read(cx).items_len()) - // ); - // assert_eq!(client.id(), active_session.read(cx).client_id()); - // assert_eq!(1, active_session.read(cx).thread_id().0); - // active_session - // }); - - // remote_debug_item.update(remote_cx, |item, cx| { - // item.toggle_ignore_breakpoints(cx); - // }); - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - // late_join_cx.run_until_parked(); - - // assert!( - // called_set_breakpoints.load(std::sync::atomic::Ordering::SeqCst), - // "SetBreakpoint request should be called to update breakpoints" - // ); - - // client - // .on_request::({ - // let called_set_breakpoints = 
called_set_breakpoints.clone(); - // move |_, args| { - // assert_eq!("/project/test.txt", args.source.path.unwrap()); - // assert_eq!(args.breakpoints, Some(vec![])); - - // called_set_breakpoints.store(true, Ordering::SeqCst); - - // Ok(dap::SetBreakpointsResponse { - // breakpoints: Vec::default(), - // }) - // } - // }) - // .await; - - // local_debug_item.update(host_cx, |debug_panel_item, cx| { - // assert_eq!( - // false, - // debug_panel_item.are_breakpoints_ignored(cx), - // "Remote client set this to false" - // ); - // }); - - // remote_debug_item.update(remote_cx, |debug_panel_item, cx| { - // assert_eq!( - // false, - // debug_panel_item.are_breakpoints_ignored(cx), - // "Remote client set this to false" - // ); - // }); - - // last_join_remote_item.update(late_join_cx, |debug_panel_item, cx| { - // assert_eq!( - // false, - // debug_panel_item.are_breakpoints_ignored(cx), - // "Remote client set this to false" - // ); - // }); - - // let shutdown_client = host_project.update(host_cx, |project, cx| { - // project.dap_store().update(cx, |dap_store, cx| { - // dap_store.shutdown_session(&session.read(cx).id(), cx) - // }) - // }); - - // shutdown_client.await.unwrap(); - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // remote_project.update(remote_cx, |project, cx| { - // project.dap_store().update(cx, |dap_store, _cx| { - // let sessions = dap_store.sessions().collect::>(); - - // assert_eq!( - // None, - // dap_store.session_by_client_id(&client_id), - // "No client_id to session mapping should exist after shutdown" - // ); - // assert_eq!( - // 0, - // sessions.len(), - // "No sessions should be left after shutdown" - // ); - // }) - // }); - - // late_join_project.update(late_join_cx, |project, cx| { - // project.dap_store().update(cx, |dap_store, _cx| { - // let sessions = dap_store.sessions().collect::>(); - - // assert_eq!( - // None, - // dap_store.session_by_client_id(&client_id), - // "No client_id to session mapping 
should exist after shutdown" - // ); - // assert_eq!( - // 0, - // sessions.len(), - // "No sessions should be left after shutdown" - // ); - // }) - // }); -} - -#[gpui::test] -async fn test_debug_panel_console(_host_cx: &mut TestAppContext, _remote_cx: &mut TestAppContext) { - unimplemented!("Collab is still being refactored"); - // let executor = host_cx.executor(); - // let mut server = TestServer::start(executor).await; - - // let (mut host_zed, mut remote_zed) = - // setup_two_member_test(&mut server, host_cx, remote_cx).await; - - // let (host_project_id, _) = host_zed.host_project(None).await; - // remote_zed.join_project(host_project_id).await; - - // let (_client_host, _host_workspace, host_project, host_cx) = host_zed.expand().await; - // let (_client_remote, remote_workspace, _remote_project, remote_cx) = remote_zed.expand().await; - - // remote_cx.run_until_parked(); - - // let task = host_project.update(host_cx, |project, cx| { - // project.start_debug_session(dap::test_config(None), cx) - // }); - - // let session = task.await.unwrap(); - // let client = session.read_with(host_cx, |project, _| project.adapter_client().unwrap()); - - // client - // .on_request::(move |_, _| { - // Ok(dap::Capabilities { - // supports_step_back: Some(false), - // ..Default::default() - // }) - // }) - // .await; - - // client.on_request::(move |_, _| Ok(())).await; - - // client - // .on_request::(move |_, _| { - // Ok(dap::StackTraceResponse { - // stack_frames: Vec::default(), - // total_frames: None, - // }) - // }) - // .await; - - // client - // .fake_event(dap::messages::Events::Stopped(dap::StoppedEvent { - // reason: dap::StoppedEventReason::Pause, - // description: None, - // thread_id: Some(1), - // preserve_focus_hint: None, - // text: None, - // all_threads_stopped: None, - // hit_breakpoint_ids: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: None, - // output: "First 
line".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: None, - // location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: Some(dap::OutputEventCategory::Stdout), - // output: "First group".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: Some(dap::OutputEventGroup::Start), - // location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: Some(dap::OutputEventCategory::Stdout), - // output: "First item in group 1".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: None, - // location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: Some(dap::OutputEventCategory::Stdout), - // output: "Second item in group 1".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: None, - // location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: Some(dap::OutputEventCategory::Stdout), - // output: "Second group".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: Some(dap::OutputEventGroup::Start), - // location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: Some(dap::OutputEventCategory::Stdout), - // output: "First item in group 2".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: None, - // 
location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: Some(dap::OutputEventCategory::Stdout), - // output: "Second item in group 2".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: None, - // location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: Some(dap::OutputEventCategory::Stdout), - // output: "End group 2".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: Some(dap::OutputEventGroup::End), - // location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: Some(dap::OutputEventCategory::Stdout), - // output: "Third group".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: Some(dap::OutputEventGroup::StartCollapsed), - // location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: Some(dap::OutputEventCategory::Stdout), - // output: "First item in group 3".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: None, - // location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: Some(dap::OutputEventCategory::Stdout), - // output: "Second item in group 3".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: None, - // location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: 
Some(dap::OutputEventCategory::Stdout), - // output: "End group 3".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: Some(dap::OutputEventGroup::End), - // location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: Some(dap::OutputEventCategory::Stdout), - // output: "Third item in group 1".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: None, - // location_reference: None, - // })) - // .await; - - // client - // .fake_event(dap::messages::Events::Output(dap::OutputEvent { - // category: Some(dap::OutputEventCategory::Stdout), - // output: "Second item".to_string(), - // data: None, - // variables_reference: None, - // source: None, - // line: None, - // column: None, - // group: Some(dap::OutputEventGroup::End), - // location_reference: None, - // })) - // .await; - - // host_cx.run_until_parked(); - // remote_cx.run_until_parked(); - - // active_session(remote_workspace, remote_cx).update(remote_cx, |session_item, cx| { - // session_item - // .mode() - // .as_running() - // .unwrap() - // .read(cx) - // .console() - // .update(cx, |console, cx| { - // console.editor().update(cx, |editor, cx| { - // pretty_assertions::assert_eq!( - // " - // , + pending: Vec, +} + +fn room_participants(room: &Entity, cx: &mut TestAppContext) -> RoomParticipants { + room.read_with(cx, |room, _| { + let mut remote = room + .remote_participants() + .values() + .map(|participant| participant.user.github_login.clone().to_string()) + .collect::>(); + let mut pending = room + .pending_participants() + .iter() + .map(|user| user.github_login.clone().to_string()) + .collect::>(); + remote.sort(); + pending.sort(); + RoomParticipants { remote, pending } + }) +} + +fn channel_id(room: &Entity, cx: &mut TestAppContext) -> Option { + cx.read(|cx| 
room.read(cx).channel_id()) +} + +mod auth_token_tests { + use collab::auth::{ + AccessTokenJson, MAX_ACCESS_TOKENS_TO_STORE, VerifyAccessTokenResult, create_access_token, + verify_access_token, + }; + use rand::prelude::*; + use scrypt::Scrypt; + use scrypt::password_hash::{PasswordHasher, SaltString}; + use sea_orm::EntityTrait; + + use collab::db::{Database, NewUserParams, UserId, access_token}; + use collab::*; + + #[gpui::test] + async fn test_verify_access_token(cx: &mut gpui::TestAppContext) { + let test_db = crate::db_tests::TestDb::sqlite(cx.executor()); + let db = test_db.db(); + + let user = db + .create_user( + "example@example.com", + None, + false, + NewUserParams { + github_login: "example".into(), + github_user_id: 1, + }, + ) + .await + .unwrap(); + + let token = create_access_token(db, user.user_id, None).await.unwrap(); + assert!(matches!( + verify_access_token(&token, user.user_id, db).await.unwrap(), + VerifyAccessTokenResult { + is_valid: true, + impersonator_id: None, + } + )); + + let old_token = create_previous_access_token(user.user_id, None, db) + .await + .unwrap(); + + let old_token_id = serde_json::from_str::(&old_token) + .unwrap() + .id; + + let hash = db + .transaction(|tx| async move { + Ok(access_token::Entity::find_by_id(old_token_id) + .one(&*tx) + .await?) + }) + .await + .unwrap() + .unwrap() + .hash; + assert!(hash.starts_with("$scrypt$")); + + assert!(matches!( + verify_access_token(&old_token, user.user_id, db) + .await + .unwrap(), + VerifyAccessTokenResult { + is_valid: true, + impersonator_id: None, + } + )); + + let hash = db + .transaction(|tx| async move { + Ok(access_token::Entity::find_by_id(old_token_id) + .one(&*tx) + .await?) 
+ }) + .await + .unwrap() + .unwrap() + .hash; + assert!(hash.starts_with("$sha256$")); + + assert!(matches!( + verify_access_token(&old_token, user.user_id, db) + .await + .unwrap(), + VerifyAccessTokenResult { + is_valid: true, + impersonator_id: None, + } + )); + + assert!(matches!( + verify_access_token(&token, user.user_id, db).await.unwrap(), + VerifyAccessTokenResult { + is_valid: true, + impersonator_id: None, + } + )); + } + + async fn create_previous_access_token( + user_id: UserId, + impersonated_user_id: Option, + db: &Database, + ) -> Result { + let access_token = collab::auth::random_token(); + let access_token_hash = previous_hash_access_token(&access_token)?; + let id = db + .create_access_token( + user_id, + impersonated_user_id, + &access_token_hash, + MAX_ACCESS_TOKENS_TO_STORE, + ) + .await?; + Ok(serde_json::to_string(&AccessTokenJson { + version: 1, + id, + token: access_token, + })?) + } + + #[expect(clippy::result_large_err)] + fn previous_hash_access_token(token: &str) -> Result { + // Avoid slow hashing in debug mode. + let params = if cfg!(debug_assertions) { + scrypt::Params::new(1, 1, 1, scrypt::Params::RECOMMENDED_LEN).unwrap() + } else { + scrypt::Params::new(14, 8, 1, scrypt::Params::RECOMMENDED_LEN).unwrap() + }; + + Ok(Scrypt + .hash_password_customized( + token.as_bytes(), + None, + None, + params, + &SaltString::generate(PasswordHashRngCompat::new()), + ) + .map_err(anyhow::Error::new)? + .to_string()) + } + + // TODO: remove once we password_hash v0.6 is released. 
+ struct PasswordHashRngCompat(rand::rngs::ThreadRng); + + impl PasswordHashRngCompat { + fn new() -> Self { + Self(rand::rng()) + } + } + + impl scrypt::password_hash::rand_core::RngCore for PasswordHashRngCompat { + fn next_u32(&mut self) -> u32 { + self.0.next_u32() + } + + fn next_u64(&mut self) -> u64 { + self.0.next_u64() + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.0.fill_bytes(dest); + } + + fn try_fill_bytes( + &mut self, + dest: &mut [u8], + ) -> Result<(), scrypt::password_hash::rand_core::Error> { + self.fill_bytes(dest); + Ok(()) + } + } + + impl scrypt::password_hash::rand_core::CryptoRng for PasswordHashRngCompat {} +} diff --git a/crates/collab/tests/integration/db_tests.rs b/crates/collab/tests/integration/db_tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..20e3826a74969b077b229c56ba15ff6a4ab797f2 --- /dev/null +++ b/crates/collab/tests/integration/db_tests.rs @@ -0,0 +1,219 @@ +mod buffer_tests; +mod channel_tests; +mod db_tests; +mod extension_tests; +mod migrations; + +use std::sync::Arc; +use std::sync::atomic::{AtomicI32, Ordering::SeqCst}; +use std::time::Duration; + +use collections::HashSet; +use gpui::BackgroundExecutor; +use parking_lot::Mutex; +use rand::prelude::*; +use sea_orm::ConnectionTrait; +use sqlx::migrate::MigrateDatabase; + +use self::migrations::run_database_migrations; + +use collab::db::*; + +pub struct TestDb { + pub db: Option>, + pub connection: Option, +} + +impl TestDb { + pub fn sqlite(executor: BackgroundExecutor) -> Self { + let url = "sqlite::memory:"; + let runtime = tokio::runtime::Builder::new_current_thread() + .enable_io() + .enable_time() + .build() + .unwrap(); + + let mut db = runtime.block_on(async { + let mut options = ConnectOptions::new(url); + options.max_connections(5); + let mut db = Database::new(options).await.unwrap(); + let sql = include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/migrations.sqlite/20221109000000_test_schema.sql" + )); + db.pool + 
.execute(sea_orm::Statement::from_string( + db.pool.get_database_backend(), + sql, + )) + .await + .unwrap(); + db.initialize_notification_kinds().await.unwrap(); + db + }); + + db.test_options = Some(DatabaseTestOptions { + executor, + runtime, + query_failure_probability: parking_lot::Mutex::new(0.0), + }); + + Self { + db: Some(Arc::new(db)), + connection: None, + } + } + + pub fn postgres(executor: BackgroundExecutor) -> Self { + static LOCK: Mutex<()> = Mutex::new(()); + + let _guard = LOCK.lock(); + let mut rng = StdRng::from_os_rng(); + let url = format!( + "postgres://postgres@localhost/zed-test-{}", + rng.random::() + ); + let runtime = tokio::runtime::Builder::new_current_thread() + .enable_io() + .enable_time() + .build() + .unwrap(); + + let mut db = runtime.block_on(async { + sqlx::Postgres::create_database(&url) + .await + .expect("failed to create test db"); + let mut options = ConnectOptions::new(url); + options + .max_connections(5) + .idle_timeout(Duration::from_secs(0)); + let mut db = Database::new(options).await.unwrap(); + let migrations_path = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations"); + run_database_migrations(db.options(), migrations_path) + .await + .unwrap(); + db.initialize_notification_kinds().await.unwrap(); + db + }); + + db.test_options = Some(DatabaseTestOptions { + executor, + runtime, + query_failure_probability: parking_lot::Mutex::new(0.0), + }); + + Self { + db: Some(Arc::new(db)), + connection: None, + } + } + + pub fn db(&self) -> &Arc { + self.db.as_ref().unwrap() + } + + pub fn set_query_failure_probability(&self, probability: f64) { + let database = self.db.as_ref().unwrap(); + let test_options = database.test_options.as_ref().unwrap(); + *test_options.query_failure_probability.lock() = probability; + } +} + +#[macro_export] +macro_rules! 
test_both_dbs { + ($test_name:ident, $postgres_test_name:ident, $sqlite_test_name:ident) => { + #[cfg(target_os = "macos")] + #[gpui::test] + async fn $postgres_test_name(cx: &mut gpui::TestAppContext) { + let test_db = $crate::db_tests::TestDb::postgres(cx.executor().clone()); + $test_name(test_db.db()).await; + } + + #[gpui::test] + async fn $sqlite_test_name(cx: &mut gpui::TestAppContext) { + let test_db = $crate::db_tests::TestDb::sqlite(cx.executor().clone()); + $test_name(test_db.db()).await; + } + }; +} + +impl Drop for TestDb { + fn drop(&mut self) { + let db = self.db.take().unwrap(); + if let sea_orm::DatabaseBackend::Postgres = db.pool.get_database_backend() { + db.test_options.as_ref().unwrap().runtime.block_on(async { + use util::ResultExt; + let query = " + SELECT pg_terminate_backend(pg_stat_activity.pid) + FROM pg_stat_activity + WHERE + pg_stat_activity.datname = current_database() AND + pid <> pg_backend_pid(); + "; + db.pool + .execute(sea_orm::Statement::from_string( + db.pool.get_database_backend(), + query, + )) + .await + .log_err(); + sqlx::Postgres::drop_database(db.options.get_url()) + .await + .log_err(); + }) + } + } +} + +#[track_caller] +fn assert_channel_tree_matches(actual: Vec<Channel>, expected: Vec<Channel>) { + let expected_channels = expected.into_iter().collect::<HashSet<_>>(); + let actual_channels = actual.into_iter().collect::<HashSet<_>>(); + pretty_assertions::assert_eq!(expected_channels, actual_channels); +} + +fn channel_tree(channels: &[(ChannelId, &[ChannelId], &'static str)]) -> Vec<Channel> { + use std::collections::HashMap; + + let mut result = Vec::new(); + let mut order_by_parent: HashMap<Vec<ChannelId>, i32> = HashMap::new(); + + for (id, parent_path, name) in channels { + let parent_key = parent_path.to_vec(); + let order = if parent_key.is_empty() { + 1 + } else { + *order_by_parent + .entry(parent_key.clone()) + .and_modify(|e| *e += 1) + .or_insert(1) + }; + + result.push(Channel { + id: *id, + name: (*name).to_owned(), + visibility: ChannelVisibility::Members,
parent_path: parent_key, + channel_order: order, + }); + } + + result +} + +static GITHUB_USER_ID: AtomicI32 = AtomicI32::new(5); + +async fn new_test_user(db: &Arc<Database>, email: &str) -> UserId { + db.create_user( + email, + None, + false, + NewUserParams { + github_login: email[0..email.find('@').unwrap()].to_string(), + github_user_id: GITHUB_USER_ID.fetch_add(1, SeqCst), + }, + ) + .await + .unwrap() + .user_id +} diff --git a/crates/collab/src/db/tests/buffer_tests.rs b/crates/collab/tests/integration/db_tests/buffer_tests.rs similarity index 99% rename from crates/collab/src/db/tests/buffer_tests.rs rename to crates/collab/tests/integration/db_tests/buffer_tests.rs index 4eae7a54cba4a906351f05e5945cff5691fd1126..6fca75a9e8aae039775b07837352053f52428c79 100644 --- a/crates/collab/src/db/tests/buffer_tests.rs +++ b/crates/collab/tests/integration/db_tests/buffer_tests.rs @@ -1,6 +1,7 @@ use super::*; use crate::test_both_dbs; use language::proto::{self, serialize_version}; +use rpc::ConnectionId; use text::{Buffer, ReplicaId}; test_both_dbs!( diff --git a/crates/collab/src/db/tests/channel_tests.rs b/crates/collab/tests/integration/db_tests/channel_tests.rs similarity index 98% rename from crates/collab/src/db/tests/channel_tests.rs rename to crates/collab/tests/integration/db_tests/channel_tests.rs index 705dbba5ead0170acd629149b8d77b847a5784b0..fc3f8770f839b24c2be34919e883a03c34b14af6 100644 --- a/crates/collab/src/db/tests/channel_tests.rs +++ b/crates/collab/tests/integration/db_tests/channel_tests.rs @@ -1,10 +1,6 @@ -use crate::{ - db::{ - Channel, ChannelId, ChannelRole, Database, NewUserParams, RoomId, UserId, - tests::{assert_channel_tree_matches, channel_tree, new_test_user}, - }, - test_both_dbs, -}; +use super::{assert_channel_tree_matches, channel_tree, new_test_user}; +use crate::test_both_dbs; +use collab::db::{Channel, ChannelId, ChannelRole, Database, NewUserParams, RoomId, UserId}; use rpc::{ ConnectionId, proto::{self, reorder_channel}, @@ -689,12
+685,12 @@ async fn test_user_is_channel_participant(db: &Arc<Database>) { .await .unwrap(); - db.set_channel_visibility(zed_channel, crate::db::ChannelVisibility::Public, admin) + db.set_channel_visibility(zed_channel, collab::db::ChannelVisibility::Public, admin) .await .unwrap(); db.set_channel_visibility( public_channel_id, - crate::db::ChannelVisibility::Public, + collab::db::ChannelVisibility::Public, admin, ) .await diff --git a/crates/collab/src/db/tests/db_tests.rs b/crates/collab/tests/integration/db_tests/db_tests.rs similarity index 98% rename from crates/collab/src/db/tests/db_tests.rs rename to crates/collab/tests/integration/db_tests/db_tests.rs index d998e866ce353163498aea752b0277b332de0ed5..05d77b6c109496ce395258d0a704d6cb0fff9d66 100644 --- a/crates/collab/src/db/tests/db_tests.rs +++ b/crates/collab/tests/integration/db_tests/db_tests.rs @@ -1,7 +1,11 @@ -use super::*; use crate::test_both_dbs; + +use super::*; use chrono::Utc; +use collab::db::RoomId; +use collab::db::*; use pretty_assertions::assert_eq; +use rpc::ConnectionId; use std::sync::Arc; test_both_dbs!( @@ -541,7 +545,7 @@ fn test_fuzzy_like_string() { #[cfg(target_os = "macos")] #[gpui::test] async fn test_fuzzy_search_users(cx: &mut gpui::TestAppContext) { - let test_db = tests::TestDb::postgres(cx.executor()); + let test_db = TestDb::postgres(cx.executor()); let db = test_db.db(); for (i, github_login) in [ "California", @@ -594,7 +598,7 @@ test_both_dbs!( ); async fn test_upsert_shared_thread(db: &Arc<Database>) { - use crate::db::SharedThreadId; + use collab::db::SharedThreadId; use uuid::Uuid; let user_id = new_test_user(db, "user1@example.com").await; @@ -624,7 +628,7 @@ test_both_dbs!( ); async fn test_upsert_shared_thread_updates_existing(db: &Arc<Database>) { - use crate::db::SharedThreadId; + use collab::db::SharedThreadId; use uuid::Uuid; let user_id = new_test_user(db, "user1@example.com").await; @@ -665,7 +669,7 @@ test_both_dbs!( ); async fn test_cannot_update_another_users_shared_thread(db: &Arc<Database>) {
- use crate::db::SharedThreadId; + use collab::db::SharedThreadId; use uuid::Uuid; let user1_id = new_test_user(db, "user1@example.com").await; @@ -694,7 +698,7 @@ test_both_dbs!( ); async fn test_get_nonexistent_shared_thread(db: &Arc<Database>) { - use crate::db::SharedThreadId; + use collab::db::SharedThreadId; use uuid::Uuid; let result = db diff --git a/crates/collab/src/db/tests/extension_tests.rs b/crates/collab/tests/integration/db_tests/extension_tests.rs similarity index 98% rename from crates/collab/src/db/tests/extension_tests.rs rename to crates/collab/tests/integration/db_tests/extension_tests.rs index cb58f6af2a6559b8ca3bb4c19c694a263e73d878..0f1939955376285bd1e1709812d18dad61326ac3 100644 --- a/crates/collab/src/db/tests/extension_tests.rs +++ b/crates/collab/tests/integration/db_tests/extension_tests.rs @@ -3,13 +3,11 @@ use std::sync::Arc; use rpc::ExtensionProvides; -use super::Database; -use crate::db::ExtensionVersionConstraints; -use crate::{ - db::{ExtensionMetadata, NewExtensionVersion, queries::extensions::convert_time_to_chrono}, - test_both_dbs, -}; - +use crate::test_both_dbs; +use collab::db::Database; +use collab::db::ExtensionVersionConstraints; +use collab::db::{NewExtensionVersion, queries::extensions::convert_time_to_chrono}; +use rpc::ExtensionMetadata; test_both_dbs!( test_extensions, test_extensions_postgres, diff --git a/crates/collab/src/db/tests/migrations.rs b/crates/collab/tests/integration/db_tests/migrations.rs similarity index 100% rename from crates/collab/src/db/tests/migrations.rs rename to crates/collab/tests/integration/db_tests/migrations.rs diff --git a/crates/collab/src/tests/editor_tests.rs b/crates/collab/tests/integration/editor_tests.rs similarity index 99% rename from crates/collab/src/tests/editor_tests.rs rename to crates/collab/tests/integration/editor_tests.rs index 06cf7c98fdad170bee90f40203a68329cce73ae2..df21217745abb586fdffcc8115188999084800dd 100644 --- a/crates/collab/src/tests/editor_tests.rs +++
b/crates/collab/tests/integration/editor_tests.rs @@ -1,5 +1,6 @@ -use crate::{rpc::RECONNECT_TIMEOUT, tests::TestServer}; +use crate::TestServer; use call::ActiveCall; +use collab::rpc::RECONNECT_TIMEOUT; use collections::{HashMap, HashSet}; use editor::{ DocumentColorsRenderMode, Editor, FETCH_COLORS_DEBOUNCE_TIMEOUT, MultiBufferOffset, RowInfo, diff --git a/crates/collab/src/tests/following_tests.rs b/crates/collab/tests/integration/following_tests.rs similarity index 99% rename from crates/collab/src/tests/following_tests.rs rename to crates/collab/tests/integration/following_tests.rs index fee6fe1786e84fda9ce30663d49e21b94274d5fc..295105ecbd9f8663469276fe4d0d197708a4254e 100644 --- a/crates/collab/src/tests/following_tests.rs +++ b/crates/collab/tests/integration/following_tests.rs @@ -1,5 +1,5 @@ #![allow(clippy::reversed_empty_ranges)] -use crate::tests::TestServer; +use crate::TestServer; use call::{ActiveCall, ParticipantLocation}; use client::ChannelId; use collab_ui::{ @@ -462,7 +462,7 @@ async fn test_basic_following( // #[cfg(all(not(target_os = "macos"), not(target_os = "windows")))] { - use crate::rpc::RECONNECT_TIMEOUT; + use collab::rpc::RECONNECT_TIMEOUT; use gpui::TestScreenCaptureSource; use workspace::{ dock::{DockPosition, test::TestPanel}, diff --git a/crates/collab/src/tests/git_tests.rs b/crates/collab/tests/integration/git_tests.rs similarity index 99% rename from crates/collab/src/tests/git_tests.rs rename to crates/collab/tests/integration/git_tests.rs index 6843c4a22a27494f630d65c73dbad0f114522a92..1378fcf95c63c883ee8dd424dc10ac67ccd774bd 100644 --- a/crates/collab/src/tests/git_tests.rs +++ b/crates/collab/tests/integration/git_tests.rs @@ -10,7 +10,7 @@ use util::{path, rel_path::rel_path}; use workspace::Workspace; // -use crate::tests::TestServer; +use crate::TestServer; #[gpui::test] async fn test_project_diff(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { diff --git a/crates/collab/src/tests/integration_tests.rs 
b/crates/collab/tests/integration/integration_tests.rs similarity index 99% rename from crates/collab/src/tests/integration_tests.rs rename to crates/collab/tests/integration/integration_tests.rs index 110dece999cfa307cb544107b0980d85f98caefb..18d6f2bb495626672dafbf8031186a4d4c7ddd5e 100644 --- a/crates/collab/src/tests/integration_tests.rs +++ b/crates/collab/tests/integration/integration_tests.rs @@ -1,9 +1,6 @@ use crate::{ - rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT}, - tests::{ - RoomParticipants, TestClient, TestServer, channel_id, following_tests::join_channel, - room_participants, - }, + RoomParticipants, TestClient, TestServer, channel_id, following_tests::join_channel, + room_participants, }; use anyhow::{Result, anyhow}; use assistant_slash_command::SlashCommandWorkingSet; @@ -11,6 +8,7 @@ use assistant_text_thread::TextThreadStore; use buffer_diff::{DiffHunkSecondaryStatus, DiffHunkStatus, assert_hunks}; use call::{ActiveCall, ParticipantLocation, Room, room}; use client::{RECEIVE_TIMEOUT, User}; +use collab::rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT}; use collections::{BTreeMap, HashMap, HashSet}; use fs::{FakeFs, Fs as _, RemoveOptions}; use futures::{StreamExt as _, channel::mpsc}; diff --git a/crates/collab/src/tests/notification_tests.rs b/crates/collab/tests/integration/notification_tests.rs similarity index 99% rename from crates/collab/src/tests/notification_tests.rs rename to crates/collab/tests/integration/notification_tests.rs index 9bf906694ef84e66f529269fdadfd1fe6ad1fd08..27ad30a36e4b540209ddf7c2030247c3d95c593d 100644 --- a/crates/collab/src/tests/notification_tests.rs +++ b/crates/collab/tests/integration/notification_tests.rs @@ -6,7 +6,7 @@ use parking_lot::Mutex; use pretty_assertions::assert_eq; use rpc::{Notification, proto}; -use crate::tests::TestServer; +use crate::TestServer; #[gpui::test] async fn test_notifications( diff --git a/crates/collab/src/tests/random_channel_buffer_tests.rs 
b/crates/collab/tests/integration/random_channel_buffer_tests.rs similarity index 98% rename from crates/collab/src/tests/random_channel_buffer_tests.rs rename to crates/collab/tests/integration/random_channel_buffer_tests.rs index cd4a575bb2f2bd17d9abd747132416c0be16c6a6..5eb1b249a768446e7703c74d6fa575c3f7518336 100644 --- a/crates/collab/src/tests/random_channel_buffer_tests.rs +++ b/crates/collab/tests/integration/random_channel_buffer_tests.rs @@ -1,4 +1,4 @@ -use crate::db::ChannelRole; +use collab::db::*; use super::{RandomizedTest, TestClient, TestError, TestServer, UserTestPlan, run_randomized_test}; use anyhow::Result; @@ -13,10 +13,7 @@ use std::{ }; use text::Bias; -#[gpui::test( - iterations = 100, - on_failure = "crate::tests::save_randomized_test_plan" -)] +#[gpui::test(iterations = 100, on_failure = "crate::save_randomized_test_plan")] async fn test_random_channel_buffers( cx: &mut TestAppContext, executor: BackgroundExecutor, diff --git a/crates/collab/src/tests/random_project_collaboration_tests.rs b/crates/collab/tests/integration/random_project_collaboration_tests.rs similarity index 99% rename from crates/collab/src/tests/random_project_collaboration_tests.rs rename to crates/collab/tests/integration/random_project_collaboration_tests.rs index 34734c4af92eb38e15a84bdf47512e1272c48653..ab5bde6d3215fa60790d5f55d8b06ea6482098cc 100644 --- a/crates/collab/src/tests/random_project_collaboration_tests.rs +++ b/crates/collab/tests/integration/random_project_collaboration_tests.rs @@ -1,8 +1,8 @@ -use super::{RandomizedTest, TestClient, TestError, TestServer, UserTestPlan}; -use crate::{db::UserId, tests::run_randomized_test}; +use crate::{RandomizedTest, TestClient, TestError, TestServer, UserTestPlan, run_randomized_test}; use anyhow::{Context as _, Result}; use async_trait::async_trait; use call::ActiveCall; +use collab::db::UserId; use collections::{BTreeMap, HashMap}; use editor::Bias; use fs::{FakeFs, Fs as _}; @@ -33,10 +33,7 @@ use util::{ 
rel_path::{RelPath, RelPathBuf, rel_path}, }; -#[gpui::test( - iterations = 100, - on_failure = "crate::tests::save_randomized_test_plan" -)] +#[gpui::test(iterations = 100, on_failure = "crate::save_randomized_test_plan")] async fn test_random_project_collaboration( cx: &mut TestAppContext, executor: BackgroundExecutor, diff --git a/crates/collab/src/tests/randomized_test_helpers.rs b/crates/collab/tests/integration/randomized_test_helpers.rs similarity index 99% rename from crates/collab/src/tests/randomized_test_helpers.rs rename to crates/collab/tests/integration/randomized_test_helpers.rs index 0c2707b56f72fbe5144ac98c7d8d2872b7469b9b..e3e4a122d1df069385ef850aeccaa4c5788d253d 100644 --- a/crates/collab/src/tests/randomized_test_helpers.rs +++ b/crates/collab/tests/integration/randomized_test_helpers.rs @@ -1,9 +1,9 @@ -use crate::{ +use crate::{TestClient, TestServer}; +use async_trait::async_trait; +use collab::{ db::{self, NewUserParams, UserId}, rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT}, - tests::{TestClient, TestServer}, }; -use async_trait::async_trait; use futures::StreamExt; use gpui::{BackgroundExecutor, Task, TestAppContext}; use parking_lot::Mutex; diff --git a/crates/collab/src/tests/remote_editing_collaboration_tests.rs b/crates/collab/tests/integration/remote_editing_collaboration_tests.rs similarity index 99% rename from crates/collab/src/tests/remote_editing_collaboration_tests.rs rename to crates/collab/tests/integration/remote_editing_collaboration_tests.rs index 090bb9499f94662a5140927105ff1a9ce6c6c000..1f4dd0d353234f61675b5beefd2226c3d684c062 100644 --- a/crates/collab/src/tests/remote_editing_collaboration_tests.rs +++ b/crates/collab/tests/integration/remote_editing_collaboration_tests.rs @@ -1,4 +1,4 @@ -use crate::tests::TestServer; +use crate::TestServer; use call::ActiveCall; use collections::{HashMap, HashSet}; diff --git a/crates/collab/src/tests/test_server.rs b/crates/collab/tests/integration/test_server.rs similarity index 99% 
rename from crates/collab/src/tests/test_server.rs rename to crates/collab/tests/integration/test_server.rs index 1f0204c1d57f8479319d1cf77ae3e95fda86f2ba..f28d247f67a149ef6d489b9bc6ab7b43eb77350f 100644 --- a/crates/collab/src/tests/test_server.rs +++ b/crates/collab/tests/integration/test_server.rs @@ -1,9 +1,3 @@ -use crate::{ - AppState, Config, - db::{NewUserParams, UserId, tests::TestDb}, - executor::Executor, - rpc::{CLEANUP_TIMEOUT, Principal, RECONNECT_TIMEOUT, Server, ZedVersion}, -}; use anyhow::anyhow; use call::ActiveCall; use channel::{ChannelBuffer, ChannelStore}; @@ -13,6 +7,12 @@ use client::{ proto::PeerId, }; use clock::FakeSystemClock; +use collab::{ + AppState, Config, + db::{NewUserParams, UserId}, + executor::Executor, + rpc::{CLEANUP_TIMEOUT, Principal, RECONNECT_TIMEOUT, Server, ZedVersion}, +}; use collab_ui::channel_view::ChannelView; use collections::{HashMap, HashSet}; @@ -49,6 +49,8 @@ use workspace::{Workspace, WorkspaceStore}; use livekit_client::test::TestServer as LivekitTestServer; +use crate::db_tests::TestDb; + pub struct TestServer { pub app_state: Arc<AppState>, pub test_livekit_server: Arc<LivekitTestServer>,