Max Brunsfeld created
 Cargo.lock                                                    |    1 +
 crates/collab/Cargo.toml                                      |    1 +
 crates/collab/src/tests.rs                                    |  557 -
 crates/collab/src/tests/random_channel_buffer_tests.rs        |   49 +
 crates/collab/src/tests/random_project_collaboration_tests.rs | 1573 +++
 crates/collab/src/tests/randomized_integration_tests.rs       | 2199 -----
 crates/collab/src/tests/randomized_test_helpers.rs            |  694 +
 crates/collab/src/tests/test_server.rs                        |  551 +
 crates/gpui_macros/src/gpui_macros.rs                         |   10
 9 files changed, 2887 insertions(+), 2748 deletions(-)
Cargo.lock
@@ -1456,6 +1456,7 @@ name = "collab"
version = "0.19.0"
dependencies = [
"anyhow",
+ "async-trait",
"async-tungstenite",
"audio",
"axum",
crates/collab/Cargo.toml
@@ -80,6 +80,7 @@ theme = { path = "../theme" }
workspace = { path = "../workspace", features = ["test-support"] }
collab_ui = { path = "../collab_ui", features = ["test-support"] }
+async-trait.workspace = true
ctor.workspace = true
env_logger.workspace = true
indoc.workspace = true
crates/collab/src/tests.rs
@@ -1,555 +1,18 @@
-use crate::{
- db::{tests::TestDb, NewUserParams, UserId},
- executor::Executor,
- rpc::{Server, CLEANUP_TIMEOUT},
- AppState,
-};
-use anyhow::anyhow;
-use call::{ActiveCall, Room};
-use channel::ChannelStore;
-use client::{
- self, proto::PeerId, Client, Connection, Credentials, EstablishConnectionError, UserStore,
-};
-use collections::{HashMap, HashSet};
-use fs::FakeFs;
-use futures::{channel::oneshot, StreamExt as _};
-use gpui::{executor::Deterministic, ModelHandle, Task, TestAppContext, WindowHandle};
-use language::LanguageRegistry;
-use parking_lot::Mutex;
-use project::{Project, WorktreeId};
-use settings::SettingsStore;
-use std::{
- cell::{Ref, RefCell, RefMut},
- env,
- ops::{Deref, DerefMut},
- path::Path,
- sync::{
- atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
- Arc,
- },
-};
-use util::http::FakeHttpClient;
-use workspace::Workspace;
+use call::Room;
+use gpui::{ModelHandle, TestAppContext};
mod channel_buffer_tests;
mod channel_tests;
mod integration_tests;
-mod randomized_integration_tests;
-
-struct TestServer {
- app_state: Arc<AppState>,
- server: Arc<Server>,
- connection_killers: Arc<Mutex<HashMap<PeerId, Arc<AtomicBool>>>>,
- forbid_connections: Arc<AtomicBool>,
- _test_db: TestDb,
- test_live_kit_server: Arc<live_kit_client::TestServer>,
-}
-
-impl TestServer {
- async fn start(deterministic: &Arc<Deterministic>) -> Self {
- static NEXT_LIVE_KIT_SERVER_ID: AtomicUsize = AtomicUsize::new(0);
-
- let use_postgres = env::var("USE_POSTGRES").ok();
- let use_postgres = use_postgres.as_deref();
- let test_db = if use_postgres == Some("true") || use_postgres == Some("1") {
- TestDb::postgres(deterministic.build_background())
- } else {
- TestDb::sqlite(deterministic.build_background())
- };
- let live_kit_server_id = NEXT_LIVE_KIT_SERVER_ID.fetch_add(1, SeqCst);
- let live_kit_server = live_kit_client::TestServer::create(
- format!("http://livekit.{}.test", live_kit_server_id),
- format!("devkey-{}", live_kit_server_id),
- format!("secret-{}", live_kit_server_id),
- deterministic.build_background(),
- )
- .unwrap();
- let app_state = Self::build_app_state(&test_db, &live_kit_server).await;
- let epoch = app_state
- .db
- .create_server(&app_state.config.zed_environment)
- .await
- .unwrap();
- let server = Server::new(
- epoch,
- app_state.clone(),
- Executor::Deterministic(deterministic.build_background()),
- );
- server.start().await.unwrap();
- // Advance clock to ensure the server's cleanup task is finished.
- deterministic.advance_clock(CLEANUP_TIMEOUT);
- Self {
- app_state,
- server,
- connection_killers: Default::default(),
- forbid_connections: Default::default(),
- _test_db: test_db,
- test_live_kit_server: live_kit_server,
- }
- }
-
- async fn reset(&self) {
- self.app_state.db.reset();
- let epoch = self
- .app_state
- .db
- .create_server(&self.app_state.config.zed_environment)
- .await
- .unwrap();
- self.server.reset(epoch);
- }
-
- async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> TestClient {
- cx.update(|cx| {
- if cx.has_global::<SettingsStore>() {
- panic!("Same cx used to create two test clients")
- }
- cx.set_global(SettingsStore::test(cx));
- });
-
- let http = FakeHttpClient::with_404_response();
- let user_id = if let Ok(Some(user)) = self.app_state.db.get_user_by_github_login(name).await
- {
- user.id
- } else {
- self.app_state
- .db
- .create_user(
- &format!("{name}@example.com"),
- false,
- NewUserParams {
- github_login: name.into(),
- github_user_id: 0,
- invite_count: 0,
- },
- )
- .await
- .expect("creating user failed")
- .user_id
- };
- let client_name = name.to_string();
- let mut client = cx.read(|cx| Client::new(http.clone(), cx));
- let server = self.server.clone();
- let db = self.app_state.db.clone();
- let connection_killers = self.connection_killers.clone();
- let forbid_connections = self.forbid_connections.clone();
-
- Arc::get_mut(&mut client)
- .unwrap()
- .set_id(user_id.0 as usize)
- .override_authenticate(move |cx| {
- cx.spawn(|_| async move {
- let access_token = "the-token".to_string();
- Ok(Credentials {
- user_id: user_id.0 as u64,
- access_token,
- })
- })
- })
- .override_establish_connection(move |credentials, cx| {
- assert_eq!(credentials.user_id, user_id.0 as u64);
- assert_eq!(credentials.access_token, "the-token");
-
- let server = server.clone();
- let db = db.clone();
- let connection_killers = connection_killers.clone();
- let forbid_connections = forbid_connections.clone();
- let client_name = client_name.clone();
- cx.spawn(move |cx| async move {
- if forbid_connections.load(SeqCst) {
- Err(EstablishConnectionError::other(anyhow!(
- "server is forbidding connections"
- )))
- } else {
- let (client_conn, server_conn, killed) =
- Connection::in_memory(cx.background());
- let (connection_id_tx, connection_id_rx) = oneshot::channel();
- let user = db
- .get_user_by_id(user_id)
- .await
- .expect("retrieving user failed")
- .unwrap();
- cx.background()
- .spawn(server.handle_connection(
- server_conn,
- client_name,
- user,
- Some(connection_id_tx),
- Executor::Deterministic(cx.background()),
- ))
- .detach();
- let connection_id = connection_id_rx.await.unwrap();
- connection_killers
- .lock()
- .insert(connection_id.into(), killed);
- Ok(client_conn)
- }
- })
- });
-
- let fs = FakeFs::new(cx.background());
- let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx));
- let channel_store =
- cx.add_model(|cx| ChannelStore::new(client.clone(), user_store.clone(), cx));
- let app_state = Arc::new(workspace::AppState {
- client: client.clone(),
- user_store: user_store.clone(),
- channel_store: channel_store.clone(),
- languages: Arc::new(LanguageRegistry::test()),
- fs: fs.clone(),
- build_window_options: |_, _, _| Default::default(),
- initialize_workspace: |_, _, _, _| Task::ready(Ok(())),
- background_actions: || &[],
- });
-
- cx.update(|cx| {
- theme::init((), cx);
- Project::init(&client, cx);
- client::init(&client, cx);
- language::init(cx);
- editor::init_settings(cx);
- workspace::init(app_state.clone(), cx);
- audio::init((), cx);
- call::init(client.clone(), user_store.clone(), cx);
- channel::init(&client);
- });
-
- client
- .authenticate_and_connect(false, &cx.to_async())
- .await
- .unwrap();
-
- let client = TestClient {
- app_state,
- username: name.to_string(),
- state: Default::default(),
- };
- client.wait_for_current_user(cx).await;
- client
- }
-
- fn disconnect_client(&self, peer_id: PeerId) {
- self.connection_killers
- .lock()
- .remove(&peer_id)
- .unwrap()
- .store(true, SeqCst);
- }
-
- fn forbid_connections(&self) {
- self.forbid_connections.store(true, SeqCst);
- }
-
- fn allow_connections(&self) {
- self.forbid_connections.store(false, SeqCst);
- }
-
- async fn make_contacts(&self, clients: &mut [(&TestClient, &mut TestAppContext)]) {
- for ix in 1..clients.len() {
- let (left, right) = clients.split_at_mut(ix);
- let (client_a, cx_a) = left.last_mut().unwrap();
- for (client_b, cx_b) in right {
- client_a
- .app_state
- .user_store
- .update(*cx_a, |store, cx| {
- store.request_contact(client_b.user_id().unwrap(), cx)
- })
- .await
- .unwrap();
- cx_a.foreground().run_until_parked();
- client_b
- .app_state
- .user_store
- .update(*cx_b, |store, cx| {
- store.respond_to_contact_request(client_a.user_id().unwrap(), true, cx)
- })
- .await
- .unwrap();
- }
- }
- }
-
- async fn make_channel(
- &self,
- channel: &str,
- admin: (&TestClient, &mut TestAppContext),
- members: &mut [(&TestClient, &mut TestAppContext)],
- ) -> u64 {
- let (admin_client, admin_cx) = admin;
- let channel_id = admin_client
- .app_state
- .channel_store
- .update(admin_cx, |channel_store, cx| {
- channel_store.create_channel(channel, None, cx)
- })
- .await
- .unwrap();
-
- for (member_client, member_cx) in members {
- admin_client
- .app_state
- .channel_store
- .update(admin_cx, |channel_store, cx| {
- channel_store.invite_member(
- channel_id,
- member_client.user_id().unwrap(),
- false,
- cx,
- )
- })
- .await
- .unwrap();
-
- admin_cx.foreground().run_until_parked();
-
- member_client
- .app_state
- .channel_store
- .update(*member_cx, |channels, _| {
- channels.respond_to_channel_invite(channel_id, true)
- })
- .await
- .unwrap();
- }
-
- channel_id
- }
-
- async fn create_room(&self, clients: &mut [(&TestClient, &mut TestAppContext)]) {
- self.make_contacts(clients).await;
-
- let (left, right) = clients.split_at_mut(1);
- let (_client_a, cx_a) = &mut left[0];
- let active_call_a = cx_a.read(ActiveCall::global);
-
- for (client_b, cx_b) in right {
- let user_id_b = client_b.current_user_id(*cx_b).to_proto();
- active_call_a
- .update(*cx_a, |call, cx| call.invite(user_id_b, None, cx))
- .await
- .unwrap();
-
- cx_b.foreground().run_until_parked();
- let active_call_b = cx_b.read(ActiveCall::global);
- active_call_b
- .update(*cx_b, |call, cx| call.accept_incoming(cx))
- .await
- .unwrap();
- }
- }
-
- async fn build_app_state(
- test_db: &TestDb,
- fake_server: &live_kit_client::TestServer,
- ) -> Arc<AppState> {
- Arc::new(AppState {
- db: test_db.db().clone(),
- live_kit_client: Some(Arc::new(fake_server.create_api_client())),
- config: Default::default(),
- })
- }
-}
-
-impl Deref for TestServer {
- type Target = Server;
-
- fn deref(&self) -> &Self::Target {
- &self.server
- }
-}
-
-impl Drop for TestServer {
- fn drop(&mut self) {
- self.server.teardown();
- self.test_live_kit_server.teardown().unwrap();
- }
-}
-
-struct TestClient {
- username: String,
- state: RefCell<TestClientState>,
- app_state: Arc<workspace::AppState>,
-}
-
-#[derive(Default)]
-struct TestClientState {
- local_projects: Vec<ModelHandle<Project>>,
- remote_projects: Vec<ModelHandle<Project>>,
- buffers: HashMap<ModelHandle<Project>, HashSet<ModelHandle<language::Buffer>>>,
-}
-
-impl Deref for TestClient {
- type Target = Arc<Client>;
-
- fn deref(&self) -> &Self::Target {
- &self.app_state.client
- }
-}
-
-struct ContactsSummary {
- pub current: Vec<String>,
- pub outgoing_requests: Vec<String>,
- pub incoming_requests: Vec<String>,
-}
-
-impl TestClient {
- pub fn fs(&self) -> &FakeFs {
- self.app_state.fs.as_fake()
- }
-
- pub fn channel_store(&self) -> &ModelHandle<ChannelStore> {
- &self.app_state.channel_store
- }
-
- pub fn user_store(&self) -> &ModelHandle<UserStore> {
- &self.app_state.user_store
- }
-
- pub fn language_registry(&self) -> &Arc<LanguageRegistry> {
- &self.app_state.languages
- }
-
- pub fn client(&self) -> &Arc<Client> {
- &self.app_state.client
- }
+mod random_channel_buffer_tests;
+mod random_project_collaboration_tests;
+mod randomized_test_helpers;
+mod test_server;
- pub fn current_user_id(&self, cx: &TestAppContext) -> UserId {
- UserId::from_proto(
- self.app_state
- .user_store
- .read_with(cx, |user_store, _| user_store.current_user().unwrap().id),
- )
- }
-
- async fn wait_for_current_user(&self, cx: &TestAppContext) {
- let mut authed_user = self
- .app_state
- .user_store
- .read_with(cx, |user_store, _| user_store.watch_current_user());
- while authed_user.next().await.unwrap().is_none() {}
- }
-
- async fn clear_contacts(&self, cx: &mut TestAppContext) {
- self.app_state
- .user_store
- .update(cx, |store, _| store.clear_contacts())
- .await;
- }
-
- fn local_projects<'a>(&'a self) -> impl Deref<Target = Vec<ModelHandle<Project>>> + 'a {
- Ref::map(self.state.borrow(), |state| &state.local_projects)
- }
-
- fn remote_projects<'a>(&'a self) -> impl Deref<Target = Vec<ModelHandle<Project>>> + 'a {
- Ref::map(self.state.borrow(), |state| &state.remote_projects)
- }
-
- fn local_projects_mut<'a>(&'a self) -> impl DerefMut<Target = Vec<ModelHandle<Project>>> + 'a {
- RefMut::map(self.state.borrow_mut(), |state| &mut state.local_projects)
- }
-
- fn remote_projects_mut<'a>(&'a self) -> impl DerefMut<Target = Vec<ModelHandle<Project>>> + 'a {
- RefMut::map(self.state.borrow_mut(), |state| &mut state.remote_projects)
- }
-
- fn buffers_for_project<'a>(
- &'a self,
- project: &ModelHandle<Project>,
- ) -> impl DerefMut<Target = HashSet<ModelHandle<language::Buffer>>> + 'a {
- RefMut::map(self.state.borrow_mut(), |state| {
- state.buffers.entry(project.clone()).or_default()
- })
- }
-
- fn buffers<'a>(
- &'a self,
- ) -> impl DerefMut<Target = HashMap<ModelHandle<Project>, HashSet<ModelHandle<language::Buffer>>>> + 'a
- {
- RefMut::map(self.state.borrow_mut(), |state| &mut state.buffers)
- }
-
- fn summarize_contacts(&self, cx: &TestAppContext) -> ContactsSummary {
- self.app_state
- .user_store
- .read_with(cx, |store, _| ContactsSummary {
- current: store
- .contacts()
- .iter()
- .map(|contact| contact.user.github_login.clone())
- .collect(),
- outgoing_requests: store
- .outgoing_contact_requests()
- .iter()
- .map(|user| user.github_login.clone())
- .collect(),
- incoming_requests: store
- .incoming_contact_requests()
- .iter()
- .map(|user| user.github_login.clone())
- .collect(),
- })
- }
-
- async fn build_local_project(
- &self,
- root_path: impl AsRef<Path>,
- cx: &mut TestAppContext,
- ) -> (ModelHandle<Project>, WorktreeId) {
- let project = cx.update(|cx| {
- Project::local(
- self.client().clone(),
- self.app_state.user_store.clone(),
- self.app_state.languages.clone(),
- self.app_state.fs.clone(),
- cx,
- )
- });
- let (worktree, _) = project
- .update(cx, |p, cx| {
- p.find_or_create_local_worktree(root_path, true, cx)
- })
- .await
- .unwrap();
- worktree
- .read_with(cx, |tree, _| tree.as_local().unwrap().scan_complete())
- .await;
- (project, worktree.read_with(cx, |tree, _| tree.id()))
- }
-
- async fn build_remote_project(
- &self,
- host_project_id: u64,
- guest_cx: &mut TestAppContext,
- ) -> ModelHandle<Project> {
- let active_call = guest_cx.read(ActiveCall::global);
- let room = active_call.read_with(guest_cx, |call, _| call.room().unwrap().clone());
- room.update(guest_cx, |room, cx| {
- room.join_project(
- host_project_id,
- self.app_state.languages.clone(),
- self.app_state.fs.clone(),
- cx,
- )
- })
- .await
- .unwrap()
- }
-
- fn build_workspace(
- &self,
- project: &ModelHandle<Project>,
- cx: &mut TestAppContext,
- ) -> WindowHandle<Workspace> {
- cx.add_window(|cx| Workspace::new(0, project.clone(), self.app_state.clone(), cx))
- }
-}
-
-impl Drop for TestClient {
- fn drop(&mut self) {
- self.app_state.client.teardown();
- }
-}
+pub use randomized_test_helpers::{
+ run_randomized_test, save_randomized_test_plan, RandomizedTest, TestError, UserTestPlan,
+};
+pub use test_server::{TestClient, TestServer};
#[derive(Debug, Eq, PartialEq)]
struct RoomParticipants {
crates/collab/src/tests/random_channel_buffer_tests.rs
@@ -0,0 +1,49 @@
+use crate::tests::{run_randomized_test, RandomizedTest, TestClient, TestError, UserTestPlan};
+use anyhow::Result;
+use async_trait::async_trait;
+use gpui::{executor::Deterministic, TestAppContext};
+use rand::rngs::StdRng;
+use serde_derive::{Deserialize, Serialize};
+use std::{rc::Rc, sync::Arc};
+
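+// For now this test is a scaffold: the only operation ever generated is
+// `Join`, and applying it is a no-op.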
+#[gpui::test]
+async fn test_random_channel_buffers(
+ cx: &mut TestAppContext,
+ deterministic: Arc<Deterministic>,
+ rng: StdRng,
+) {
+ run_randomized_test::<RandomChannelBufferTest>(cx, deterministic, rng).await;
+}
+
+struct RandomChannelBufferTest;
+
+#[derive(Clone, Serialize, Deserialize)]
+enum ChannelBufferOperation {
+ Join,
+}
+
+#[async_trait(?Send)]
+impl RandomizedTest for RandomChannelBufferTest {
+ type Operation = ChannelBufferOperation;
+
+ fn generate_operation(
+ client: &TestClient,
+ rng: &mut StdRng,
+ plan: &mut UserTestPlan,
+ cx: &TestAppContext,
+ ) -> ChannelBufferOperation {
+ ChannelBufferOperation::Join
+ }
+
+ async fn apply_operation(
+ client: &TestClient,
+ operation: ChannelBufferOperation,
+ cx: &mut TestAppContext,
+ ) -> Result<(), TestError> {
+ Ok(())
+ }
+
+ async fn on_client_added(client: &Rc<TestClient>) {}
+
+ fn on_clients_quiesced(clients: &[(Rc<TestClient>, TestAppContext)]) {}
+}
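
The `RandomizedTest` trait that both randomized suites implement lives in `randomized_test_helpers.rs`, which this excerpt does not include. A rough sketch, reconstructed from the impl above and the larger one below; the bounds on `Operation` are an assumption inferred from the `Clone`/`Serialize`/`Deserialize` derives on both operation enums:

use crate::tests::{TestClient, TestError, UserTestPlan};
use async_trait::async_trait;
use gpui::TestAppContext;
use rand::rngs::StdRng;
use serde::{de::DeserializeOwned, Serialize};
use std::rc::Rc;

#[async_trait(?Send)]
pub trait RandomizedTest {
    /// A serializable description of one client action, so that a failing
    /// sequence can be saved by `save_randomized_test_plan` and replayed.
    type Operation: Clone + Serialize + DeserializeOwned;

    /// Pick the next operation for a client based on its current state.
    fn generate_operation(
        client: &TestClient,
        rng: &mut StdRng,
        plan: &mut UserTestPlan,
        cx: &TestAppContext,
    ) -> Self::Operation;

    /// Apply a previously generated operation. `TestError::Inapplicable`
    /// signals that its preconditions no longer hold and it should be skipped.
    async fn apply_operation(
        client: &TestClient,
        operation: Self::Operation,
        cx: &mut TestAppContext,
    ) -> Result<(), TestError>;

    /// Per-client setup, e.g. registering fake language servers.
    async fn on_client_added(client: &Rc<TestClient>);

    /// Cross-client consistency assertions once all clients are idle.
    fn on_clients_quiesced(clients: &[(Rc<TestClient>, TestAppContext)]);
}

`run_randomized_test::<T>` then presumably drives the generate/apply loop across the simulated clients and calls `on_clients_quiesced` once they settle, mirroring what the deleted `test_random_collaboration` below did inline.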
crates/collab/src/tests/random_project_collaboration_tests.rs
@@ -0,0 +1,1573 @@
+use crate::{
+ db::UserId,
+ tests::{run_randomized_test, RandomizedTest, TestClient, TestError, UserTestPlan},
+};
+use anyhow::{anyhow, Result};
+use async_trait::async_trait;
+use call::ActiveCall;
+use collections::{BTreeMap, HashMap};
+use editor::Bias;
+use fs::{repository::GitFileStatus, FakeFs, Fs as _};
+use futures::StreamExt;
+use gpui::{executor::Deterministic, ModelHandle, TestAppContext};
+use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16};
+use lsp::FakeLanguageServer;
+use pretty_assertions::assert_eq;
+use project::{search::SearchQuery, Project, ProjectPath};
+use rand::{
+ distributions::{Alphanumeric, DistString},
+ prelude::*,
+};
+use serde::{Deserialize, Serialize};
+use std::{
+ ops::Range,
+ path::{Path, PathBuf},
+ rc::Rc,
+ sync::Arc,
+};
+use util::ResultExt;
+
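+// On failure, `save_randomized_test_plan` writes the generated operation
+// sequence to disk so the failing run can be replayed.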
+#[gpui::test(
+ iterations = 100,
+ on_failure = "crate::tests::save_randomized_test_plan"
+)]
+async fn test_random_project_collaboration(
+ cx: &mut TestAppContext,
+ deterministic: Arc<Deterministic>,
+ rng: StdRng,
+) {
+ run_randomized_test::<ProjectCollaborationTest>(cx, deterministic, rng).await;
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+enum ClientOperation {
+ AcceptIncomingCall,
+ RejectIncomingCall,
+ LeaveCall,
+ InviteContactToCall {
+ user_id: UserId,
+ },
+ OpenLocalProject {
+ first_root_name: String,
+ },
+ OpenRemoteProject {
+ host_id: UserId,
+ first_root_name: String,
+ },
+ AddWorktreeToProject {
+ project_root_name: String,
+ new_root_path: PathBuf,
+ },
+ CloseRemoteProject {
+ project_root_name: String,
+ },
+ OpenBuffer {
+ project_root_name: String,
+ is_local: bool,
+ full_path: PathBuf,
+ },
+ SearchProject {
+ project_root_name: String,
+ is_local: bool,
+ query: String,
+ detach: bool,
+ },
+ EditBuffer {
+ project_root_name: String,
+ is_local: bool,
+ full_path: PathBuf,
+ edits: Vec<(Range<usize>, Arc<str>)>,
+ },
+ CloseBuffer {
+ project_root_name: String,
+ is_local: bool,
+ full_path: PathBuf,
+ },
+ SaveBuffer {
+ project_root_name: String,
+ is_local: bool,
+ full_path: PathBuf,
+ detach: bool,
+ },
+ RequestLspDataInBuffer {
+ project_root_name: String,
+ is_local: bool,
+ full_path: PathBuf,
+ offset: usize,
+ kind: LspRequestKind,
+ detach: bool,
+ },
+ CreateWorktreeEntry {
+ project_root_name: String,
+ is_local: bool,
+ full_path: PathBuf,
+ is_dir: bool,
+ },
+ WriteFsEntry {
+ path: PathBuf,
+ is_dir: bool,
+ content: String,
+ },
+ GitOperation {
+ operation: GitOperation,
+ },
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+enum GitOperation {
+ WriteGitIndex {
+ repo_path: PathBuf,
+ contents: Vec<(PathBuf, String)>,
+ },
+ WriteGitBranch {
+ repo_path: PathBuf,
+ new_branch: Option<String>,
+ },
+ WriteGitStatuses {
+ repo_path: PathBuf,
+ statuses: Vec<(PathBuf, GitFileStatus)>,
+ git_operation: bool,
+ },
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+enum LspRequestKind {
+ Rename,
+ Completion,
+ CodeAction,
+ Definition,
+ Highlights,
+}
+
+struct ProjectCollaborationTest;
+
+#[async_trait(?Send)]
+impl RandomizedTest for ProjectCollaborationTest {
+ type Operation = ClientOperation;
+
+ fn generate_operation(
+ client: &TestClient,
+ rng: &mut StdRng,
+ plan: &mut UserTestPlan,
+ cx: &TestAppContext,
+ ) -> ClientOperation {
+ let call = cx.read(ActiveCall::global);
+ loop {
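+            // Weighted choice: ~30% call ops, 30% project ops, 31% buffer ops,
+            // 5% git ops, 4% raw filesystem writes.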
+ match rng.gen_range(0..100_u32) {
+ // Mutate the call
+ 0..=29 => {
+ // Respond to an incoming call
+ if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) {
+ break if rng.gen_bool(0.7) {
+ ClientOperation::AcceptIncomingCall
+ } else {
+ ClientOperation::RejectIncomingCall
+ };
+ }
+
+ match rng.gen_range(0..100_u32) {
+ // Invite a contact to the current call
+ 0..=70 => {
+ let available_contacts =
+ client.user_store().read_with(cx, |user_store, _| {
+ user_store
+ .contacts()
+ .iter()
+ .filter(|contact| contact.online && !contact.busy)
+ .cloned()
+ .collect::<Vec<_>>()
+ });
+ if !available_contacts.is_empty() {
+ let contact = available_contacts.choose(rng).unwrap();
+ break ClientOperation::InviteContactToCall {
+ user_id: UserId(contact.user.id as i32),
+ };
+ }
+ }
+
+ // Leave the current call
+ 71.. => {
+ if plan.allow_client_disconnection
+ && call.read_with(cx, |call, _| call.room().is_some())
+ {
+ break ClientOperation::LeaveCall;
+ }
+ }
+ }
+ }
+
+ // Mutate projects
+ 30..=59 => match rng.gen_range(0..100_u32) {
+ // Open a new project
+ 0..=70 => {
+ // Open a remote project
+ if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) {
+ let existing_remote_project_ids = cx.read(|cx| {
+ client
+ .remote_projects()
+ .iter()
+ .map(|p| p.read(cx).remote_id().unwrap())
+ .collect::<Vec<_>>()
+ });
+ let new_remote_projects = room.read_with(cx, |room, _| {
+ room.remote_participants()
+ .values()
+ .flat_map(|participant| {
+ participant.projects.iter().filter_map(|project| {
+ if existing_remote_project_ids.contains(&project.id) {
+ None
+ } else {
+ Some((
+ UserId::from_proto(participant.user.id),
+ project.worktree_root_names[0].clone(),
+ ))
+ }
+ })
+ })
+ .collect::<Vec<_>>()
+ });
+ if !new_remote_projects.is_empty() {
+ let (host_id, first_root_name) =
+ new_remote_projects.choose(rng).unwrap().clone();
+ break ClientOperation::OpenRemoteProject {
+ host_id,
+ first_root_name,
+ };
+ }
+ }
+ // Open a local project
+ else {
+ let first_root_name = plan.next_root_dir_name();
+ break ClientOperation::OpenLocalProject { first_root_name };
+ }
+ }
+
+ // Close a remote project
+ 71..=80 => {
+ if !client.remote_projects().is_empty() {
+ let project = client.remote_projects().choose(rng).unwrap().clone();
+ let first_root_name = root_name_for_project(&project, cx);
+ break ClientOperation::CloseRemoteProject {
+ project_root_name: first_root_name,
+ };
+ }
+ }
+
+ // Mutate project worktrees
+ 81.. => match rng.gen_range(0..100_u32) {
+ // Add a worktree to a local project
+ 0..=50 => {
+ let Some(project) = client.local_projects().choose(rng).cloned() else {
+ continue;
+ };
+ let project_root_name = root_name_for_project(&project, cx);
+ let mut paths = client.fs().paths(false);
+ paths.remove(0);
+ let new_root_path = if paths.is_empty() || rng.gen() {
+ Path::new("/").join(&plan.next_root_dir_name())
+ } else {
+ paths.choose(rng).unwrap().clone()
+ };
+ break ClientOperation::AddWorktreeToProject {
+ project_root_name,
+ new_root_path,
+ };
+ }
+
+ // Add an entry to a worktree
+ _ => {
+ let Some(project) = choose_random_project(client, rng) else {
+ continue;
+ };
+ let project_root_name = root_name_for_project(&project, cx);
+ let is_local = project.read_with(cx, |project, _| project.is_local());
+ let worktree = project.read_with(cx, |project, cx| {
+ project
+ .worktrees(cx)
+ .filter(|worktree| {
+ let worktree = worktree.read(cx);
+ worktree.is_visible()
+ && worktree.entries(false).any(|e| e.is_file())
+ && worktree.root_entry().map_or(false, |e| e.is_dir())
+ })
+ .choose(rng)
+ });
+ let Some(worktree) = worktree else { continue };
+ let is_dir = rng.gen::<bool>();
+ let mut full_path =
+ worktree.read_with(cx, |w, _| PathBuf::from(w.root_name()));
+ full_path.push(gen_file_name(rng));
+ if !is_dir {
+ full_path.set_extension("rs");
+ }
+ break ClientOperation::CreateWorktreeEntry {
+ project_root_name,
+ is_local,
+ full_path,
+ is_dir,
+ };
+ }
+ },
+ },
+
+ // Query and mutate buffers
+ 60..=90 => {
+ let Some(project) = choose_random_project(client, rng) else {
+ continue;
+ };
+ let project_root_name = root_name_for_project(&project, cx);
+ let is_local = project.read_with(cx, |project, _| project.is_local());
+
+ match rng.gen_range(0..100_u32) {
+ // Manipulate an existing buffer
+ 0..=70 => {
+ let Some(buffer) = client
+ .buffers_for_project(&project)
+ .iter()
+ .choose(rng)
+ .cloned()
+ else {
+ continue;
+ };
+
+ let full_path = buffer
+ .read_with(cx, |buffer, cx| buffer.file().unwrap().full_path(cx));
+
+ match rng.gen_range(0..100_u32) {
+ // Close the buffer
+ 0..=15 => {
+ break ClientOperation::CloseBuffer {
+ project_root_name,
+ is_local,
+ full_path,
+ };
+ }
+ // Save the buffer
+ 16..=29 if buffer.read_with(cx, |b, _| b.is_dirty()) => {
+ let detach = rng.gen_bool(0.3);
+ break ClientOperation::SaveBuffer {
+ project_root_name,
+ is_local,
+ full_path,
+ detach,
+ };
+ }
+ // Edit the buffer
+ 30..=69 => {
+ let edits = buffer
+ .read_with(cx, |buffer, _| buffer.get_random_edits(rng, 3));
+ break ClientOperation::EditBuffer {
+ project_root_name,
+ is_local,
+ full_path,
+ edits,
+ };
+ }
+ // Make an LSP request
+ _ => {
+ let offset = buffer.read_with(cx, |buffer, _| {
+ buffer.clip_offset(
+ rng.gen_range(0..=buffer.len()),
+ language::Bias::Left,
+ )
+ });
+ let detach = rng.gen();
+ break ClientOperation::RequestLspDataInBuffer {
+ project_root_name,
+ full_path,
+ offset,
+ is_local,
+ kind: match rng.gen_range(0..5_u32) {
+ 0 => LspRequestKind::Rename,
+ 1 => LspRequestKind::Highlights,
+ 2 => LspRequestKind::Definition,
+ 3 => LspRequestKind::CodeAction,
+ 4.. => LspRequestKind::Completion,
+ },
+ detach,
+ };
+ }
+ }
+ }
+
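+                    // Search the project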
+ 71..=80 => {
+ let query = rng.gen_range('a'..='z').to_string();
+ let detach = rng.gen_bool(0.3);
+ break ClientOperation::SearchProject {
+ project_root_name,
+ is_local,
+ query,
+ detach,
+ };
+ }
+
+ // Open a buffer
+ 81.. => {
+ let worktree = project.read_with(cx, |project, cx| {
+ project
+ .worktrees(cx)
+ .filter(|worktree| {
+ let worktree = worktree.read(cx);
+ worktree.is_visible()
+ && worktree.entries(false).any(|e| e.is_file())
+ })
+ .choose(rng)
+ });
+ let Some(worktree) = worktree else { continue };
+ let full_path = worktree.read_with(cx, |worktree, _| {
+ let entry = worktree
+ .entries(false)
+ .filter(|e| e.is_file())
+ .choose(rng)
+ .unwrap();
+ if entry.path.as_ref() == Path::new("") {
+ Path::new(worktree.root_name()).into()
+ } else {
+ Path::new(worktree.root_name()).join(&entry.path)
+ }
+ });
+ break ClientOperation::OpenBuffer {
+ project_root_name,
+ is_local,
+ full_path,
+ };
+ }
+ }
+ }
+
+                // Perform a random git operation
+ 91..=95 => {
+ break ClientOperation::GitOperation {
+ operation: generate_git_operation(rng, client),
+ };
+ }
+
+ // Create or update a file or directory
+ 96.. => {
+ let is_dir = rng.gen::<bool>();
+ let content;
+ let mut path;
+ let dir_paths = client.fs().directories(false);
+
+ if is_dir {
+ content = String::new();
+ path = dir_paths.choose(rng).unwrap().clone();
+ path.push(gen_file_name(rng));
+ } else {
+ content = Alphanumeric.sample_string(rng, 16);
+
+ // Create a new file or overwrite an existing file
+ let file_paths = client.fs().files();
+ if file_paths.is_empty() || rng.gen_bool(0.5) {
+ path = dir_paths.choose(rng).unwrap().clone();
+ path.push(gen_file_name(rng));
+ path.set_extension("rs");
+ } else {
+ path = file_paths.choose(rng).unwrap().clone()
+ };
+ }
+ break ClientOperation::WriteFsEntry {
+ path,
+ is_dir,
+ content,
+ };
+ }
+ }
+ }
+ }
+
+ async fn apply_operation(
+ client: &TestClient,
+ operation: ClientOperation,
+ cx: &mut TestAppContext,
+ ) -> Result<(), TestError> {
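+        // Operations are generated against a snapshot of client state that may
+        // be stale by the time they are applied; returning
+        // `TestError::Inapplicable` lets the harness skip such operations.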
+ match operation {
+ ClientOperation::AcceptIncomingCall => {
+ let active_call = cx.read(ActiveCall::global);
+ if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) {
+ Err(TestError::Inapplicable)?;
+ }
+
+ log::info!("{}: accepting incoming call", client.username);
+ active_call
+ .update(cx, |call, cx| call.accept_incoming(cx))
+ .await?;
+ }
+
+ ClientOperation::RejectIncomingCall => {
+ let active_call = cx.read(ActiveCall::global);
+ if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) {
+ Err(TestError::Inapplicable)?;
+ }
+
+ log::info!("{}: declining incoming call", client.username);
+ active_call.update(cx, |call, cx| call.decline_incoming(cx))?;
+ }
+
+ ClientOperation::LeaveCall => {
+ let active_call = cx.read(ActiveCall::global);
+ if active_call.read_with(cx, |call, _| call.room().is_none()) {
+ Err(TestError::Inapplicable)?;
+ }
+
+ log::info!("{}: hanging up", client.username);
+ active_call.update(cx, |call, cx| call.hang_up(cx)).await?;
+ }
+
+ ClientOperation::InviteContactToCall { user_id } => {
+ let active_call = cx.read(ActiveCall::global);
+
+                log::info!("{}: inviting {}", client.username, user_id);
+ active_call
+ .update(cx, |call, cx| call.invite(user_id.to_proto(), None, cx))
+ .await
+ .log_err();
+ }
+
+ ClientOperation::OpenLocalProject { first_root_name } => {
+ log::info!(
+ "{}: opening local project at {:?}",
+ client.username,
+ first_root_name
+ );
+
+ let root_path = Path::new("/").join(&first_root_name);
+ client.fs().create_dir(&root_path).await.unwrap();
+ client
+ .fs()
+ .create_file(&root_path.join("main.rs"), Default::default())
+ .await
+ .unwrap();
+ let project = client.build_local_project(root_path, cx).await.0;
+ ensure_project_shared(&project, client, cx).await;
+ client.local_projects_mut().push(project.clone());
+ }
+
+ ClientOperation::AddWorktreeToProject {
+ project_root_name,
+ new_root_path,
+ } => {
+ let project = project_for_root_name(client, &project_root_name, cx)
+ .ok_or(TestError::Inapplicable)?;
+
+ log::info!(
+                    "{}: adding a local worktree at {:?} to the project with root path {}",
+ client.username,
+ new_root_path,
+ project_root_name
+ );
+
+ ensure_project_shared(&project, client, cx).await;
+ if !client.fs().paths(false).contains(&new_root_path) {
+ client.fs().create_dir(&new_root_path).await.unwrap();
+ }
+ project
+ .update(cx, |project, cx| {
+ project.find_or_create_local_worktree(&new_root_path, true, cx)
+ })
+ .await
+ .unwrap();
+ }
+
+ ClientOperation::CloseRemoteProject { project_root_name } => {
+ let project = project_for_root_name(client, &project_root_name, cx)
+ .ok_or(TestError::Inapplicable)?;
+
+ log::info!(
+ "{}: closing remote project with root path {}",
+ client.username,
+ project_root_name,
+ );
+
+ let ix = client
+ .remote_projects()
+ .iter()
+ .position(|p| p == &project)
+ .unwrap();
+ cx.update(|_| {
+ client.remote_projects_mut().remove(ix);
+ client.buffers().retain(|p, _| *p != project);
+ drop(project);
+ });
+ }
+
+ ClientOperation::OpenRemoteProject {
+ host_id,
+ first_root_name,
+ } => {
+ let active_call = cx.read(ActiveCall::global);
+ let project = active_call
+ .update(cx, |call, cx| {
+ let room = call.room().cloned()?;
+ let participant = room
+ .read(cx)
+ .remote_participants()
+ .get(&host_id.to_proto())?;
+ let project_id = participant
+ .projects
+ .iter()
+ .find(|project| project.worktree_root_names[0] == first_root_name)?
+ .id;
+ Some(room.update(cx, |room, cx| {
+ room.join_project(
+ project_id,
+ client.language_registry().clone(),
+ FakeFs::new(cx.background().clone()),
+ cx,
+ )
+ }))
+ })
+ .ok_or(TestError::Inapplicable)?;
+
+ log::info!(
+ "{}: joining remote project of user {}, root name {}",
+ client.username,
+ host_id,
+ first_root_name,
+ );
+
+ let project = project.await?;
+ client.remote_projects_mut().push(project.clone());
+ }
+
+ ClientOperation::CreateWorktreeEntry {
+ project_root_name,
+ is_local,
+ full_path,
+ is_dir,
+ } => {
+ let project = project_for_root_name(client, &project_root_name, cx)
+ .ok_or(TestError::Inapplicable)?;
+ let project_path = project_path_for_full_path(&project, &full_path, cx)
+ .ok_or(TestError::Inapplicable)?;
+
+ log::info!(
+ "{}: creating {} at path {:?} in {} project {}",
+ client.username,
+ if is_dir { "dir" } else { "file" },
+ full_path,
+ if is_local { "local" } else { "remote" },
+ project_root_name,
+ );
+
+ ensure_project_shared(&project, client, cx).await;
+ project
+ .update(cx, |p, cx| p.create_entry(project_path, is_dir, cx))
+ .unwrap()
+ .await?;
+ }
+
+ ClientOperation::OpenBuffer {
+ project_root_name,
+ is_local,
+ full_path,
+ } => {
+ let project = project_for_root_name(client, &project_root_name, cx)
+ .ok_or(TestError::Inapplicable)?;
+ let project_path = project_path_for_full_path(&project, &full_path, cx)
+ .ok_or(TestError::Inapplicable)?;
+
+ log::info!(
+ "{}: opening buffer {:?} in {} project {}",
+ client.username,
+ full_path,
+ if is_local { "local" } else { "remote" },
+ project_root_name,
+ );
+
+ ensure_project_shared(&project, client, cx).await;
+ let buffer = project
+ .update(cx, |project, cx| project.open_buffer(project_path, cx))
+ .await?;
+ client.buffers_for_project(&project).insert(buffer);
+ }
+
+ ClientOperation::EditBuffer {
+ project_root_name,
+ is_local,
+ full_path,
+ edits,
+ } => {
+ let project = project_for_root_name(client, &project_root_name, cx)
+ .ok_or(TestError::Inapplicable)?;
+ let buffer = buffer_for_full_path(client, &project, &full_path, cx)
+ .ok_or(TestError::Inapplicable)?;
+
+ log::info!(
+ "{}: editing buffer {:?} in {} project {} with {:?}",
+ client.username,
+ full_path,
+ if is_local { "local" } else { "remote" },
+ project_root_name,
+ edits
+ );
+
+ ensure_project_shared(&project, client, cx).await;
+ buffer.update(cx, |buffer, cx| {
+ let snapshot = buffer.snapshot();
+ buffer.edit(
+ edits.into_iter().map(|(range, text)| {
+ let start = snapshot.clip_offset(range.start, Bias::Left);
+ let end = snapshot.clip_offset(range.end, Bias::Right);
+ (start..end, text)
+ }),
+ None,
+ cx,
+ );
+ });
+ }
+
+ ClientOperation::CloseBuffer {
+ project_root_name,
+ is_local,
+ full_path,
+ } => {
+ let project = project_for_root_name(client, &project_root_name, cx)
+ .ok_or(TestError::Inapplicable)?;
+ let buffer = buffer_for_full_path(client, &project, &full_path, cx)
+ .ok_or(TestError::Inapplicable)?;
+
+ log::info!(
+ "{}: closing buffer {:?} in {} project {}",
+ client.username,
+ full_path,
+ if is_local { "local" } else { "remote" },
+ project_root_name
+ );
+
+ ensure_project_shared(&project, client, cx).await;
+ cx.update(|_| {
+ client.buffers_for_project(&project).remove(&buffer);
+ drop(buffer);
+ });
+ }
+
+ ClientOperation::SaveBuffer {
+ project_root_name,
+ is_local,
+ full_path,
+ detach,
+ } => {
+ let project = project_for_root_name(client, &project_root_name, cx)
+ .ok_or(TestError::Inapplicable)?;
+ let buffer = buffer_for_full_path(client, &project, &full_path, cx)
+ .ok_or(TestError::Inapplicable)?;
+
+ log::info!(
+ "{}: saving buffer {:?} in {} project {}, {}",
+ client.username,
+ full_path,
+ if is_local { "local" } else { "remote" },
+ project_root_name,
+ if detach { "detaching" } else { "awaiting" }
+ );
+
+ ensure_project_shared(&project, client, cx).await;
+ let requested_version = buffer.read_with(cx, |buffer, _| buffer.version());
+ let save =
+ project.update(cx, |project, cx| project.save_buffer(buffer.clone(), cx));
+ let save = cx.spawn(|cx| async move {
+ save.await
+ .map_err(|err| anyhow!("save request failed: {:?}", err))?;
+ assert!(buffer
+ .read_with(&cx, |buffer, _| { buffer.saved_version().to_owned() })
+ .observed_all(&requested_version));
+ anyhow::Ok(())
+ });
+ if detach {
+ cx.update(|cx| save.detach_and_log_err(cx));
+ } else {
+ save.await?;
+ }
+ }
+
+ ClientOperation::RequestLspDataInBuffer {
+ project_root_name,
+ is_local,
+ full_path,
+ offset,
+ kind,
+ detach,
+ } => {
+ let project = project_for_root_name(client, &project_root_name, cx)
+ .ok_or(TestError::Inapplicable)?;
+ let buffer = buffer_for_full_path(client, &project, &full_path, cx)
+ .ok_or(TestError::Inapplicable)?;
+
+ log::info!(
+                    "{}: requesting LSP {:?} for buffer {:?} in {} project {}, {}",
+ client.username,
+ kind,
+ full_path,
+ if is_local { "local" } else { "remote" },
+ project_root_name,
+ if detach { "detaching" } else { "awaiting" }
+ );
+
+ use futures::{FutureExt as _, TryFutureExt as _};
+ let offset = buffer.read_with(cx, |b, _| b.clip_offset(offset, Bias::Left));
+ let request = cx.foreground().spawn(project.update(cx, |project, cx| {
+ match kind {
+ LspRequestKind::Rename => project
+ .prepare_rename(buffer, offset, cx)
+ .map_ok(|_| ())
+ .boxed(),
+ LspRequestKind::Completion => project
+ .completions(&buffer, offset, cx)
+ .map_ok(|_| ())
+ .boxed(),
+ LspRequestKind::CodeAction => project
+ .code_actions(&buffer, offset..offset, cx)
+ .map_ok(|_| ())
+ .boxed(),
+ LspRequestKind::Definition => project
+ .definition(&buffer, offset, cx)
+ .map_ok(|_| ())
+ .boxed(),
+ LspRequestKind::Highlights => project
+ .document_highlights(&buffer, offset, cx)
+ .map_ok(|_| ())
+ .boxed(),
+ }
+ }));
+ if detach {
+ request.detach();
+ } else {
+ request.await?;
+ }
+ }
+
+ ClientOperation::SearchProject {
+ project_root_name,
+ is_local,
+ query,
+ detach,
+ } => {
+ let project = project_for_root_name(client, &project_root_name, cx)
+ .ok_or(TestError::Inapplicable)?;
+
+ log::info!(
+                    "{}: searching {} project {} for {:?}, {}",
+ client.username,
+ if is_local { "local" } else { "remote" },
+ project_root_name,
+ query,
+ if detach { "detaching" } else { "awaiting" }
+ );
+
+ let mut search = project.update(cx, |project, cx| {
+ project.search(
+ SearchQuery::text(query, false, false, Vec::new(), Vec::new()),
+ cx,
+ )
+ });
+ drop(project);
+ let search = cx.background().spawn(async move {
+ let mut results = HashMap::default();
+ while let Some((buffer, ranges)) = search.next().await {
+ results.entry(buffer).or_insert(ranges);
+ }
+ results
+ });
+ search.await;
+ }
+
+ ClientOperation::WriteFsEntry {
+ path,
+ is_dir,
+ content,
+ } => {
+ if !client
+ .fs()
+ .directories(false)
+ .contains(&path.parent().unwrap().to_owned())
+ {
+ return Err(TestError::Inapplicable);
+ }
+
+ if is_dir {
+ log::info!("{}: creating dir at {:?}", client.username, path);
+ client.fs().create_dir(&path).await.unwrap();
+ } else {
+ let exists = client.fs().metadata(&path).await?.is_some();
+ let verb = if exists { "updating" } else { "creating" };
+                log::info!("{}: {} file at {:?}", client.username, verb, path);
+
+ client
+ .fs()
+ .save(&path, &content.as_str().into(), text::LineEnding::Unix)
+ .await
+ .unwrap();
+ }
+ }
+
+ ClientOperation::GitOperation { operation } => match operation {
+ GitOperation::WriteGitIndex {
+ repo_path,
+ contents,
+ } => {
+ if !client.fs().directories(false).contains(&repo_path) {
+ return Err(TestError::Inapplicable);
+ }
+
+ for (path, _) in contents.iter() {
+ if !client.fs().files().contains(&repo_path.join(path)) {
+ return Err(TestError::Inapplicable);
+ }
+ }
+
+ log::info!(
+ "{}: writing git index for repo {:?}: {:?}",
+ client.username,
+ repo_path,
+ contents
+ );
+
+ let dot_git_dir = repo_path.join(".git");
+ let contents = contents
+ .iter()
+ .map(|(path, contents)| (path.as_path(), contents.clone()))
+ .collect::<Vec<_>>();
+ if client.fs().metadata(&dot_git_dir).await?.is_none() {
+ client.fs().create_dir(&dot_git_dir).await?;
+ }
+ client.fs().set_index_for_repo(&dot_git_dir, &contents);
+ }
+ GitOperation::WriteGitBranch {
+ repo_path,
+ new_branch,
+ } => {
+ if !client.fs().directories(false).contains(&repo_path) {
+ return Err(TestError::Inapplicable);
+ }
+
+ log::info!(
+ "{}: writing git branch for repo {:?}: {:?}",
+ client.username,
+ repo_path,
+ new_branch
+ );
+
+ let dot_git_dir = repo_path.join(".git");
+ if client.fs().metadata(&dot_git_dir).await?.is_none() {
+ client.fs().create_dir(&dot_git_dir).await?;
+ }
+ client
+ .fs()
+ .set_branch_name(&dot_git_dir, new_branch.clone());
+ }
+ GitOperation::WriteGitStatuses {
+ repo_path,
+ statuses,
+ git_operation,
+ } => {
+ if !client.fs().directories(false).contains(&repo_path) {
+ return Err(TestError::Inapplicable);
+ }
+ for (path, _) in statuses.iter() {
+ if !client.fs().files().contains(&repo_path.join(path)) {
+ return Err(TestError::Inapplicable);
+ }
+ }
+
+ log::info!(
+ "{}: writing git statuses for repo {:?}: {:?}",
+ client.username,
+ repo_path,
+ statuses
+ );
+
+ let dot_git_dir = repo_path.join(".git");
+
+ let statuses = statuses
+ .iter()
+ .map(|(path, val)| (path.as_path(), val.clone()))
+ .collect::<Vec<_>>();
+
+ if client.fs().metadata(&dot_git_dir).await?.is_none() {
+ client.fs().create_dir(&dot_git_dir).await?;
+ }
+
+ if git_operation {
+ client.fs().set_status_for_repo_via_git_operation(
+ &dot_git_dir,
+ statuses.as_slice(),
+ );
+ } else {
+ client.fs().set_status_for_repo_via_working_copy_change(
+ &dot_git_dir,
+ statuses.as_slice(),
+ );
+ }
+ }
+ },
+ }
+ Ok(())
+ }
+
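+    // Register a fake Rust language server for each client. Completion,
+    // code-action, and rename responses are canned; definitions and document
+    // highlights are drawn from the executor's deterministic rng.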
+ async fn on_client_added(client: &Rc<TestClient>) {
+ let mut language = Language::new(
+ LanguageConfig {
+ name: "Rust".into(),
+ path_suffixes: vec!["rs".to_string()],
+ ..Default::default()
+ },
+ None,
+ );
+ language
+ .set_fake_lsp_adapter(Arc::new(FakeLspAdapter {
+ name: "the-fake-language-server",
+ capabilities: lsp::LanguageServer::full_capabilities(),
+ initializer: Some(Box::new({
+ let fs = client.app_state.fs.clone();
+ move |fake_server: &mut FakeLanguageServer| {
+ fake_server.handle_request::<lsp::request::Completion, _, _>(
+ |_, _| async move {
+ Ok(Some(lsp::CompletionResponse::Array(vec![
+ lsp::CompletionItem {
+ text_edit: Some(lsp::CompletionTextEdit::Edit(
+ lsp::TextEdit {
+ range: lsp::Range::new(
+ lsp::Position::new(0, 0),
+ lsp::Position::new(0, 0),
+ ),
+ new_text: "the-new-text".to_string(),
+ },
+ )),
+ ..Default::default()
+ },
+ ])))
+ },
+ );
+
+ fake_server.handle_request::<lsp::request::CodeActionRequest, _, _>(
+ |_, _| async move {
+ Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction(
+ lsp::CodeAction {
+ title: "the-code-action".to_string(),
+ ..Default::default()
+ },
+ )]))
+ },
+ );
+
+ fake_server.handle_request::<lsp::request::PrepareRenameRequest, _, _>(
+ |params, _| async move {
+ Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new(
+ params.position,
+ params.position,
+ ))))
+ },
+ );
+
+ fake_server.handle_request::<lsp::request::GotoDefinition, _, _>({
+ let fs = fs.clone();
+ move |_, cx| {
+ let background = cx.background();
+ let mut rng = background.rng();
+ let count = rng.gen_range::<usize, _>(1..3);
+ let files = fs.as_fake().files();
+ let files = (0..count)
+ .map(|_| files.choose(&mut *rng).unwrap().clone())
+ .collect::<Vec<_>>();
+ async move {
+ log::info!("LSP: Returning definitions in files {:?}", &files);
+ Ok(Some(lsp::GotoDefinitionResponse::Array(
+ files
+ .into_iter()
+ .map(|file| lsp::Location {
+ uri: lsp::Url::from_file_path(file).unwrap(),
+ range: Default::default(),
+ })
+ .collect(),
+ )))
+ }
+ }
+ });
+
+ fake_server.handle_request::<lsp::request::DocumentHighlightRequest, _, _>(
+ move |_, cx| {
+ let mut highlights = Vec::new();
+ let background = cx.background();
+ let mut rng = background.rng();
+
+ let highlight_count = rng.gen_range(1..=5);
+ for _ in 0..highlight_count {
+ let start_row = rng.gen_range(0..100);
+ let start_column = rng.gen_range(0..100);
+ let end_row = rng.gen_range(0..100);
+ let end_column = rng.gen_range(0..100);
+ let start = PointUtf16::new(start_row, start_column);
+ let end = PointUtf16::new(end_row, end_column);
+ let range = if start > end { end..start } else { start..end };
+ highlights.push(lsp::DocumentHighlight {
+ range: range_to_lsp(range.clone()),
+ kind: Some(lsp::DocumentHighlightKind::READ),
+ });
+ }
+ highlights.sort_unstable_by_key(|highlight| {
+ (highlight.range.start, highlight.range.end)
+ });
+ async move { Ok(Some(highlights)) }
+ },
+ );
+ }
+ })),
+ ..Default::default()
+ }))
+ .await;
+ client.app_state.languages.add(Arc::new(language));
+ }
+
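+    // Once all clients are idle, every guest's worktree snapshots and open
+    // buffers must exactly match the host's.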
+ fn on_clients_quiesced(clients: &[(Rc<TestClient>, TestAppContext)]) {
+ for (client, client_cx) in clients {
+ for guest_project in client.remote_projects().iter() {
+ guest_project.read_with(client_cx, |guest_project, cx| {
+ let host_project = clients.iter().find_map(|(client, cx)| {
+ let project = client
+ .local_projects()
+ .iter()
+ .find(|host_project| {
+ host_project.read_with(cx, |host_project, _| {
+ host_project.remote_id() == guest_project.remote_id()
+ })
+ })?
+ .clone();
+ Some((project, cx))
+ });
+
+ if !guest_project.is_read_only() {
+ if let Some((host_project, host_cx)) = host_project {
+ let host_worktree_snapshots =
+ host_project.read_with(host_cx, |host_project, cx| {
+ host_project
+ .worktrees(cx)
+ .map(|worktree| {
+ let worktree = worktree.read(cx);
+ (worktree.id(), worktree.snapshot())
+ })
+ .collect::<BTreeMap<_, _>>()
+ });
+ let guest_worktree_snapshots = guest_project
+ .worktrees(cx)
+ .map(|worktree| {
+ let worktree = worktree.read(cx);
+ (worktree.id(), worktree.snapshot())
+ })
+ .collect::<BTreeMap<_, _>>();
+
+ assert_eq!(
+ guest_worktree_snapshots.values().map(|w| w.abs_path()).collect::<Vec<_>>(),
+ host_worktree_snapshots.values().map(|w| w.abs_path()).collect::<Vec<_>>(),
+ "{} has different worktrees than the host for project {:?}",
+ client.username, guest_project.remote_id(),
+ );
+
+ for (id, host_snapshot) in &host_worktree_snapshots {
+ let guest_snapshot = &guest_worktree_snapshots[id];
+ assert_eq!(
+ guest_snapshot.root_name(),
+ host_snapshot.root_name(),
+ "{} has different root name than the host for worktree {}, project {:?}",
+ client.username,
+ id,
+ guest_project.remote_id(),
+ );
+ assert_eq!(
+ guest_snapshot.abs_path(),
+ host_snapshot.abs_path(),
+ "{} has different abs path than the host for worktree {}, project: {:?}",
+ client.username,
+ id,
+ guest_project.remote_id(),
+ );
+ assert_eq!(
+ guest_snapshot.entries(false).collect::<Vec<_>>(),
+ host_snapshot.entries(false).collect::<Vec<_>>(),
+ "{} has different snapshot than the host for worktree {:?} ({:?}) and project {:?}",
+ client.username,
+ host_snapshot.abs_path(),
+ id,
+ guest_project.remote_id(),
+ );
+ assert_eq!(guest_snapshot.repositories().collect::<Vec<_>>(), host_snapshot.repositories().collect::<Vec<_>>(),
+ "{} has different repositories than the host for worktree {:?} and project {:?}",
+ client.username,
+ host_snapshot.abs_path(),
+ guest_project.remote_id(),
+ );
+ assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id(),
+ "{} has different scan id than the host for worktree {:?} and project {:?}",
+ client.username,
+ host_snapshot.abs_path(),
+ guest_project.remote_id(),
+ );
+ }
+ }
+ }
+
+ for buffer in guest_project.opened_buffers(cx) {
+ let buffer = buffer.read(cx);
+ assert_eq!(
+ buffer.deferred_ops_len(),
+ 0,
+ "{} has deferred operations for buffer {:?} in project {:?}",
+ client.username,
+ buffer.file().unwrap().full_path(cx),
+ guest_project.remote_id(),
+ );
+ }
+ });
+ }
+
+ let buffers = client.buffers().clone();
+ for (guest_project, guest_buffers) in &buffers {
+ let project_id = if guest_project.read_with(client_cx, |project, _| {
+ project.is_local() || project.is_read_only()
+ }) {
+ continue;
+ } else {
+ guest_project
+ .read_with(client_cx, |project, _| project.remote_id())
+ .unwrap()
+ };
+ let guest_user_id = client.user_id().unwrap();
+
+ let host_project = clients.iter().find_map(|(client, cx)| {
+ let project = client
+ .local_projects()
+ .iter()
+ .find(|host_project| {
+ host_project.read_with(cx, |host_project, _| {
+ host_project.remote_id() == Some(project_id)
+ })
+ })?
+ .clone();
+ Some((client.user_id().unwrap(), project, cx))
+ });
+
+ let (host_user_id, host_project, host_cx) =
+ if let Some((host_user_id, host_project, host_cx)) = host_project {
+ (host_user_id, host_project, host_cx)
+ } else {
+ continue;
+ };
+
+ for guest_buffer in guest_buffers {
+ let buffer_id =
+ guest_buffer.read_with(client_cx, |buffer, _| buffer.remote_id());
+ let host_buffer = host_project.read_with(host_cx, |project, cx| {
+ project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| {
+ panic!(
+ "host does not have buffer for guest:{}, peer:{:?}, id:{}",
+ client.username,
+ client.peer_id(),
+ buffer_id
+ )
+ })
+ });
+ let path = host_buffer
+ .read_with(host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx));
+
+ assert_eq!(
+ guest_buffer.read_with(client_cx, |buffer, _| buffer.deferred_ops_len()),
+ 0,
+ "{}, buffer {}, path {:?} has deferred operations",
+ client.username,
+ buffer_id,
+ path,
+ );
+ assert_eq!(
+ guest_buffer.read_with(client_cx, |buffer, _| buffer.text()),
+ host_buffer.read_with(host_cx, |buffer, _| buffer.text()),
+ "{}, buffer {}, path {:?}, differs from the host's buffer",
+ client.username,
+ buffer_id,
+ path
+ );
+
+ let host_file = host_buffer.read_with(host_cx, |b, _| b.file().cloned());
+ let guest_file = guest_buffer.read_with(client_cx, |b, _| b.file().cloned());
+ match (host_file, guest_file) {
+ (Some(host_file), Some(guest_file)) => {
+ assert_eq!(guest_file.path(), host_file.path());
+ assert_eq!(guest_file.is_deleted(), host_file.is_deleted());
+ assert_eq!(
+ guest_file.mtime(),
+ host_file.mtime(),
+ "guest {} mtime does not match host {} for path {:?} in project {}",
+ guest_user_id,
+ host_user_id,
+ guest_file.path(),
+ project_id,
+ );
+ }
+ (None, None) => {}
+ (None, _) => panic!("host's file is None, guest's isn't"),
+                        (_, None) => panic!("guest's file is None, host's isn't"),
+ }
+
+ let host_diff_base = host_buffer
+ .read_with(host_cx, |b, _| b.diff_base().map(ToString::to_string));
+ let guest_diff_base = guest_buffer
+ .read_with(client_cx, |b, _| b.diff_base().map(ToString::to_string));
+ assert_eq!(
+ guest_diff_base, host_diff_base,
+ "guest {} diff base does not match host's for path {path:?} in project {project_id}",
+ client.username
+ );
+
+ let host_saved_version =
+ host_buffer.read_with(host_cx, |b, _| b.saved_version().clone());
+ let guest_saved_version =
+ guest_buffer.read_with(client_cx, |b, _| b.saved_version().clone());
+ assert_eq!(
+ guest_saved_version, host_saved_version,
+ "guest {} saved version does not match host's for path {path:?} in project {project_id}",
+ client.username
+ );
+
+ let host_saved_version_fingerprint =
+ host_buffer.read_with(host_cx, |b, _| b.saved_version_fingerprint());
+ let guest_saved_version_fingerprint =
+ guest_buffer.read_with(client_cx, |b, _| b.saved_version_fingerprint());
+ assert_eq!(
+ guest_saved_version_fingerprint, host_saved_version_fingerprint,
+ "guest {} saved fingerprint does not match host's for path {path:?} in project {project_id}",
+ client.username
+ );
+
+ let host_saved_mtime = host_buffer.read_with(host_cx, |b, _| b.saved_mtime());
+ let guest_saved_mtime =
+ guest_buffer.read_with(client_cx, |b, _| b.saved_mtime());
+ assert_eq!(
+ guest_saved_mtime, host_saved_mtime,
+ "guest {} saved mtime does not match host's for path {path:?} in project {project_id}",
+ client.username
+ );
+
+ let host_is_dirty = host_buffer.read_with(host_cx, |b, _| b.is_dirty());
+ let guest_is_dirty = guest_buffer.read_with(client_cx, |b, _| b.is_dirty());
+ assert_eq!(guest_is_dirty, host_is_dirty,
+ "guest {} dirty status does not match host's for path {path:?} in project {project_id}",
+ client.username
+ );
+
+ let host_has_conflict = host_buffer.read_with(host_cx, |b, _| b.has_conflict());
+ let guest_has_conflict =
+ guest_buffer.read_with(client_cx, |b, _| b.has_conflict());
+ assert_eq!(guest_has_conflict, host_has_conflict,
+ "guest {} conflict status does not match host's for path {path:?} in project {project_id}",
+ client.username
+ );
+ }
+ }
+ }
+ }
+}
+
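+/// Picks a random directory as the repository root and generates a random
+/// write to its index, branch, or file statuses.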
+fn generate_git_operation(rng: &mut StdRng, client: &TestClient) -> GitOperation {
+ fn generate_file_paths(
+ repo_path: &Path,
+ rng: &mut StdRng,
+ client: &TestClient,
+ ) -> Vec<PathBuf> {
+ let mut paths = client
+ .fs()
+ .files()
+ .into_iter()
+ .filter(|path| path.starts_with(repo_path))
+ .collect::<Vec<_>>();
+
+ let count = rng.gen_range(0..=paths.len());
+ paths.shuffle(rng);
+ paths.truncate(count);
+
+ paths
+ .iter()
+ .map(|path| path.strip_prefix(repo_path).unwrap().to_path_buf())
+ .collect::<Vec<_>>()
+ }
+
+ let repo_path = client.fs().directories(false).choose(rng).unwrap().clone();
+
+ match rng.gen_range(0..100_u32) {
+ 0..=25 => {
+ let file_paths = generate_file_paths(&repo_path, rng, client);
+
+ let contents = file_paths
+ .into_iter()
+ .map(|path| (path, Alphanumeric.sample_string(rng, 16)))
+ .collect();
+
+ GitOperation::WriteGitIndex {
+ repo_path,
+ contents,
+ }
+ }
+ 26..=63 => {
+ let new_branch = (rng.gen_range(0..10) > 3).then(|| Alphanumeric.sample_string(rng, 8));
+
+ GitOperation::WriteGitBranch {
+ repo_path,
+ new_branch,
+ }
+ }
+ 64..=100 => {
+ let file_paths = generate_file_paths(&repo_path, rng, client);
+
+ let statuses = file_paths
+ .into_iter()
+            .map(|path| {
+ (
+                    path,
+ match rng.gen_range(0..3_u32) {
+ 0 => GitFileStatus::Added,
+ 1 => GitFileStatus::Modified,
+ 2 => GitFileStatus::Conflict,
+ _ => unreachable!(),
+ },
+ )
+ })
+ .collect::<Vec<_>>();
+
+ let git_operation = rng.gen::<bool>();
+
+ GitOperation::WriteGitStatuses {
+ repo_path,
+ statuses,
+ git_operation,
+ }
+ }
+ _ => unreachable!(),
+ }
+}
+
+fn buffer_for_full_path(
+ client: &TestClient,
+ project: &ModelHandle<Project>,
+ full_path: &PathBuf,
+ cx: &TestAppContext,
+) -> Option<ModelHandle<language::Buffer>> {
+ client
+ .buffers_for_project(project)
+ .iter()
+ .find(|buffer| {
+ buffer.read_with(cx, |buffer, cx| {
+ buffer.file().unwrap().full_path(cx) == *full_path
+ })
+ })
+ .cloned()
+}
+
+fn project_for_root_name(
+ client: &TestClient,
+ root_name: &str,
+ cx: &TestAppContext,
+) -> Option<ModelHandle<Project>> {
+ if let Some(ix) = project_ix_for_root_name(&*client.local_projects(), root_name, cx) {
+ return Some(client.local_projects()[ix].clone());
+ }
+ if let Some(ix) = project_ix_for_root_name(&*client.remote_projects(), root_name, cx) {
+ return Some(client.remote_projects()[ix].clone());
+ }
+ None
+}
+
+fn project_ix_for_root_name(
+ projects: &[ModelHandle<Project>],
+ root_name: &str,
+ cx: &TestAppContext,
+) -> Option<usize> {
+ projects.iter().position(|project| {
+ project.read_with(cx, |project, cx| {
+ let worktree = project.visible_worktrees(cx).next().unwrap();
+ worktree.read(cx).root_name() == root_name
+ })
+ })
+}
+
+fn root_name_for_project(project: &ModelHandle<Project>, cx: &TestAppContext) -> String {
+ project.read_with(cx, |project, cx| {
+ project
+ .visible_worktrees(cx)
+ .next()
+ .unwrap()
+ .read(cx)
+ .root_name()
+ .to_string()
+ })
+}
+
+fn project_path_for_full_path(
+ project: &ModelHandle<Project>,
+ full_path: &Path,
+ cx: &TestAppContext,
+) -> Option<ProjectPath> {
+ let mut components = full_path.components();
+ let root_name = components.next().unwrap().as_os_str().to_str().unwrap();
+ let path = components.as_path().into();
+ let worktree_id = project.read_with(cx, |project, cx| {
+ project.worktrees(cx).find_map(|worktree| {
+ let worktree = worktree.read(cx);
+ if worktree.root_name() == root_name {
+ Some(worktree.id())
+ } else {
+ None
+ }
+ })
+ })?;
+ Some(ProjectPath { worktree_id, path })
+}
+
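+/// Shares the project through the active call when the client is in a room
+/// and the project is local but not yet shared. Failures are logged rather
+/// than failing the test.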
+async fn ensure_project_shared(
+ project: &ModelHandle<Project>,
+ client: &TestClient,
+ cx: &mut TestAppContext,
+) {
+ let first_root_name = root_name_for_project(project, cx);
+ let active_call = cx.read(ActiveCall::global);
+ if active_call.read_with(cx, |call, _| call.room().is_some())
+ && project.read_with(cx, |project, _| project.is_local() && !project.is_shared())
+ {
+ match active_call
+ .update(cx, |call, cx| call.share_project(project.clone(), cx))
+ .await
+ {
+ Ok(project_id) => {
+ log::info!(
+ "{}: shared project {} with id {}",
+ client.username,
+ first_root_name,
+ project_id
+ );
+ }
+ Err(error) => {
+ log::error!(
+ "{}: error sharing project {}: {:?}",
+ client.username,
+ first_root_name,
+ error
+ );
+ }
+ }
+ }
+}
+
+fn choose_random_project(client: &TestClient, rng: &mut StdRng) -> Option<ModelHandle<Project>> {
+ client
+ .local_projects()
+ .iter()
+ .chain(client.remote_projects().iter())
+ .choose(rng)
+ .cloned()
+}
+
+fn gen_file_name(rng: &mut StdRng) -> String {
+ let mut name = String::new();
+ for _ in 0..10 {
+ let letter = rng.gen_range('a'..='z');
+ name.push(letter);
+ }
+ name
+}
crates/collab/src/tests/randomized_integration_tests.rs
@@ -1,2199 +0,0 @@
-use crate::{
- db::{self, NewUserParams, UserId},
- rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT},
- tests::{TestClient, TestServer},
-};
-use anyhow::{anyhow, Result};
-use call::ActiveCall;
-use client::RECEIVE_TIMEOUT;
-use collections::{BTreeMap, HashMap};
-use editor::Bias;
-use fs::{repository::GitFileStatus, FakeFs, Fs as _};
-use futures::StreamExt as _;
-use gpui::{executor::Deterministic, ModelHandle, Task, TestAppContext};
-use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16};
-use lsp::FakeLanguageServer;
-use parking_lot::Mutex;
-use pretty_assertions::assert_eq;
-use project::{search::SearchQuery, Project, ProjectPath};
-use rand::{
- distributions::{Alphanumeric, DistString},
- prelude::*,
-};
-use serde::{Deserialize, Serialize};
-use settings::SettingsStore;
-use std::{
- env,
- ops::Range,
- path::{Path, PathBuf},
- rc::Rc,
- sync::{
- atomic::{AtomicBool, Ordering::SeqCst},
- Arc,
- },
-};
-use util::ResultExt;
-
-lazy_static::lazy_static! {
- static ref PLAN_LOAD_PATH: Option<PathBuf> = path_env_var("LOAD_PLAN");
- static ref PLAN_SAVE_PATH: Option<PathBuf> = path_env_var("SAVE_PLAN");
-}
-static LOADED_PLAN_JSON: Mutex<Option<Vec<u8>>> = Mutex::new(None);
-static PLAN: Mutex<Option<Arc<Mutex<TestPlan>>>> = Mutex::new(None);
-
-#[gpui::test(iterations = 100, on_failure = "on_failure")]
-async fn test_random_collaboration(
- cx: &mut TestAppContext,
- deterministic: Arc<Deterministic>,
- rng: StdRng,
-) {
- deterministic.forbid_parking();
-
- let max_peers = env::var("MAX_PEERS")
- .map(|i| i.parse().expect("invalid `MAX_PEERS` variable"))
- .unwrap_or(3);
- let max_operations = env::var("OPERATIONS")
- .map(|i| i.parse().expect("invalid `OPERATIONS` variable"))
- .unwrap_or(10);
-
- let mut server = TestServer::start(&deterministic).await;
- let db = server.app_state.db.clone();
-
- let mut users = Vec::new();
- for ix in 0..max_peers {
- let username = format!("user-{}", ix + 1);
- let user_id = db
- .create_user(
- &format!("{username}@example.com"),
- false,
- NewUserParams {
- github_login: username.clone(),
- github_user_id: (ix + 1) as i32,
- invite_count: 0,
- },
- )
- .await
- .unwrap()
- .user_id;
- users.push(UserTestPlan {
- user_id,
- username,
- online: false,
- next_root_id: 0,
- operation_ix: 0,
- });
- }
-
- for (ix, user_a) in users.iter().enumerate() {
- for user_b in &users[ix + 1..] {
- server
- .app_state
- .db
- .send_contact_request(user_a.user_id, user_b.user_id)
- .await
- .unwrap();
- server
- .app_state
- .db
- .respond_to_contact_request(user_b.user_id, user_a.user_id, true)
- .await
- .unwrap();
- }
- }
-
- let plan = Arc::new(Mutex::new(TestPlan::new(rng, users, max_operations)));
-
- if let Some(path) = &*PLAN_LOAD_PATH {
- let json = LOADED_PLAN_JSON
- .lock()
- .get_or_insert_with(|| {
- eprintln!("loaded test plan from path {:?}", path);
- std::fs::read(path).unwrap()
- })
- .clone();
- plan.lock().deserialize(json);
- }
-
- PLAN.lock().replace(plan.clone());
-
- let mut clients = Vec::new();
- let mut client_tasks = Vec::new();
- let mut operation_channels = Vec::new();
-
- loop {
- let Some((next_operation, applied)) = plan.lock().next_server_operation(&clients) else {
- break;
- };
- applied.store(true, SeqCst);
- let did_apply = apply_server_operation(
- deterministic.clone(),
- &mut server,
- &mut clients,
- &mut client_tasks,
- &mut operation_channels,
- plan.clone(),
- next_operation,
- cx,
- )
- .await;
- if !did_apply {
- applied.store(false, SeqCst);
- }
- }
-
- drop(operation_channels);
- deterministic.start_waiting();
- futures::future::join_all(client_tasks).await;
- deterministic.finish_waiting();
- deterministic.run_until_parked();
-
- check_consistency_between_clients(&clients);
-
- for (client, mut cx) in clients {
- cx.update(|cx| {
- let store = cx.remove_global::<SettingsStore>();
- cx.clear_globals();
- cx.set_global(store);
- drop(client);
- });
- }
-
- deterministic.run_until_parked();
-}
-
-fn on_failure() {
- if let Some(plan) = PLAN.lock().clone() {
- if let Some(path) = &*PLAN_SAVE_PATH {
- eprintln!("saved test plan to path {:?}", path);
- std::fs::write(path, plan.lock().serialize()).unwrap();
- }
- }
-}
-
-async fn apply_server_operation(
- deterministic: Arc<Deterministic>,
- server: &mut TestServer,
- clients: &mut Vec<(Rc<TestClient>, TestAppContext)>,
- client_tasks: &mut Vec<Task<()>>,
- operation_channels: &mut Vec<futures::channel::mpsc::UnboundedSender<usize>>,
- plan: Arc<Mutex<TestPlan>>,
- operation: Operation,
- cx: &mut TestAppContext,
-) -> bool {
- match operation {
- Operation::AddConnection { user_id } => {
- let username;
- {
- let mut plan = plan.lock();
- let user = plan.user(user_id);
- if user.online {
- return false;
- }
- user.online = true;
- username = user.username.clone();
- };
- log::info!("Adding new connection for {}", username);
- let next_entity_id = (user_id.0 * 10_000) as usize;
- let mut client_cx = TestAppContext::new(
- cx.foreground_platform(),
- cx.platform(),
- deterministic.build_foreground(user_id.0 as usize),
- deterministic.build_background(),
- cx.font_cache(),
- cx.leak_detector(),
- next_entity_id,
- cx.function_name.clone(),
- );
-
- let (operation_tx, operation_rx) = futures::channel::mpsc::unbounded();
- let client = Rc::new(server.create_client(&mut client_cx, &username).await);
- operation_channels.push(operation_tx);
- clients.push((client.clone(), client_cx.clone()));
- client_tasks.push(client_cx.foreground().spawn(simulate_client(
- client,
- operation_rx,
- plan.clone(),
- client_cx,
- )));
-
- log::info!("Added connection for {}", username);
- }
-
- Operation::RemoveConnection {
- user_id: removed_user_id,
- } => {
- log::info!("Simulating full disconnection of user {}", removed_user_id);
- let client_ix = clients
- .iter()
- .position(|(client, cx)| client.current_user_id(cx) == removed_user_id);
- let Some(client_ix) = client_ix else {
- return false;
- };
- let user_connection_ids = server
- .connection_pool
- .lock()
- .user_connection_ids(removed_user_id)
- .collect::<Vec<_>>();
- assert_eq!(user_connection_ids.len(), 1);
- let removed_peer_id = user_connection_ids[0].into();
- let (client, mut client_cx) = clients.remove(client_ix);
- let client_task = client_tasks.remove(client_ix);
- operation_channels.remove(client_ix);
- server.forbid_connections();
- server.disconnect_client(removed_peer_id);
- deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
- deterministic.start_waiting();
- log::info!("Waiting for user {} to exit...", removed_user_id);
- client_task.await;
- deterministic.finish_waiting();
- server.allow_connections();
-
- for project in client.remote_projects().iter() {
- project.read_with(&client_cx, |project, _| {
- assert!(
- project.is_read_only(),
- "project {:?} should be read only",
- project.remote_id()
- )
- });
- }
-
- for (client, cx) in clients {
- let contacts = server
- .app_state
- .db
- .get_contacts(client.current_user_id(cx))
- .await
- .unwrap();
- let pool = server.connection_pool.lock();
- for contact in contacts {
- if let db::Contact::Accepted { user_id, busy, .. } = contact {
- if user_id == removed_user_id {
- assert!(!pool.is_user_online(user_id));
- assert!(!busy);
- }
- }
- }
- }
-
- log::info!("{} removed", client.username);
- plan.lock().user(removed_user_id).online = false;
- client_cx.update(|cx| {
- cx.clear_globals();
- drop(client);
- });
- }
-
- Operation::BounceConnection { user_id } => {
- log::info!("Simulating temporary disconnection of user {}", user_id);
- let user_connection_ids = server
- .connection_pool
- .lock()
- .user_connection_ids(user_id)
- .collect::<Vec<_>>();
- if user_connection_ids.is_empty() {
- return false;
- }
- assert_eq!(user_connection_ids.len(), 1);
- let peer_id = user_connection_ids[0].into();
- server.disconnect_client(peer_id);
- deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
- }
-
- Operation::RestartServer => {
- log::info!("Simulating server restart");
- server.reset().await;
- deterministic.advance_clock(RECEIVE_TIMEOUT);
- server.start().await.unwrap();
- deterministic.advance_clock(CLEANUP_TIMEOUT);
- let environment = &server.app_state.config.zed_environment;
- let (stale_room_ids, _) = server
- .app_state
- .db
- .stale_server_resource_ids(environment, server.id())
- .await
- .unwrap();
- assert_eq!(stale_room_ids, vec![]);
- }
-
- Operation::MutateClients {
- user_ids,
- batch_id,
- quiesce,
- } => {
- let mut applied = false;
- for user_id in user_ids {
- let client_ix = clients
- .iter()
- .position(|(client, cx)| client.current_user_id(cx) == user_id);
- let Some(client_ix) = client_ix else { continue };
- applied = true;
- if let Err(err) = operation_channels[client_ix].unbounded_send(batch_id) {
- log::error!("error signaling user {user_id}: {err}");
- }
- }
-
- if quiesce && applied {
- deterministic.run_until_parked();
- check_consistency_between_clients(&clients);
- }
-
- return applied;
- }
- }
- true
-}
-
-async fn apply_client_operation(
- client: &TestClient,
- operation: ClientOperation,
- cx: &mut TestAppContext,
-) -> Result<(), TestError> {
- match operation {
- ClientOperation::AcceptIncomingCall => {
- let active_call = cx.read(ActiveCall::global);
- if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) {
- Err(TestError::Inapplicable)?;
- }
-
- log::info!("{}: accepting incoming call", client.username);
- active_call
- .update(cx, |call, cx| call.accept_incoming(cx))
- .await?;
- }
-
- ClientOperation::RejectIncomingCall => {
- let active_call = cx.read(ActiveCall::global);
- if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) {
- Err(TestError::Inapplicable)?;
- }
-
- log::info!("{}: declining incoming call", client.username);
- active_call.update(cx, |call, cx| call.decline_incoming(cx))?;
- }
-
- ClientOperation::LeaveCall => {
- let active_call = cx.read(ActiveCall::global);
- if active_call.read_with(cx, |call, _| call.room().is_none()) {
- Err(TestError::Inapplicable)?;
- }
-
- log::info!("{}: hanging up", client.username);
- active_call.update(cx, |call, cx| call.hang_up(cx)).await?;
- }
-
- ClientOperation::InviteContactToCall { user_id } => {
- let active_call = cx.read(ActiveCall::global);
-
- log::info!("{}: inviting {}", client.username, user_id,);
- active_call
- .update(cx, |call, cx| call.invite(user_id.to_proto(), None, cx))
- .await
- .log_err();
- }
-
- ClientOperation::OpenLocalProject { first_root_name } => {
- log::info!(
- "{}: opening local project at {:?}",
- client.username,
- first_root_name
- );
-
- let root_path = Path::new("/").join(&first_root_name);
- client.fs().create_dir(&root_path).await.unwrap();
- client
- .fs()
- .create_file(&root_path.join("main.rs"), Default::default())
- .await
- .unwrap();
- let project = client.build_local_project(root_path, cx).await.0;
- ensure_project_shared(&project, client, cx).await;
- client.local_projects_mut().push(project.clone());
- }
-
- ClientOperation::AddWorktreeToProject {
- project_root_name,
- new_root_path,
- } => {
- let project = project_for_root_name(client, &project_root_name, cx)
- .ok_or(TestError::Inapplicable)?;
-
- log::info!(
- "{}: finding/creating local worktree at {:?} to project with root path {}",
- client.username,
- new_root_path,
- project_root_name
- );
-
- ensure_project_shared(&project, client, cx).await;
- if !client.fs().paths(false).contains(&new_root_path) {
- client.fs().create_dir(&new_root_path).await.unwrap();
- }
- project
- .update(cx, |project, cx| {
- project.find_or_create_local_worktree(&new_root_path, true, cx)
- })
- .await
- .unwrap();
- }
-
- ClientOperation::CloseRemoteProject { project_root_name } => {
- let project = project_for_root_name(client, &project_root_name, cx)
- .ok_or(TestError::Inapplicable)?;
-
- log::info!(
- "{}: closing remote project with root path {}",
- client.username,
- project_root_name,
- );
-
- let ix = client
- .remote_projects()
- .iter()
- .position(|p| p == &project)
- .unwrap();
- cx.update(|_| {
- client.remote_projects_mut().remove(ix);
- client.buffers().retain(|p, _| *p != project);
- drop(project);
- });
- }
-
- ClientOperation::OpenRemoteProject {
- host_id,
- first_root_name,
- } => {
- let active_call = cx.read(ActiveCall::global);
- let project = active_call
- .update(cx, |call, cx| {
- let room = call.room().cloned()?;
- let participant = room
- .read(cx)
- .remote_participants()
- .get(&host_id.to_proto())?;
- let project_id = participant
- .projects
- .iter()
- .find(|project| project.worktree_root_names[0] == first_root_name)?
- .id;
- Some(room.update(cx, |room, cx| {
- room.join_project(
- project_id,
- client.language_registry().clone(),
- FakeFs::new(cx.background().clone()),
- cx,
- )
- }))
- })
- .ok_or(TestError::Inapplicable)?;
-
- log::info!(
- "{}: joining remote project of user {}, root name {}",
- client.username,
- host_id,
- first_root_name,
- );
-
- let project = project.await?;
- client.remote_projects_mut().push(project.clone());
- }
-
- ClientOperation::CreateWorktreeEntry {
- project_root_name,
- is_local,
- full_path,
- is_dir,
- } => {
- let project = project_for_root_name(client, &project_root_name, cx)
- .ok_or(TestError::Inapplicable)?;
- let project_path = project_path_for_full_path(&project, &full_path, cx)
- .ok_or(TestError::Inapplicable)?;
-
- log::info!(
- "{}: creating {} at path {:?} in {} project {}",
- client.username,
- if is_dir { "dir" } else { "file" },
- full_path,
- if is_local { "local" } else { "remote" },
- project_root_name,
- );
-
- ensure_project_shared(&project, client, cx).await;
- project
- .update(cx, |p, cx| p.create_entry(project_path, is_dir, cx))
- .unwrap()
- .await?;
- }
-
- ClientOperation::OpenBuffer {
- project_root_name,
- is_local,
- full_path,
- } => {
- let project = project_for_root_name(client, &project_root_name, cx)
- .ok_or(TestError::Inapplicable)?;
- let project_path = project_path_for_full_path(&project, &full_path, cx)
- .ok_or(TestError::Inapplicable)?;
-
- log::info!(
- "{}: opening buffer {:?} in {} project {}",
- client.username,
- full_path,
- if is_local { "local" } else { "remote" },
- project_root_name,
- );
-
- ensure_project_shared(&project, client, cx).await;
- let buffer = project
- .update(cx, |project, cx| project.open_buffer(project_path, cx))
- .await?;
- client.buffers_for_project(&project).insert(buffer);
- }
-
- ClientOperation::EditBuffer {
- project_root_name,
- is_local,
- full_path,
- edits,
- } => {
- let project = project_for_root_name(client, &project_root_name, cx)
- .ok_or(TestError::Inapplicable)?;
- let buffer = buffer_for_full_path(client, &project, &full_path, cx)
- .ok_or(TestError::Inapplicable)?;
-
- log::info!(
- "{}: editing buffer {:?} in {} project {} with {:?}",
- client.username,
- full_path,
- if is_local { "local" } else { "remote" },
- project_root_name,
- edits
- );
-
- ensure_project_shared(&project, client, cx).await;
- buffer.update(cx, |buffer, cx| {
- let snapshot = buffer.snapshot();
- buffer.edit(
- edits.into_iter().map(|(range, text)| {
- let start = snapshot.clip_offset(range.start, Bias::Left);
- let end = snapshot.clip_offset(range.end, Bias::Right);
- (start..end, text)
- }),
- None,
- cx,
- );
- });
- }
-
- ClientOperation::CloseBuffer {
- project_root_name,
- is_local,
- full_path,
- } => {
- let project = project_for_root_name(client, &project_root_name, cx)
- .ok_or(TestError::Inapplicable)?;
- let buffer = buffer_for_full_path(client, &project, &full_path, cx)
- .ok_or(TestError::Inapplicable)?;
-
- log::info!(
- "{}: closing buffer {:?} in {} project {}",
- client.username,
- full_path,
- if is_local { "local" } else { "remote" },
- project_root_name
- );
-
- ensure_project_shared(&project, client, cx).await;
- cx.update(|_| {
- client.buffers_for_project(&project).remove(&buffer);
- drop(buffer);
- });
- }
-
- ClientOperation::SaveBuffer {
- project_root_name,
- is_local,
- full_path,
- detach,
- } => {
- let project = project_for_root_name(client, &project_root_name, cx)
- .ok_or(TestError::Inapplicable)?;
- let buffer = buffer_for_full_path(client, &project, &full_path, cx)
- .ok_or(TestError::Inapplicable)?;
-
- log::info!(
- "{}: saving buffer {:?} in {} project {}, {}",
- client.username,
- full_path,
- if is_local { "local" } else { "remote" },
- project_root_name,
- if detach { "detaching" } else { "awaiting" }
- );
-
- ensure_project_shared(&project, client, cx).await;
- let requested_version = buffer.read_with(cx, |buffer, _| buffer.version());
- let save = project.update(cx, |project, cx| project.save_buffer(buffer.clone(), cx));
- let save = cx.spawn(|cx| async move {
- save.await
- .map_err(|err| anyhow!("save request failed: {:?}", err))?;
- assert!(buffer
- .read_with(&cx, |buffer, _| { buffer.saved_version().to_owned() })
- .observed_all(&requested_version));
- anyhow::Ok(())
- });
- if detach {
- cx.update(|cx| save.detach_and_log_err(cx));
- } else {
- save.await?;
- }
- }
-
- ClientOperation::RequestLspDataInBuffer {
- project_root_name,
- is_local,
- full_path,
- offset,
- kind,
- detach,
- } => {
- let project = project_for_root_name(client, &project_root_name, cx)
- .ok_or(TestError::Inapplicable)?;
- let buffer = buffer_for_full_path(client, &project, &full_path, cx)
- .ok_or(TestError::Inapplicable)?;
-
- log::info!(
- "{}: request LSP {:?} for buffer {:?} in {} project {}, {}",
- client.username,
- kind,
- full_path,
- if is_local { "local" } else { "remote" },
- project_root_name,
- if detach { "detaching" } else { "awaiting" }
- );
-
- use futures::{FutureExt as _, TryFutureExt as _};
- let offset = buffer.read_with(cx, |b, _| b.clip_offset(offset, Bias::Left));
- let request = cx.foreground().spawn(project.update(cx, |project, cx| {
- match kind {
- LspRequestKind::Rename => project
- .prepare_rename(buffer, offset, cx)
- .map_ok(|_| ())
- .boxed(),
- LspRequestKind::Completion => project
- .completions(&buffer, offset, cx)
- .map_ok(|_| ())
- .boxed(),
- LspRequestKind::CodeAction => project
- .code_actions(&buffer, offset..offset, cx)
- .map_ok(|_| ())
- .boxed(),
- LspRequestKind::Definition => project
- .definition(&buffer, offset, cx)
- .map_ok(|_| ())
- .boxed(),
- LspRequestKind::Highlights => project
- .document_highlights(&buffer, offset, cx)
- .map_ok(|_| ())
- .boxed(),
- }
- }));
- if detach {
- request.detach();
- } else {
- request.await?;
- }
- }
-
- ClientOperation::SearchProject {
- project_root_name,
- is_local,
- query,
- detach,
- } => {
- let project = project_for_root_name(client, &project_root_name, cx)
- .ok_or(TestError::Inapplicable)?;
-
- log::info!(
- "{}: search {} project {} for {:?}, {}",
- client.username,
- if is_local { "local" } else { "remote" },
- project_root_name,
- query,
- if detach { "detaching" } else { "awaiting" }
- );
-
- let mut search = project.update(cx, |project, cx| {
- project.search(
- SearchQuery::text(query, false, false, Vec::new(), Vec::new()),
- cx,
- )
- });
- drop(project);
- let search = cx.background().spawn(async move {
- let mut results = HashMap::default();
- while let Some((buffer, ranges)) = search.next().await {
- results.entry(buffer).or_insert(ranges);
- }
- results
- });
- search.await;
- }
-
- ClientOperation::WriteFsEntry {
- path,
- is_dir,
- content,
- } => {
- if !client
- .fs()
- .directories(false)
- .contains(&path.parent().unwrap().to_owned())
- {
- return Err(TestError::Inapplicable);
- }
-
- if is_dir {
- log::info!("{}: creating dir at {:?}", client.username, path);
- client.fs().create_dir(&path).await.unwrap();
- } else {
- let exists = client.fs().metadata(&path).await?.is_some();
- let verb = if exists { "updating" } else { "creating" };
- log::info!("{}: {} file at {:?}", verb, client.username, path);
-
- client
- .fs()
- .save(&path, &content.as_str().into(), text::LineEnding::Unix)
- .await
- .unwrap();
- }
- }
-
- ClientOperation::GitOperation { operation } => match operation {
- GitOperation::WriteGitIndex {
- repo_path,
- contents,
- } => {
- if !client.fs().directories(false).contains(&repo_path) {
- return Err(TestError::Inapplicable);
- }
-
- for (path, _) in contents.iter() {
- if !client.fs().files().contains(&repo_path.join(path)) {
- return Err(TestError::Inapplicable);
- }
- }
-
- log::info!(
- "{}: writing git index for repo {:?}: {:?}",
- client.username,
- repo_path,
- contents
- );
-
- let dot_git_dir = repo_path.join(".git");
- let contents = contents
- .iter()
- .map(|(path, contents)| (path.as_path(), contents.clone()))
- .collect::<Vec<_>>();
- if client.fs().metadata(&dot_git_dir).await?.is_none() {
- client.fs().create_dir(&dot_git_dir).await?;
- }
- client.fs().set_index_for_repo(&dot_git_dir, &contents);
- }
- GitOperation::WriteGitBranch {
- repo_path,
- new_branch,
- } => {
- if !client.fs().directories(false).contains(&repo_path) {
- return Err(TestError::Inapplicable);
- }
-
- log::info!(
- "{}: writing git branch for repo {:?}: {:?}",
- client.username,
- repo_path,
- new_branch
- );
-
- let dot_git_dir = repo_path.join(".git");
- if client.fs().metadata(&dot_git_dir).await?.is_none() {
- client.fs().create_dir(&dot_git_dir).await?;
- }
- client.fs().set_branch_name(&dot_git_dir, new_branch);
- }
- GitOperation::WriteGitStatuses {
- repo_path,
- statuses,
- git_operation,
- } => {
- if !client.fs().directories(false).contains(&repo_path) {
- return Err(TestError::Inapplicable);
- }
- for (path, _) in statuses.iter() {
- if !client.fs().files().contains(&repo_path.join(path)) {
- return Err(TestError::Inapplicable);
- }
- }
-
- log::info!(
- "{}: writing git statuses for repo {:?}: {:?}",
- client.username,
- repo_path,
- statuses
- );
-
- let dot_git_dir = repo_path.join(".git");
-
- let statuses = statuses
- .iter()
- .map(|(path, val)| (path.as_path(), val.clone()))
- .collect::<Vec<_>>();
-
- if client.fs().metadata(&dot_git_dir).await?.is_none() {
- client.fs().create_dir(&dot_git_dir).await?;
- }
-
- if git_operation {
- client
- .fs()
- .set_status_for_repo_via_git_operation(&dot_git_dir, statuses.as_slice());
- } else {
- client.fs().set_status_for_repo_via_working_copy_change(
- &dot_git_dir,
- statuses.as_slice(),
- );
- }
- }
- },
- }
- Ok(())
-}
-
-fn check_consistency_between_clients(clients: &[(Rc<TestClient>, TestAppContext)]) {
- for (client, client_cx) in clients {
- for guest_project in client.remote_projects().iter() {
- guest_project.read_with(client_cx, |guest_project, cx| {
- let host_project = clients.iter().find_map(|(client, cx)| {
- let project = client
- .local_projects()
- .iter()
- .find(|host_project| {
- host_project.read_with(cx, |host_project, _| {
- host_project.remote_id() == guest_project.remote_id()
- })
- })?
- .clone();
- Some((project, cx))
- });
-
- if !guest_project.is_read_only() {
- if let Some((host_project, host_cx)) = host_project {
- let host_worktree_snapshots =
- host_project.read_with(host_cx, |host_project, cx| {
- host_project
- .worktrees(cx)
- .map(|worktree| {
- let worktree = worktree.read(cx);
- (worktree.id(), worktree.snapshot())
- })
- .collect::<BTreeMap<_, _>>()
- });
- let guest_worktree_snapshots = guest_project
- .worktrees(cx)
- .map(|worktree| {
- let worktree = worktree.read(cx);
- (worktree.id(), worktree.snapshot())
- })
- .collect::<BTreeMap<_, _>>();
-
- assert_eq!(
- guest_worktree_snapshots.values().map(|w| w.abs_path()).collect::<Vec<_>>(),
- host_worktree_snapshots.values().map(|w| w.abs_path()).collect::<Vec<_>>(),
- "{} has different worktrees than the host for project {:?}",
- client.username, guest_project.remote_id(),
- );
-
- for (id, host_snapshot) in &host_worktree_snapshots {
- let guest_snapshot = &guest_worktree_snapshots[id];
- assert_eq!(
- guest_snapshot.root_name(),
- host_snapshot.root_name(),
- "{} has different root name than the host for worktree {}, project {:?}",
- client.username,
- id,
- guest_project.remote_id(),
- );
- assert_eq!(
- guest_snapshot.abs_path(),
- host_snapshot.abs_path(),
- "{} has different abs path than the host for worktree {}, project: {:?}",
- client.username,
- id,
- guest_project.remote_id(),
- );
- assert_eq!(
- guest_snapshot.entries(false).collect::<Vec<_>>(),
- host_snapshot.entries(false).collect::<Vec<_>>(),
- "{} has different snapshot than the host for worktree {:?} ({:?}) and project {:?}",
- client.username,
- host_snapshot.abs_path(),
- id,
- guest_project.remote_id(),
- );
- assert_eq!(guest_snapshot.repositories().collect::<Vec<_>>(), host_snapshot.repositories().collect::<Vec<_>>(),
- "{} has different repositories than the host for worktree {:?} and project {:?}",
- client.username,
- host_snapshot.abs_path(),
- guest_project.remote_id(),
- );
- assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id(),
- "{} has different scan id than the host for worktree {:?} and project {:?}",
- client.username,
- host_snapshot.abs_path(),
- guest_project.remote_id(),
- );
- }
- }
- }
-
- for buffer in guest_project.opened_buffers(cx) {
- let buffer = buffer.read(cx);
- assert_eq!(
- buffer.deferred_ops_len(),
- 0,
- "{} has deferred operations for buffer {:?} in project {:?}",
- client.username,
- buffer.file().unwrap().full_path(cx),
- guest_project.remote_id(),
- );
- }
- });
- }
-
- let buffers = client.buffers().clone();
- for (guest_project, guest_buffers) in &buffers {
- let project_id = if guest_project.read_with(client_cx, |project, _| {
- project.is_local() || project.is_read_only()
- }) {
- continue;
- } else {
- guest_project
- .read_with(client_cx, |project, _| project.remote_id())
- .unwrap()
- };
- let guest_user_id = client.user_id().unwrap();
-
- let host_project = clients.iter().find_map(|(client, cx)| {
- let project = client
- .local_projects()
- .iter()
- .find(|host_project| {
- host_project.read_with(cx, |host_project, _| {
- host_project.remote_id() == Some(project_id)
- })
- })?
- .clone();
- Some((client.user_id().unwrap(), project, cx))
- });
-
- let (host_user_id, host_project, host_cx) =
- if let Some((host_user_id, host_project, host_cx)) = host_project {
- (host_user_id, host_project, host_cx)
- } else {
- continue;
- };
-
- for guest_buffer in guest_buffers {
- let buffer_id = guest_buffer.read_with(client_cx, |buffer, _| buffer.remote_id());
- let host_buffer = host_project.read_with(host_cx, |project, cx| {
- project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| {
- panic!(
- "host does not have buffer for guest:{}, peer:{:?}, id:{}",
- client.username,
- client.peer_id(),
- buffer_id
- )
- })
- });
- let path = host_buffer
- .read_with(host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx));
-
- assert_eq!(
- guest_buffer.read_with(client_cx, |buffer, _| buffer.deferred_ops_len()),
- 0,
- "{}, buffer {}, path {:?} has deferred operations",
- client.username,
- buffer_id,
- path,
- );
- assert_eq!(
- guest_buffer.read_with(client_cx, |buffer, _| buffer.text()),
- host_buffer.read_with(host_cx, |buffer, _| buffer.text()),
- "{}, buffer {}, path {:?}, differs from the host's buffer",
- client.username,
- buffer_id,
- path
- );
-
- let host_file = host_buffer.read_with(host_cx, |b, _| b.file().cloned());
- let guest_file = guest_buffer.read_with(client_cx, |b, _| b.file().cloned());
- match (host_file, guest_file) {
- (Some(host_file), Some(guest_file)) => {
- assert_eq!(guest_file.path(), host_file.path());
- assert_eq!(guest_file.is_deleted(), host_file.is_deleted());
- assert_eq!(
- guest_file.mtime(),
- host_file.mtime(),
- "guest {} mtime does not match host {} for path {:?} in project {}",
- guest_user_id,
- host_user_id,
- guest_file.path(),
- project_id,
- );
- }
- (None, None) => {}
- (None, _) => panic!("host's file is None, guest's isn't"),
-                    (_, None) => panic!("guest's file is None, host's isn't"),
- }
-
- let host_diff_base =
- host_buffer.read_with(host_cx, |b, _| b.diff_base().map(ToString::to_string));
- let guest_diff_base = guest_buffer
- .read_with(client_cx, |b, _| b.diff_base().map(ToString::to_string));
- assert_eq!(
- guest_diff_base, host_diff_base,
- "guest {} diff base does not match host's for path {path:?} in project {project_id}",
- client.username
- );
-
- let host_saved_version =
- host_buffer.read_with(host_cx, |b, _| b.saved_version().clone());
- let guest_saved_version =
- guest_buffer.read_with(client_cx, |b, _| b.saved_version().clone());
- assert_eq!(
- guest_saved_version, host_saved_version,
- "guest {} saved version does not match host's for path {path:?} in project {project_id}",
- client.username
- );
-
- let host_saved_version_fingerprint =
- host_buffer.read_with(host_cx, |b, _| b.saved_version_fingerprint());
- let guest_saved_version_fingerprint =
- guest_buffer.read_with(client_cx, |b, _| b.saved_version_fingerprint());
- assert_eq!(
- guest_saved_version_fingerprint, host_saved_version_fingerprint,
- "guest {} saved fingerprint does not match host's for path {path:?} in project {project_id}",
- client.username
- );
-
- let host_saved_mtime = host_buffer.read_with(host_cx, |b, _| b.saved_mtime());
- let guest_saved_mtime = guest_buffer.read_with(client_cx, |b, _| b.saved_mtime());
- assert_eq!(
- guest_saved_mtime, host_saved_mtime,
- "guest {} saved mtime does not match host's for path {path:?} in project {project_id}",
- client.username
- );
-
- let host_is_dirty = host_buffer.read_with(host_cx, |b, _| b.is_dirty());
- let guest_is_dirty = guest_buffer.read_with(client_cx, |b, _| b.is_dirty());
- assert_eq!(guest_is_dirty, host_is_dirty,
- "guest {} dirty status does not match host's for path {path:?} in project {project_id}",
- client.username
- );
-
- let host_has_conflict = host_buffer.read_with(host_cx, |b, _| b.has_conflict());
- let guest_has_conflict = guest_buffer.read_with(client_cx, |b, _| b.has_conflict());
- assert_eq!(guest_has_conflict, host_has_conflict,
- "guest {} conflict status does not match host's for path {path:?} in project {project_id}",
- client.username
- );
- }
- }
- }
-}
-
-struct TestPlan {
- rng: StdRng,
- replay: bool,
- stored_operations: Vec<(StoredOperation, Arc<AtomicBool>)>,
- max_operations: usize,
- operation_ix: usize,
- users: Vec<UserTestPlan>,
- next_batch_id: usize,
- allow_server_restarts: bool,
- allow_client_reconnection: bool,
- allow_client_disconnection: bool,
-}
-
-struct UserTestPlan {
- user_id: UserId,
- username: String,
- next_root_id: usize,
- operation_ix: usize,
- online: bool,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-#[serde(untagged)]
-enum StoredOperation {
- Server(Operation),
- Client {
- user_id: UserId,
- batch_id: usize,
- operation: ClientOperation,
- },
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-enum Operation {
- AddConnection {
- user_id: UserId,
- },
- RemoveConnection {
- user_id: UserId,
- },
- BounceConnection {
- user_id: UserId,
- },
- RestartServer,
- MutateClients {
- batch_id: usize,
- #[serde(skip_serializing)]
- #[serde(skip_deserializing)]
- user_ids: Vec<UserId>,
- quiesce: bool,
- },
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-enum ClientOperation {
- AcceptIncomingCall,
- RejectIncomingCall,
- LeaveCall,
- InviteContactToCall {
- user_id: UserId,
- },
- OpenLocalProject {
- first_root_name: String,
- },
- OpenRemoteProject {
- host_id: UserId,
- first_root_name: String,
- },
- AddWorktreeToProject {
- project_root_name: String,
- new_root_path: PathBuf,
- },
- CloseRemoteProject {
- project_root_name: String,
- },
- OpenBuffer {
- project_root_name: String,
- is_local: bool,
- full_path: PathBuf,
- },
- SearchProject {
- project_root_name: String,
- is_local: bool,
- query: String,
- detach: bool,
- },
- EditBuffer {
- project_root_name: String,
- is_local: bool,
- full_path: PathBuf,
- edits: Vec<(Range<usize>, Arc<str>)>,
- },
- CloseBuffer {
- project_root_name: String,
- is_local: bool,
- full_path: PathBuf,
- },
- SaveBuffer {
- project_root_name: String,
- is_local: bool,
- full_path: PathBuf,
- detach: bool,
- },
- RequestLspDataInBuffer {
- project_root_name: String,
- is_local: bool,
- full_path: PathBuf,
- offset: usize,
- kind: LspRequestKind,
- detach: bool,
- },
- CreateWorktreeEntry {
- project_root_name: String,
- is_local: bool,
- full_path: PathBuf,
- is_dir: bool,
- },
- WriteFsEntry {
- path: PathBuf,
- is_dir: bool,
- content: String,
- },
- GitOperation {
- operation: GitOperation,
- },
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-enum GitOperation {
- WriteGitIndex {
- repo_path: PathBuf,
- contents: Vec<(PathBuf, String)>,
- },
- WriteGitBranch {
- repo_path: PathBuf,
- new_branch: Option<String>,
- },
- WriteGitStatuses {
- repo_path: PathBuf,
- statuses: Vec<(PathBuf, GitFileStatus)>,
- git_operation: bool,
- },
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-enum LspRequestKind {
- Rename,
- Completion,
- CodeAction,
- Definition,
- Highlights,
-}
-
-enum TestError {
- Inapplicable,
- Other(anyhow::Error),
-}
-
-impl From<anyhow::Error> for TestError {
- fn from(value: anyhow::Error) -> Self {
- Self::Other(value)
- }
-}
-
-impl TestPlan {
- fn new(mut rng: StdRng, users: Vec<UserTestPlan>, max_operations: usize) -> Self {
- Self {
- replay: false,
- allow_server_restarts: rng.gen_bool(0.7),
- allow_client_reconnection: rng.gen_bool(0.7),
- allow_client_disconnection: rng.gen_bool(0.1),
- stored_operations: Vec::new(),
- operation_ix: 0,
- next_batch_id: 0,
- max_operations,
- users,
- rng,
- }
- }
-
- fn deserialize(&mut self, json: Vec<u8>) {
- let stored_operations: Vec<StoredOperation> = serde_json::from_slice(&json).unwrap();
- self.replay = true;
- self.stored_operations = stored_operations
- .iter()
- .cloned()
- .enumerate()
- .map(|(i, mut operation)| {
- if let StoredOperation::Server(Operation::MutateClients {
- batch_id: current_batch_id,
- user_ids,
- ..
- }) = &mut operation
- {
- assert!(user_ids.is_empty());
- user_ids.extend(stored_operations[i + 1..].iter().filter_map(|operation| {
- if let StoredOperation::Client {
- user_id, batch_id, ..
- } = operation
- {
- if batch_id == current_batch_id {
- return Some(user_id);
- }
- }
- None
- }));
- user_ids.sort_unstable();
- }
- (operation, Arc::new(AtomicBool::new(false)))
- })
- .collect()
- }
-
- fn serialize(&mut self) -> Vec<u8> {
- // Format each operation as one line
- let mut json = Vec::new();
- json.push(b'[');
- for (operation, applied) in &self.stored_operations {
- if !applied.load(SeqCst) {
- continue;
- }
- if json.len() > 1 {
- json.push(b',');
- }
- json.extend_from_slice(b"\n ");
- serde_json::to_writer(&mut json, operation).unwrap();
- }
- json.extend_from_slice(b"\n]\n");
- json
- }
-
- fn next_server_operation(
- &mut self,
- clients: &[(Rc<TestClient>, TestAppContext)],
- ) -> Option<(Operation, Arc<AtomicBool>)> {
- if self.replay {
- while let Some(stored_operation) = self.stored_operations.get(self.operation_ix) {
- self.operation_ix += 1;
- if let (StoredOperation::Server(operation), applied) = stored_operation {
- return Some((operation.clone(), applied.clone()));
- }
- }
- None
- } else {
- let operation = self.generate_server_operation(clients)?;
- let applied = Arc::new(AtomicBool::new(false));
- self.stored_operations
- .push((StoredOperation::Server(operation.clone()), applied.clone()));
- Some((operation, applied))
- }
- }
-
- fn next_client_operation(
- &mut self,
- client: &TestClient,
- current_batch_id: usize,
- cx: &TestAppContext,
- ) -> Option<(ClientOperation, Arc<AtomicBool>)> {
- let current_user_id = client.current_user_id(cx);
- let user_ix = self
- .users
- .iter()
- .position(|user| user.user_id == current_user_id)
- .unwrap();
- let user_plan = &mut self.users[user_ix];
-
- if self.replay {
- while let Some(stored_operation) = self.stored_operations.get(user_plan.operation_ix) {
- user_plan.operation_ix += 1;
- if let (
- StoredOperation::Client {
- user_id, operation, ..
- },
- applied,
- ) = stored_operation
- {
-                    if user_id == &current_user_id {
- return Some((operation.clone(), applied.clone()));
- }
- }
- }
- None
- } else {
- let operation = self.generate_client_operation(current_user_id, client, cx)?;
- let applied = Arc::new(AtomicBool::new(false));
- self.stored_operations.push((
- StoredOperation::Client {
- user_id: current_user_id,
- batch_id: current_batch_id,
- operation: operation.clone(),
- },
- applied.clone(),
- ));
- Some((operation, applied))
- }
- }
-
- fn generate_server_operation(
- &mut self,
- clients: &[(Rc<TestClient>, TestAppContext)],
- ) -> Option<Operation> {
- if self.operation_ix == self.max_operations {
- return None;
- }
-
- Some(loop {
- break match self.rng.gen_range(0..100) {
- 0..=29 if clients.len() < self.users.len() => {
- let user = self
- .users
- .iter()
- .filter(|u| !u.online)
- .choose(&mut self.rng)
- .unwrap();
- self.operation_ix += 1;
- Operation::AddConnection {
- user_id: user.user_id,
- }
- }
- 30..=34 if clients.len() > 1 && self.allow_client_disconnection => {
- let (client, cx) = &clients[self.rng.gen_range(0..clients.len())];
- let user_id = client.current_user_id(cx);
- self.operation_ix += 1;
- Operation::RemoveConnection { user_id }
- }
- 35..=39 if clients.len() > 1 && self.allow_client_reconnection => {
- let (client, cx) = &clients[self.rng.gen_range(0..clients.len())];
- let user_id = client.current_user_id(cx);
- self.operation_ix += 1;
- Operation::BounceConnection { user_id }
- }
- 40..=44 if self.allow_server_restarts && clients.len() > 1 => {
- self.operation_ix += 1;
- Operation::RestartServer
- }
- _ if !clients.is_empty() => {
- let count = self
- .rng
- .gen_range(1..10)
- .min(self.max_operations - self.operation_ix);
- let batch_id = util::post_inc(&mut self.next_batch_id);
- let mut user_ids = (0..count)
- .map(|_| {
- let ix = self.rng.gen_range(0..clients.len());
- let (client, cx) = &clients[ix];
- client.current_user_id(cx)
- })
- .collect::<Vec<_>>();
- user_ids.sort_unstable();
- Operation::MutateClients {
- user_ids,
- batch_id,
- quiesce: self.rng.gen_bool(0.7),
- }
- }
- _ => continue,
- };
- })
- }
-
- fn generate_client_operation(
- &mut self,
- user_id: UserId,
- client: &TestClient,
- cx: &TestAppContext,
- ) -> Option<ClientOperation> {
- if self.operation_ix == self.max_operations {
- return None;
- }
-
- self.operation_ix += 1;
- let call = cx.read(ActiveCall::global);
- Some(loop {
- match self.rng.gen_range(0..100_u32) {
- // Mutate the call
- 0..=29 => {
- // Respond to an incoming call
- if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) {
- break if self.rng.gen_bool(0.7) {
- ClientOperation::AcceptIncomingCall
- } else {
- ClientOperation::RejectIncomingCall
- };
- }
-
- match self.rng.gen_range(0..100_u32) {
- // Invite a contact to the current call
- 0..=70 => {
- let available_contacts =
- client.user_store().read_with(cx, |user_store, _| {
- user_store
- .contacts()
- .iter()
- .filter(|contact| contact.online && !contact.busy)
- .cloned()
- .collect::<Vec<_>>()
- });
- if !available_contacts.is_empty() {
- let contact = available_contacts.choose(&mut self.rng).unwrap();
- break ClientOperation::InviteContactToCall {
- user_id: UserId(contact.user.id as i32),
- };
- }
- }
-
- // Leave the current call
- 71.. => {
- if self.allow_client_disconnection
- && call.read_with(cx, |call, _| call.room().is_some())
- {
- break ClientOperation::LeaveCall;
- }
- }
- }
- }
-
- // Mutate projects
- 30..=59 => match self.rng.gen_range(0..100_u32) {
- // Open a new project
- 0..=70 => {
- // Open a remote project
- if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) {
- let existing_remote_project_ids = cx.read(|cx| {
- client
- .remote_projects()
- .iter()
- .map(|p| p.read(cx).remote_id().unwrap())
- .collect::<Vec<_>>()
- });
- let new_remote_projects = room.read_with(cx, |room, _| {
- room.remote_participants()
- .values()
- .flat_map(|participant| {
- participant.projects.iter().filter_map(|project| {
- if existing_remote_project_ids.contains(&project.id) {
- None
- } else {
- Some((
- UserId::from_proto(participant.user.id),
- project.worktree_root_names[0].clone(),
- ))
- }
- })
- })
- .collect::<Vec<_>>()
- });
- if !new_remote_projects.is_empty() {
- let (host_id, first_root_name) =
- new_remote_projects.choose(&mut self.rng).unwrap().clone();
- break ClientOperation::OpenRemoteProject {
- host_id,
- first_root_name,
- };
- }
- }
- // Open a local project
- else {
- let first_root_name = self.next_root_dir_name(user_id);
- break ClientOperation::OpenLocalProject { first_root_name };
- }
- }
-
- // Close a remote project
- 71..=80 => {
- if !client.remote_projects().is_empty() {
- let project = client
- .remote_projects()
- .choose(&mut self.rng)
- .unwrap()
- .clone();
- let first_root_name = root_name_for_project(&project, cx);
- break ClientOperation::CloseRemoteProject {
- project_root_name: first_root_name,
- };
- }
- }
-
- // Mutate project worktrees
- 81.. => match self.rng.gen_range(0..100_u32) {
- // Add a worktree to a local project
- 0..=50 => {
- let Some(project) =
- client.local_projects().choose(&mut self.rng).cloned()
- else {
- continue;
- };
- let project_root_name = root_name_for_project(&project, cx);
- let mut paths = client.fs().paths(false);
- paths.remove(0);
- let new_root_path = if paths.is_empty() || self.rng.gen() {
- Path::new("/").join(&self.next_root_dir_name(user_id))
- } else {
- paths.choose(&mut self.rng).unwrap().clone()
- };
- break ClientOperation::AddWorktreeToProject {
- project_root_name,
- new_root_path,
- };
- }
-
- // Add an entry to a worktree
- _ => {
- let Some(project) = choose_random_project(client, &mut self.rng) else {
- continue;
- };
- let project_root_name = root_name_for_project(&project, cx);
- let is_local = project.read_with(cx, |project, _| project.is_local());
- let worktree = project.read_with(cx, |project, cx| {
- project
- .worktrees(cx)
- .filter(|worktree| {
- let worktree = worktree.read(cx);
- worktree.is_visible()
- && worktree.entries(false).any(|e| e.is_file())
- && worktree.root_entry().map_or(false, |e| e.is_dir())
- })
- .choose(&mut self.rng)
- });
- let Some(worktree) = worktree else { continue };
- let is_dir = self.rng.gen::<bool>();
- let mut full_path =
- worktree.read_with(cx, |w, _| PathBuf::from(w.root_name()));
- full_path.push(gen_file_name(&mut self.rng));
- if !is_dir {
- full_path.set_extension("rs");
- }
- break ClientOperation::CreateWorktreeEntry {
- project_root_name,
- is_local,
- full_path,
- is_dir,
- };
- }
- },
- },
-
- // Query and mutate buffers
- 60..=90 => {
- let Some(project) = choose_random_project(client, &mut self.rng) else {
- continue;
- };
- let project_root_name = root_name_for_project(&project, cx);
- let is_local = project.read_with(cx, |project, _| project.is_local());
-
- match self.rng.gen_range(0..100_u32) {
- // Manipulate an existing buffer
- 0..=70 => {
- let Some(buffer) = client
- .buffers_for_project(&project)
- .iter()
- .choose(&mut self.rng)
- .cloned()
- else {
- continue;
- };
-
- let full_path = buffer
- .read_with(cx, |buffer, cx| buffer.file().unwrap().full_path(cx));
-
- match self.rng.gen_range(0..100_u32) {
- // Close the buffer
- 0..=15 => {
- break ClientOperation::CloseBuffer {
- project_root_name,
- is_local,
- full_path,
- };
- }
- // Save the buffer
- 16..=29 if buffer.read_with(cx, |b, _| b.is_dirty()) => {
- let detach = self.rng.gen_bool(0.3);
- break ClientOperation::SaveBuffer {
- project_root_name,
- is_local,
- full_path,
- detach,
- };
- }
- // Edit the buffer
- 30..=69 => {
- let edits = buffer.read_with(cx, |buffer, _| {
- buffer.get_random_edits(&mut self.rng, 3)
- });
- break ClientOperation::EditBuffer {
- project_root_name,
- is_local,
- full_path,
- edits,
- };
- }
- // Make an LSP request
- _ => {
- let offset = buffer.read_with(cx, |buffer, _| {
- buffer.clip_offset(
- self.rng.gen_range(0..=buffer.len()),
- language::Bias::Left,
- )
- });
- let detach = self.rng.gen();
- break ClientOperation::RequestLspDataInBuffer {
- project_root_name,
- full_path,
- offset,
- is_local,
- kind: match self.rng.gen_range(0..5_u32) {
- 0 => LspRequestKind::Rename,
- 1 => LspRequestKind::Highlights,
- 2 => LspRequestKind::Definition,
- 3 => LspRequestKind::CodeAction,
- 4.. => LspRequestKind::Completion,
- },
- detach,
- };
- }
- }
- }
-
- 71..=80 => {
- let query = self.rng.gen_range('a'..='z').to_string();
- let detach = self.rng.gen_bool(0.3);
- break ClientOperation::SearchProject {
- project_root_name,
- is_local,
- query,
- detach,
- };
- }
-
- // Open a buffer
- 81.. => {
- let worktree = project.read_with(cx, |project, cx| {
- project
- .worktrees(cx)
- .filter(|worktree| {
- let worktree = worktree.read(cx);
- worktree.is_visible()
- && worktree.entries(false).any(|e| e.is_file())
- })
- .choose(&mut self.rng)
- });
- let Some(worktree) = worktree else { continue };
- let full_path = worktree.read_with(cx, |worktree, _| {
- let entry = worktree
- .entries(false)
- .filter(|e| e.is_file())
- .choose(&mut self.rng)
- .unwrap();
- if entry.path.as_ref() == Path::new("") {
- Path::new(worktree.root_name()).into()
- } else {
- Path::new(worktree.root_name()).join(&entry.path)
- }
- });
- break ClientOperation::OpenBuffer {
- project_root_name,
- is_local,
- full_path,
- };
- }
- }
- }
-
-            // Perform a git-related operation
- 91..=95 => {
- break ClientOperation::GitOperation {
- operation: self.generate_git_operation(client),
- };
- }
-
- // Create or update a file or directory
- 96.. => {
- let is_dir = self.rng.gen::<bool>();
- let content;
- let mut path;
- let dir_paths = client.fs().directories(false);
-
- if is_dir {
- content = String::new();
- path = dir_paths.choose(&mut self.rng).unwrap().clone();
- path.push(gen_file_name(&mut self.rng));
- } else {
- content = Alphanumeric.sample_string(&mut self.rng, 16);
-
- // Create a new file or overwrite an existing file
- let file_paths = client.fs().files();
- if file_paths.is_empty() || self.rng.gen_bool(0.5) {
- path = dir_paths.choose(&mut self.rng).unwrap().clone();
- path.push(gen_file_name(&mut self.rng));
- path.set_extension("rs");
- } else {
- path = file_paths.choose(&mut self.rng).unwrap().clone()
- };
- }
- break ClientOperation::WriteFsEntry {
- path,
- is_dir,
- content,
- };
- }
- }
- })
- }
-
- fn generate_git_operation(&mut self, client: &TestClient) -> GitOperation {
- fn generate_file_paths(
- repo_path: &Path,
- rng: &mut StdRng,
- client: &TestClient,
- ) -> Vec<PathBuf> {
- let mut paths = client
- .fs()
- .files()
- .into_iter()
- .filter(|path| path.starts_with(repo_path))
- .collect::<Vec<_>>();
-
- let count = rng.gen_range(0..=paths.len());
- paths.shuffle(rng);
- paths.truncate(count);
-
- paths
- .iter()
- .map(|path| path.strip_prefix(repo_path).unwrap().to_path_buf())
- .collect::<Vec<_>>()
- }
-
- let repo_path = client
- .fs()
- .directories(false)
- .choose(&mut self.rng)
- .unwrap()
- .clone();
-
- match self.rng.gen_range(0..100_u32) {
- 0..=25 => {
- let file_paths = generate_file_paths(&repo_path, &mut self.rng, client);
-
- let contents = file_paths
- .into_iter()
- .map(|path| (path, Alphanumeric.sample_string(&mut self.rng, 16)))
- .collect();
-
- GitOperation::WriteGitIndex {
- repo_path,
- contents,
- }
- }
- 26..=63 => {
- let new_branch = (self.rng.gen_range(0..10) > 3)
- .then(|| Alphanumeric.sample_string(&mut self.rng, 8));
-
- GitOperation::WriteGitBranch {
- repo_path,
- new_branch,
- }
- }
- 64..=100 => {
- let file_paths = generate_file_paths(&repo_path, &mut self.rng, client);
-
- let statuses = file_paths
- .into_iter()
-                    .map(|path| {
-                        (
-                            path,
- match self.rng.gen_range(0..3_u32) {
- 0 => GitFileStatus::Added,
- 1 => GitFileStatus::Modified,
- 2 => GitFileStatus::Conflict,
- _ => unreachable!(),
- },
- )
- })
- .collect::<Vec<_>>();
-
- let git_operation = self.rng.gen::<bool>();
-
- GitOperation::WriteGitStatuses {
- repo_path,
- statuses,
- git_operation,
- }
- }
- _ => unreachable!(),
- }
- }
-
- fn next_root_dir_name(&mut self, user_id: UserId) -> String {
- let user_ix = self
- .users
- .iter()
- .position(|user| user.user_id == user_id)
- .unwrap();
- let root_id = util::post_inc(&mut self.users[user_ix].next_root_id);
- format!("dir-{user_id}-{root_id}")
- }
-
- fn user(&mut self, user_id: UserId) -> &mut UserTestPlan {
- let ix = self
- .users
- .iter()
- .position(|user| user.user_id == user_id)
- .unwrap();
- &mut self.users[ix]
- }
-}
-
-async fn simulate_client(
- client: Rc<TestClient>,
- mut operation_rx: futures::channel::mpsc::UnboundedReceiver<usize>,
- plan: Arc<Mutex<TestPlan>>,
- mut cx: TestAppContext,
-) {
- // Setup language server
- let mut language = Language::new(
- LanguageConfig {
- name: "Rust".into(),
- path_suffixes: vec!["rs".to_string()],
- ..Default::default()
- },
- None,
- );
- let _fake_language_servers = language
- .set_fake_lsp_adapter(Arc::new(FakeLspAdapter {
- name: "the-fake-language-server",
- capabilities: lsp::LanguageServer::full_capabilities(),
- initializer: Some(Box::new({
- let fs = client.app_state.fs.clone();
- move |fake_server: &mut FakeLanguageServer| {
- fake_server.handle_request::<lsp::request::Completion, _, _>(
- |_, _| async move {
- Ok(Some(lsp::CompletionResponse::Array(vec![
- lsp::CompletionItem {
- text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit {
- range: lsp::Range::new(
- lsp::Position::new(0, 0),
- lsp::Position::new(0, 0),
- ),
- new_text: "the-new-text".to_string(),
- })),
- ..Default::default()
- },
- ])))
- },
- );
-
- fake_server.handle_request::<lsp::request::CodeActionRequest, _, _>(
- |_, _| async move {
- Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction(
- lsp::CodeAction {
- title: "the-code-action".to_string(),
- ..Default::default()
- },
- )]))
- },
- );
-
- fake_server.handle_request::<lsp::request::PrepareRenameRequest, _, _>(
- |params, _| async move {
- Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new(
- params.position,
- params.position,
- ))))
- },
- );
-
- fake_server.handle_request::<lsp::request::GotoDefinition, _, _>({
- let fs = fs.clone();
- move |_, cx| {
- let background = cx.background();
- let mut rng = background.rng();
- let count = rng.gen_range::<usize, _>(1..3);
- let files = fs.as_fake().files();
- let files = (0..count)
- .map(|_| files.choose(&mut *rng).unwrap().clone())
- .collect::<Vec<_>>();
- async move {
- log::info!("LSP: Returning definitions in files {:?}", &files);
- Ok(Some(lsp::GotoDefinitionResponse::Array(
- files
- .into_iter()
- .map(|file| lsp::Location {
- uri: lsp::Url::from_file_path(file).unwrap(),
- range: Default::default(),
- })
- .collect(),
- )))
- }
- }
- });
-
- fake_server.handle_request::<lsp::request::DocumentHighlightRequest, _, _>(
- move |_, cx| {
- let mut highlights = Vec::new();
- let background = cx.background();
- let mut rng = background.rng();
-
- let highlight_count = rng.gen_range(1..=5);
- for _ in 0..highlight_count {
- let start_row = rng.gen_range(0..100);
- let start_column = rng.gen_range(0..100);
- let end_row = rng.gen_range(0..100);
- let end_column = rng.gen_range(0..100);
- let start = PointUtf16::new(start_row, start_column);
- let end = PointUtf16::new(end_row, end_column);
- let range = if start > end { end..start } else { start..end };
- highlights.push(lsp::DocumentHighlight {
- range: range_to_lsp(range.clone()),
- kind: Some(lsp::DocumentHighlightKind::READ),
- });
- }
- highlights.sort_unstable_by_key(|highlight| {
- (highlight.range.start, highlight.range.end)
- });
- async move { Ok(Some(highlights)) }
- },
- );
- }
- })),
- ..Default::default()
- }))
- .await;
- client.app_state.languages.add(Arc::new(language));
-
- while let Some(batch_id) = operation_rx.next().await {
- let Some((operation, applied)) = plan.lock().next_client_operation(&client, batch_id, &cx)
- else {
- break;
- };
- applied.store(true, SeqCst);
- match apply_client_operation(&client, operation, &mut cx).await {
- Ok(()) => {}
- Err(TestError::Inapplicable) => {
- applied.store(false, SeqCst);
- log::info!("skipped operation");
- }
- Err(TestError::Other(error)) => {
- log::error!("{} error: {}", client.username, error);
- }
- }
- cx.background().simulate_random_delay().await;
- }
- log::info!("{}: done", client.username);
-}
-
-fn buffer_for_full_path(
- client: &TestClient,
- project: &ModelHandle<Project>,
- full_path: &PathBuf,
- cx: &TestAppContext,
-) -> Option<ModelHandle<language::Buffer>> {
- client
- .buffers_for_project(project)
- .iter()
- .find(|buffer| {
- buffer.read_with(cx, |buffer, cx| {
- buffer.file().unwrap().full_path(cx) == *full_path
- })
- })
- .cloned()
-}
-
-fn project_for_root_name(
- client: &TestClient,
- root_name: &str,
- cx: &TestAppContext,
-) -> Option<ModelHandle<Project>> {
- if let Some(ix) = project_ix_for_root_name(&*client.local_projects(), root_name, cx) {
- return Some(client.local_projects()[ix].clone());
- }
- if let Some(ix) = project_ix_for_root_name(&*client.remote_projects(), root_name, cx) {
- return Some(client.remote_projects()[ix].clone());
- }
- None
-}
-
-fn project_ix_for_root_name(
- projects: &[ModelHandle<Project>],
- root_name: &str,
- cx: &TestAppContext,
-) -> Option<usize> {
- projects.iter().position(|project| {
- project.read_with(cx, |project, cx| {
- let worktree = project.visible_worktrees(cx).next().unwrap();
- worktree.read(cx).root_name() == root_name
- })
- })
-}
-
-fn root_name_for_project(project: &ModelHandle<Project>, cx: &TestAppContext) -> String {
- project.read_with(cx, |project, cx| {
- project
- .visible_worktrees(cx)
- .next()
- .unwrap()
- .read(cx)
- .root_name()
- .to_string()
- })
-}
-
-fn project_path_for_full_path(
- project: &ModelHandle<Project>,
- full_path: &Path,
- cx: &TestAppContext,
-) -> Option<ProjectPath> {
- let mut components = full_path.components();
- let root_name = components.next().unwrap().as_os_str().to_str().unwrap();
- let path = components.as_path().into();
- let worktree_id = project.read_with(cx, |project, cx| {
- project.worktrees(cx).find_map(|worktree| {
- let worktree = worktree.read(cx);
- if worktree.root_name() == root_name {
- Some(worktree.id())
- } else {
- None
- }
- })
- })?;
- Some(ProjectPath { worktree_id, path })
-}
-
-async fn ensure_project_shared(
- project: &ModelHandle<Project>,
- client: &TestClient,
- cx: &mut TestAppContext,
-) {
- let first_root_name = root_name_for_project(project, cx);
- let active_call = cx.read(ActiveCall::global);
- if active_call.read_with(cx, |call, _| call.room().is_some())
- && project.read_with(cx, |project, _| project.is_local() && !project.is_shared())
- {
- match active_call
- .update(cx, |call, cx| call.share_project(project.clone(), cx))
- .await
- {
- Ok(project_id) => {
- log::info!(
- "{}: shared project {} with id {}",
- client.username,
- first_root_name,
- project_id
- );
- }
- Err(error) => {
- log::error!(
- "{}: error sharing project {}: {:?}",
- client.username,
- first_root_name,
- error
- );
- }
- }
- }
-}
-
-fn choose_random_project(client: &TestClient, rng: &mut StdRng) -> Option<ModelHandle<Project>> {
- client
- .local_projects()
- .iter()
- .chain(client.remote_projects().iter())
- .choose(rng)
- .cloned()
-}
-
-fn gen_file_name(rng: &mut StdRng) -> String {
- let mut name = String::new();
- for _ in 0..10 {
- let letter = rng.gen_range('a'..='z');
- name.push(letter);
- }
- name
-}
-
-fn path_env_var(name: &str) -> Option<PathBuf> {
- let value = env::var(name).ok()?;
- let mut path = PathBuf::from(value);
- if path.is_relative() {
- let mut abs_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
- abs_path.pop();
- abs_path.pop();
- abs_path.push(path);
- path = abs_path
- }
- Some(path)
-}
@@ -0,0 +1,694 @@
+use crate::{
+ db::{self, Database, NewUserParams, UserId},
+ rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT},
+ tests::{TestClient, TestServer},
+};
+use async_trait::async_trait;
+use futures::StreamExt;
+use gpui::{executor::Deterministic, Task, TestAppContext};
+use parking_lot::Mutex;
+use rand::prelude::*;
+use rpc::RECEIVE_TIMEOUT;
+use serde::{de::DeserializeOwned, Deserialize, Serialize};
+use settings::SettingsStore;
+use std::{
+ env,
+ path::PathBuf,
+ rc::Rc,
+ sync::{
+ atomic::{AtomicBool, Ordering::SeqCst},
+ Arc,
+ },
+};
+
+lazy_static::lazy_static! {
+ static ref PLAN_LOAD_PATH: Option<PathBuf> = path_env_var("LOAD_PLAN");
+ static ref PLAN_SAVE_PATH: Option<PathBuf> = path_env_var("SAVE_PLAN");
+ static ref MAX_PEERS: usize = env::var("MAX_PEERS")
+ .map(|i| i.parse().expect("invalid `MAX_PEERS` variable"))
+ .unwrap_or(3);
+ static ref MAX_OPERATIONS: usize = env::var("OPERATIONS")
+ .map(|i| i.parse().expect("invalid `OPERATIONS` variable"))
+ .unwrap_or(10);
+}
+
+static LOADED_PLAN_JSON: Mutex<Option<Vec<u8>>> = Mutex::new(None);
+static LAST_PLAN: Mutex<Option<Box<dyn Send + FnOnce() -> Vec<u8>>>> = Mutex::new(None);
+
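+/// A sequence of server and client operations, either generated lazily from `rng`
+/// or replayed from a previously stored plan.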
+struct TestPlan<T: RandomizedTest> {
+ rng: StdRng,
+ replay: bool,
+ stored_operations: Vec<(StoredOperation<T::Operation>, Arc<AtomicBool>)>,
+ max_operations: usize,
+ operation_ix: usize,
+ users: Vec<UserTestPlan>,
+ next_batch_id: usize,
+ allow_server_restarts: bool,
+ allow_client_reconnection: bool,
+ allow_client_disconnection: bool,
+}
+
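+/// Per-user state within a test plan: the user's identity, whether they are
+/// currently online, and their progress through the stored operations.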
+pub struct UserTestPlan {
+ pub user_id: UserId,
+ pub username: String,
+ pub allow_client_reconnection: bool,
+ pub allow_client_disconnection: bool,
+ next_root_id: usize,
+ operation_ix: usize,
+ online: bool,
+}
+
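+/// One recorded operation in a serialized plan: either a server operation or a
+/// client operation tagged with the acting user and its batch.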
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(untagged)]
+enum StoredOperation<T> {
+ Server(ServerOperation),
+ Client {
+ user_id: UserId,
+ batch_id: usize,
+ operation: T,
+ },
+}
+
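+/// Operations on the server and its connections, as opposed to operations
+/// performed by an individual client.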
+#[derive(Clone, Debug, Serialize, Deserialize)]
+enum ServerOperation {
+ AddConnection {
+ user_id: UserId,
+ },
+ RemoveConnection {
+ user_id: UserId,
+ },
+ BounceConnection {
+ user_id: UserId,
+ },
+ RestartServer,
+ MutateClients {
+ batch_id: usize,
+ #[serde(skip_serializing)]
+ #[serde(skip_deserializing)]
+ user_ids: Vec<UserId>,
+ quiesce: bool,
+ },
+}
+
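+/// An error applying a client operation. `Inapplicable` marks operations that
+/// make no sense in the current state and should be dropped from the plan.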
+pub enum TestError {
+ Inapplicable,
+ Other(anyhow::Error),
+}
+
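+/// The domain-specific half of a randomized collaboration test. Implementors
+/// generate and apply per-client operations, while the shared harness in this
+/// module drives connections, reconnections, and server restarts.
+///
+/// A minimal sketch of an implementor (the `NoOpTest` type is hypothetical,
+/// for illustration only):
+///
+/// ```ignore
+/// struct NoOpTest;
+///
+/// #[async_trait(?Send)]
+/// impl RandomizedTest for NoOpTest {
+///     type Operation = ();
+///
+///     fn generate_operation(
+///         _client: &TestClient,
+///         _rng: &mut StdRng,
+///         _plan: &mut UserTestPlan,
+///         _cx: &TestAppContext,
+///     ) -> Self::Operation {
+///     }
+///
+///     async fn on_client_added(_client: &Rc<TestClient>) {}
+///
+///     fn on_clients_quiesced(_clients: &[(Rc<TestClient>, TestAppContext)]) {}
+///
+///     async fn apply_operation(
+///         _client: &TestClient,
+///         _operation: Self::Operation,
+///         _cx: &mut TestAppContext,
+///     ) -> Result<(), TestError> {
+///         Ok(())
+///     }
+/// }
+/// ```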
+#[async_trait(?Send)]
+pub trait RandomizedTest: 'static + Sized {
+ type Operation: Send + Clone + Serialize + DeserializeOwned;
+
+ fn generate_operation(
+ client: &TestClient,
+ rng: &mut StdRng,
+ plan: &mut UserTestPlan,
+ cx: &TestAppContext,
+ ) -> Self::Operation;
+
+ async fn on_client_added(client: &Rc<TestClient>);
+
+    fn on_clients_quiesced(clients: &[(Rc<TestClient>, TestAppContext)]);
+
+ async fn apply_operation(
+ client: &TestClient,
+ operation: Self::Operation,
+ cx: &mut TestAppContext,
+ ) -> Result<(), TestError>;
+}
+
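+/// Run a full randomized test: start a server, apply server operations from
+/// the plan until it is exhausted, wait for every simulated client to finish,
+/// and let `T` verify the quiesced state.
+///
+/// Typically wrapped by a `#[gpui::test]` function. A sketch, assuming a
+/// `ProjectCollaborationTest` implementor of `RandomizedTest` (the name is
+/// illustrative):
+///
+/// ```ignore
+/// #[gpui::test(
+///     iterations = 100,
+///     on_failure = "crate::tests::save_randomized_test_plan"
+/// )]
+/// async fn test_random_collaboration(
+///     cx: &mut TestAppContext,
+///     deterministic: Arc<Deterministic>,
+///     rng: StdRng,
+/// ) {
+///     run_randomized_test::<ProjectCollaborationTest>(cx, deterministic, rng).await;
+/// }
+/// ```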
+pub async fn run_randomized_test<T: RandomizedTest>(
+ cx: &mut TestAppContext,
+ deterministic: Arc<Deterministic>,
+ rng: StdRng,
+) {
+ deterministic.forbid_parking();
+ let mut server = TestServer::start(&deterministic).await;
+ let plan = TestPlan::<T>::new(server.app_state.db.clone(), rng).await;
+
+ LAST_PLAN.lock().replace({
+ let plan = plan.clone();
+ Box::new(move || plan.lock().serialize())
+ });
+
+ let mut clients = Vec::new();
+ let mut client_tasks = Vec::new();
+ let mut operation_channels = Vec::new();
+ loop {
+ let Some((next_operation, applied)) = plan.lock().next_server_operation(&clients) else {
+ break;
+ };
+ applied.store(true, SeqCst);
+ let did_apply = TestPlan::apply_server_operation(
+ plan.clone(),
+ deterministic.clone(),
+ &mut server,
+ &mut clients,
+ &mut client_tasks,
+ &mut operation_channels,
+ next_operation,
+ cx,
+ )
+ .await;
+ if !did_apply {
+ applied.store(false, SeqCst);
+ }
+ }
+
+ drop(operation_channels);
+ deterministic.start_waiting();
+ futures::future::join_all(client_tasks).await;
+ deterministic.finish_waiting();
+
+ deterministic.run_until_parked();
+ T::on_clients_quiesced(&clients);
+
+ for (client, mut cx) in clients {
+ cx.update(|cx| {
+ let store = cx.remove_global::<SettingsStore>();
+ cx.clear_globals();
+ cx.set_global(store);
+ drop(client);
+ });
+ }
+ deterministic.run_until_parked();
+
+ if let Some(path) = &*PLAN_SAVE_PATH {
+ eprintln!("saved test plan to path {:?}", path);
+ std::fs::write(path, plan.lock().serialize()).unwrap();
+ }
+}
+
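+/// Write the most recent plan to the `SAVE_PLAN` path, if set. Intended for
+/// use as the `on_failure` callback of a randomized `#[gpui::test]`, so a
+/// failing run can be replayed later via `LOAD_PLAN`.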
+pub fn save_randomized_test_plan() {
+ if let Some(serialize_plan) = LAST_PLAN.lock().take() {
+ if let Some(path) = &*PLAN_SAVE_PATH {
+ eprintln!("saved test plan to path {:?}", path);
+ std::fs::write(path, serialize_plan()).unwrap();
+ }
+ }
+}
+
+impl<T: RandomizedTest> TestPlan<T> {
+ pub async fn new(db: Arc<Database>, mut rng: StdRng) -> Arc<Mutex<Self>> {
+ let allow_server_restarts = rng.gen_bool(0.7);
+ let allow_client_reconnection = rng.gen_bool(0.7);
+ let allow_client_disconnection = rng.gen_bool(0.1);
+
+ let mut users = Vec::new();
+ for ix in 0..*MAX_PEERS {
+ let username = format!("user-{}", ix + 1);
+ let user_id = db
+ .create_user(
+ &format!("{username}@example.com"),
+ false,
+ NewUserParams {
+ github_login: username.clone(),
+ github_user_id: (ix + 1) as i32,
+ invite_count: 0,
+ },
+ )
+ .await
+ .unwrap()
+ .user_id;
+ users.push(UserTestPlan {
+ user_id,
+ username,
+ online: false,
+ next_root_id: 0,
+ operation_ix: 0,
+ allow_client_disconnection,
+ allow_client_reconnection,
+ });
+ }
+
+ for (ix, user_a) in users.iter().enumerate() {
+ for user_b in &users[ix + 1..] {
+ db.send_contact_request(user_a.user_id, user_b.user_id)
+ .await
+ .unwrap();
+ db.respond_to_contact_request(user_b.user_id, user_a.user_id, true)
+ .await
+ .unwrap();
+ }
+ }
+
+ let plan = Arc::new(Mutex::new(Self {
+ replay: false,
+ allow_server_restarts,
+ allow_client_reconnection,
+ allow_client_disconnection,
+ stored_operations: Vec::new(),
+ operation_ix: 0,
+ next_batch_id: 0,
+ max_operations: *MAX_OPERATIONS,
+ users,
+ rng,
+ }));
+
+ if let Some(path) = &*PLAN_LOAD_PATH {
+ let json = LOADED_PLAN_JSON
+ .lock()
+ .get_or_insert_with(|| {
+ eprintln!("loaded test plan from path {:?}", path);
+ std::fs::read(path).unwrap()
+ })
+ .clone();
+ plan.lock().deserialize(json);
+ }
+
+ plan
+ }
+
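+    /// Switch the plan into replay mode using previously saved JSON. The
+    /// `user_ids` of each `MutateClients` entry are not serialized, so they
+    /// are reconstructed from the client operations that share its batch id.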
+ fn deserialize(&mut self, json: Vec<u8>) {
+ let stored_operations: Vec<StoredOperation<T::Operation>> =
+ serde_json::from_slice(&json).unwrap();
+ self.replay = true;
+ self.stored_operations = stored_operations
+ .iter()
+ .cloned()
+ .enumerate()
+ .map(|(i, mut operation)| {
+ let did_apply = Arc::new(AtomicBool::new(false));
+ if let StoredOperation::Server(ServerOperation::MutateClients {
+ batch_id: current_batch_id,
+ user_ids,
+ ..
+ }) = &mut operation
+ {
+ assert!(user_ids.is_empty());
+ user_ids.extend(stored_operations[i + 1..].iter().filter_map(|operation| {
+ if let StoredOperation::Client {
+ user_id, batch_id, ..
+ } = operation
+ {
+ if batch_id == current_batch_id {
+ return Some(user_id);
+ }
+ }
+ None
+ }));
+ user_ids.sort_unstable();
+ }
+ (operation, did_apply)
+ })
+ .collect()
+ }
+
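+    /// Serialize the operations that were actually applied, one JSON object
+    /// per line so that saved plans are easy to diff.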
+ fn serialize(&mut self) -> Vec<u8> {
+ // Format each operation as one line
+ let mut json = Vec::new();
+ json.push(b'[');
+ for (operation, applied) in &self.stored_operations {
+ if !applied.load(SeqCst) {
+ continue;
+ }
+ if json.len() > 1 {
+ json.push(b',');
+ }
+ json.extend_from_slice(b"\n ");
+ serde_json::to_writer(&mut json, operation).unwrap();
+ }
+ json.extend_from_slice(b"\n]\n");
+ json
+ }
+
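+    /// Get the next server operation: the next stored one when replaying,
+    /// otherwise a freshly generated one, which is recorded in the plan.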
+ fn next_server_operation(
+ &mut self,
+ clients: &[(Rc<TestClient>, TestAppContext)],
+ ) -> Option<(ServerOperation, Arc<AtomicBool>)> {
+ if self.replay {
+ while let Some(stored_operation) = self.stored_operations.get(self.operation_ix) {
+ self.operation_ix += 1;
+ if let (StoredOperation::Server(operation), applied) = stored_operation {
+ return Some((operation.clone(), applied.clone()));
+ }
+ }
+ None
+ } else {
+ let operation = self.generate_server_operation(clients)?;
+ let applied = Arc::new(AtomicBool::new(false));
+ self.stored_operations
+ .push((StoredOperation::Server(operation.clone()), applied.clone()));
+ Some((operation, applied))
+ }
+ }
+
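+    /// Get the next operation for the given client within the given
+    /// `MutateClients` batch, replaying or generating just as for server
+    /// operations.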
+ fn next_client_operation(
+ &mut self,
+ client: &TestClient,
+ current_batch_id: usize,
+ cx: &TestAppContext,
+ ) -> Option<(T::Operation, Arc<AtomicBool>)> {
+ let current_user_id = client.current_user_id(cx);
+ let user_ix = self
+ .users
+ .iter()
+ .position(|user| user.user_id == current_user_id)
+ .unwrap();
+ let user_plan = &mut self.users[user_ix];
+
+ if self.replay {
+ while let Some(stored_operation) = self.stored_operations.get(user_plan.operation_ix) {
+ user_plan.operation_ix += 1;
+ if let (
+ StoredOperation::Client {
+ user_id, operation, ..
+ },
+ applied,
+ ) = stored_operation
+ {
+                if user_id == &current_user_id {
+ return Some((operation.clone(), applied.clone()));
+ }
+ }
+ }
+ None
+ } else {
+ if self.operation_ix == self.max_operations {
+ return None;
+ }
+ self.operation_ix += 1;
+ let operation = T::generate_operation(
+ client,
+ &mut self.rng,
+ self.users
+ .iter_mut()
+ .find(|user| user.user_id == current_user_id)
+ .unwrap(),
+ cx,
+ );
+ let applied = Arc::new(AtomicBool::new(false));
+ self.stored_operations.push((
+ StoredOperation::Client {
+ user_id: current_user_id,
+ batch_id: current_batch_id,
+ operation: operation.clone(),
+ },
+ applied.clone(),
+ ));
+ Some((operation, applied))
+ }
+ }
+
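+    /// Randomly choose the next server operation. Roughly 30% of draws add a
+    /// connection; 5% each remove, bounce, or restart (when allowed); the
+    /// rest mutate a random batch of connected clients.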
+ fn generate_server_operation(
+ &mut self,
+ clients: &[(Rc<TestClient>, TestAppContext)],
+ ) -> Option<ServerOperation> {
+ if self.operation_ix == self.max_operations {
+ return None;
+ }
+
+ Some(loop {
+ break match self.rng.gen_range(0..100) {
+ 0..=29 if clients.len() < self.users.len() => {
+ let user = self
+ .users
+ .iter()
+ .filter(|u| !u.online)
+ .choose(&mut self.rng)
+ .unwrap();
+ self.operation_ix += 1;
+ ServerOperation::AddConnection {
+ user_id: user.user_id,
+ }
+ }
+ 30..=34 if clients.len() > 1 && self.allow_client_disconnection => {
+ let (client, cx) = &clients[self.rng.gen_range(0..clients.len())];
+ let user_id = client.current_user_id(cx);
+ self.operation_ix += 1;
+ ServerOperation::RemoveConnection { user_id }
+ }
+ 35..=39 if clients.len() > 1 && self.allow_client_reconnection => {
+ let (client, cx) = &clients[self.rng.gen_range(0..clients.len())];
+ let user_id = client.current_user_id(cx);
+ self.operation_ix += 1;
+ ServerOperation::BounceConnection { user_id }
+ }
+ 40..=44 if self.allow_server_restarts && clients.len() > 1 => {
+ self.operation_ix += 1;
+ ServerOperation::RestartServer
+ }
+ _ if !clients.is_empty() => {
+ let count = self
+ .rng
+ .gen_range(1..10)
+ .min(self.max_operations - self.operation_ix);
+ let batch_id = util::post_inc(&mut self.next_batch_id);
+ let mut user_ids = (0..count)
+ .map(|_| {
+ let ix = self.rng.gen_range(0..clients.len());
+ let (client, cx) = &clients[ix];
+ client.current_user_id(cx)
+ })
+ .collect::<Vec<_>>();
+ user_ids.sort_unstable();
+ ServerOperation::MutateClients {
+ user_ids,
+ batch_id,
+ quiesce: self.rng.gen_bool(0.7),
+ }
+ }
+ _ => continue,
+ };
+ })
+ }
+
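+    /// Apply a server operation, returning `false` if it was inapplicable in
+    /// the current state (e.g. adding a connection for a user who is already
+    /// online).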
+ async fn apply_server_operation(
+ plan: Arc<Mutex<Self>>,
+ deterministic: Arc<Deterministic>,
+ server: &mut TestServer,
+ clients: &mut Vec<(Rc<TestClient>, TestAppContext)>,
+ client_tasks: &mut Vec<Task<()>>,
+ operation_channels: &mut Vec<futures::channel::mpsc::UnboundedSender<usize>>,
+ operation: ServerOperation,
+ cx: &mut TestAppContext,
+ ) -> bool {
+ match operation {
+ ServerOperation::AddConnection { user_id } => {
+ let username;
+ {
+ let mut plan = plan.lock();
+ let user = plan.user(user_id);
+ if user.online {
+ return false;
+ }
+ user.online = true;
+ username = user.username.clone();
+ };
+ log::info!("adding new connection for {}", username);
+ let next_entity_id = (user_id.0 * 10_000) as usize;
+ let mut client_cx = TestAppContext::new(
+ cx.foreground_platform(),
+ cx.platform(),
+ deterministic.build_foreground(user_id.0 as usize),
+ deterministic.build_background(),
+ cx.font_cache(),
+ cx.leak_detector(),
+ next_entity_id,
+ cx.function_name.clone(),
+ );
+
+ let (operation_tx, operation_rx) = futures::channel::mpsc::unbounded();
+ let client = Rc::new(server.create_client(&mut client_cx, &username).await);
+ operation_channels.push(operation_tx);
+ clients.push((client.clone(), client_cx.clone()));
+ client_tasks.push(client_cx.foreground().spawn(Self::simulate_client(
+ plan.clone(),
+ client,
+ operation_rx,
+ client_cx,
+ )));
+
+ log::info!("added connection for {}", username);
+ }
+
+ ServerOperation::RemoveConnection {
+ user_id: removed_user_id,
+ } => {
+ log::info!("simulating full disconnection of user {}", removed_user_id);
+ let client_ix = clients
+ .iter()
+ .position(|(client, cx)| client.current_user_id(cx) == removed_user_id);
+ let Some(client_ix) = client_ix else {
+ return false;
+ };
+ let user_connection_ids = server
+ .connection_pool
+ .lock()
+ .user_connection_ids(removed_user_id)
+ .collect::<Vec<_>>();
+ assert_eq!(user_connection_ids.len(), 1);
+ let removed_peer_id = user_connection_ids[0].into();
+ let (client, mut client_cx) = clients.remove(client_ix);
+ let client_task = client_tasks.remove(client_ix);
+ operation_channels.remove(client_ix);
+ server.forbid_connections();
+ server.disconnect_client(removed_peer_id);
+ deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
+ deterministic.start_waiting();
+ log::info!("waiting for user {} to exit...", removed_user_id);
+ client_task.await;
+ deterministic.finish_waiting();
+ server.allow_connections();
+
+ for project in client.remote_projects().iter() {
+ project.read_with(&client_cx, |project, _| {
+ assert!(
+ project.is_read_only(),
+ "project {:?} should be read only",
+ project.remote_id()
+ )
+ });
+ }
+
+ for (client, cx) in clients {
+ let contacts = server
+ .app_state
+ .db
+ .get_contacts(client.current_user_id(cx))
+ .await
+ .unwrap();
+ let pool = server.connection_pool.lock();
+ for contact in contacts {
+ if let db::Contact::Accepted { user_id, busy, .. } = contact {
+ if user_id == removed_user_id {
+ assert!(!pool.is_user_online(user_id));
+ assert!(!busy);
+ }
+ }
+ }
+ }
+
+ log::info!("{} removed", client.username);
+ plan.lock().user(removed_user_id).online = false;
+ client_cx.update(|cx| {
+ cx.clear_globals();
+ drop(client);
+ });
+ }
+
+ ServerOperation::BounceConnection { user_id } => {
+ log::info!("simulating temporary disconnection of user {}", user_id);
+ let user_connection_ids = server
+ .connection_pool
+ .lock()
+ .user_connection_ids(user_id)
+ .collect::<Vec<_>>();
+ if user_connection_ids.is_empty() {
+ return false;
+ }
+ assert_eq!(user_connection_ids.len(), 1);
+ let peer_id = user_connection_ids[0].into();
+ server.disconnect_client(peer_id);
+ deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
+ }
+
+ ServerOperation::RestartServer => {
+ log::info!("simulating server restart");
+ server.reset().await;
+ deterministic.advance_clock(RECEIVE_TIMEOUT);
+ server.start().await.unwrap();
+ deterministic.advance_clock(CLEANUP_TIMEOUT);
+ let environment = &server.app_state.config.zed_environment;
+ let (stale_room_ids, _) = server
+ .app_state
+ .db
+ .stale_server_resource_ids(environment, server.id())
+ .await
+ .unwrap();
+ assert_eq!(stale_room_ids, vec![]);
+ }
+
+ ServerOperation::MutateClients {
+ user_ids,
+ batch_id,
+ quiesce,
+ } => {
+ let mut applied = false;
+ for user_id in user_ids {
+ let client_ix = clients
+ .iter()
+ .position(|(client, cx)| client.current_user_id(cx) == user_id);
+ let Some(client_ix) = client_ix else { continue };
+ applied = true;
+ if let Err(err) = operation_channels[client_ix].unbounded_send(batch_id) {
+ log::error!("error signaling user {user_id}: {err}");
+ }
+ }
+
+ if quiesce && applied {
+ deterministic.run_until_parked();
+ T::on_clients_quiesced(&clients);
+ }
+
+ return applied;
+ }
+ }
+ true
+ }
+
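+    /// Drive a single client: each batch id received on `operation_rx`
+    /// triggers one plan operation, with a random delay after each one.
+    /// Inapplicable operations are unmarked so they drop out of saved plans.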
+ async fn simulate_client(
+ plan: Arc<Mutex<Self>>,
+ client: Rc<TestClient>,
+ mut operation_rx: futures::channel::mpsc::UnboundedReceiver<usize>,
+ mut cx: TestAppContext,
+ ) {
+ T::on_client_added(&client).await;
+
+ while let Some(batch_id) = operation_rx.next().await {
+ let Some((operation, applied)) =
+ plan.lock().next_client_operation(&client, batch_id, &cx)
+ else {
+ break;
+ };
+ applied.store(true, SeqCst);
+ match T::apply_operation(&client, operation, &mut cx).await {
+ Ok(()) => {}
+ Err(TestError::Inapplicable) => {
+ applied.store(false, SeqCst);
+ log::info!("skipped operation");
+ }
+ Err(TestError::Other(error)) => {
+ log::error!("{} error: {}", client.username, error);
+ }
+ }
+ cx.background().simulate_random_delay().await;
+ }
+ log::info!("{}: done", client.username);
+ }
+
+ fn user(&mut self, user_id: UserId) -> &mut UserTestPlan {
+ self.users
+ .iter_mut()
+ .find(|user| user.user_id == user_id)
+ .unwrap()
+ }
+}
+
+impl UserTestPlan {
+ pub fn next_root_dir_name(&mut self) -> String {
+ let user_id = self.user_id;
+ let root_id = util::post_inc(&mut self.next_root_id);
+ format!("dir-{user_id}-{root_id}")
+ }
+}
+
+impl From<anyhow::Error> for TestError {
+ fn from(value: anyhow::Error) -> Self {
+ Self::Other(value)
+ }
+}
+
+fn path_env_var(name: &str) -> Option<PathBuf> {
+ let value = env::var(name).ok()?;
+ let mut path = PathBuf::from(value);
+ if path.is_relative() {
+ let mut abs_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+ abs_path.pop();
+ abs_path.pop();
+ abs_path.push(path);
+ path = abs_path
+ }
+ Some(path)
+}
@@ -0,0 +1,551 @@
+use crate::{
+ db::{tests::TestDb, NewUserParams, UserId},
+ executor::Executor,
+ rpc::{Server, CLEANUP_TIMEOUT},
+ AppState,
+};
+use anyhow::anyhow;
+use call::ActiveCall;
+use channel::ChannelStore;
+use client::{
+ self, proto::PeerId, Client, Connection, Credentials, EstablishConnectionError, UserStore,
+};
+use collections::{HashMap, HashSet};
+use fs::FakeFs;
+use futures::{channel::oneshot, StreamExt as _};
+use gpui::{executor::Deterministic, ModelHandle, Task, TestAppContext, WindowHandle};
+use language::LanguageRegistry;
+use parking_lot::Mutex;
+use project::{Project, WorktreeId};
+use settings::SettingsStore;
+use std::{
+ cell::{Ref, RefCell, RefMut},
+ env,
+ ops::{Deref, DerefMut},
+ path::Path,
+ sync::{
+ atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
+ Arc,
+ },
+};
+use util::http::FakeHttpClient;
+use workspace::Workspace;
+
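+/// An in-process collab server running against a test database and a fake
+/// LiveKit server, with clients connected over in-memory connections.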
+pub struct TestServer {
+ pub app_state: Arc<AppState>,
+ pub test_live_kit_server: Arc<live_kit_client::TestServer>,
+ server: Arc<Server>,
+ connection_killers: Arc<Mutex<HashMap<PeerId, Arc<AtomicBool>>>>,
+ forbid_connections: Arc<AtomicBool>,
+ _test_db: TestDb,
+}
+
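+/// A simulated collaborator: a connected `Client` plus the local and remote
+/// projects and buffers it currently has open.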
+pub struct TestClient {
+ pub username: String,
+ pub app_state: Arc<workspace::AppState>,
+ state: RefCell<TestClientState>,
+}
+
+#[derive(Default)]
+struct TestClientState {
+ local_projects: Vec<ModelHandle<Project>>,
+ remote_projects: Vec<ModelHandle<Project>>,
+ buffers: HashMap<ModelHandle<Project>, HashSet<ModelHandle<language::Buffer>>>,
+}
+
+pub struct ContactsSummary {
+ pub current: Vec<String>,
+ pub outgoing_requests: Vec<String>,
+ pub incoming_requests: Vec<String>,
+}
+
+impl TestServer {
+ pub async fn start(deterministic: &Arc<Deterministic>) -> Self {
+ static NEXT_LIVE_KIT_SERVER_ID: AtomicUsize = AtomicUsize::new(0);
+
+ let use_postgres = env::var("USE_POSTGRES").ok();
+ let use_postgres = use_postgres.as_deref();
+ let test_db = if use_postgres == Some("true") || use_postgres == Some("1") {
+ TestDb::postgres(deterministic.build_background())
+ } else {
+ TestDb::sqlite(deterministic.build_background())
+ };
+ let live_kit_server_id = NEXT_LIVE_KIT_SERVER_ID.fetch_add(1, SeqCst);
+ let live_kit_server = live_kit_client::TestServer::create(
+ format!("http://livekit.{}.test", live_kit_server_id),
+ format!("devkey-{}", live_kit_server_id),
+ format!("secret-{}", live_kit_server_id),
+ deterministic.build_background(),
+ )
+ .unwrap();
+ let app_state = Self::build_app_state(&test_db, &live_kit_server).await;
+ let epoch = app_state
+ .db
+ .create_server(&app_state.config.zed_environment)
+ .await
+ .unwrap();
+ let server = Server::new(
+ epoch,
+ app_state.clone(),
+ Executor::Deterministic(deterministic.build_background()),
+ );
+ server.start().await.unwrap();
+ // Advance clock to ensure the server's cleanup task is finished.
+ deterministic.advance_clock(CLEANUP_TIMEOUT);
+ Self {
+ app_state,
+ server,
+ connection_killers: Default::default(),
+ forbid_connections: Default::default(),
+ _test_db: test_db,
+ test_live_kit_server: live_kit_server,
+ }
+ }
+
+ pub async fn reset(&self) {
+ self.app_state.db.reset();
+ let epoch = self
+ .app_state
+ .db
+ .create_server(&self.app_state.config.zed_environment)
+ .await
+ .unwrap();
+ self.server.reset(epoch);
+ }
+
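+    /// Create an authenticated client for `name`, registering the user if
+    /// needed. Connections are established in memory against this server so
+    /// that `disconnect_client` and `forbid_connections` can simulate network
+    /// failures.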
+ pub async fn create_client(&mut self, cx: &mut TestAppContext, name: &str) -> TestClient {
+ cx.update(|cx| {
+ if cx.has_global::<SettingsStore>() {
+ panic!("Same cx used to create two test clients")
+ }
+ cx.set_global(SettingsStore::test(cx));
+ });
+
+ let http = FakeHttpClient::with_404_response();
+ let user_id = if let Ok(Some(user)) = self.app_state.db.get_user_by_github_login(name).await
+ {
+ user.id
+ } else {
+ self.app_state
+ .db
+ .create_user(
+ &format!("{name}@example.com"),
+ false,
+ NewUserParams {
+ github_login: name.into(),
+ github_user_id: 0,
+ invite_count: 0,
+ },
+ )
+ .await
+ .expect("creating user failed")
+ .user_id
+ };
+ let client_name = name.to_string();
+ let mut client = cx.read(|cx| Client::new(http.clone(), cx));
+ let server = self.server.clone();
+ let db = self.app_state.db.clone();
+ let connection_killers = self.connection_killers.clone();
+ let forbid_connections = self.forbid_connections.clone();
+
+ Arc::get_mut(&mut client)
+ .unwrap()
+ .set_id(user_id.0 as usize)
+ .override_authenticate(move |cx| {
+ cx.spawn(|_| async move {
+ let access_token = "the-token".to_string();
+ Ok(Credentials {
+ user_id: user_id.0 as u64,
+ access_token,
+ })
+ })
+ })
+ .override_establish_connection(move |credentials, cx| {
+ assert_eq!(credentials.user_id, user_id.0 as u64);
+ assert_eq!(credentials.access_token, "the-token");
+
+ let server = server.clone();
+ let db = db.clone();
+ let connection_killers = connection_killers.clone();
+ let forbid_connections = forbid_connections.clone();
+ let client_name = client_name.clone();
+ cx.spawn(move |cx| async move {
+ if forbid_connections.load(SeqCst) {
+ Err(EstablishConnectionError::other(anyhow!(
+ "server is forbidding connections"
+ )))
+ } else {
+ let (client_conn, server_conn, killed) =
+ Connection::in_memory(cx.background());
+ let (connection_id_tx, connection_id_rx) = oneshot::channel();
+ let user = db
+ .get_user_by_id(user_id)
+ .await
+ .expect("retrieving user failed")
+ .unwrap();
+ cx.background()
+ .spawn(server.handle_connection(
+ server_conn,
+ client_name,
+ user,
+ Some(connection_id_tx),
+ Executor::Deterministic(cx.background()),
+ ))
+ .detach();
+ let connection_id = connection_id_rx.await.unwrap();
+ connection_killers
+ .lock()
+ .insert(connection_id.into(), killed);
+ Ok(client_conn)
+ }
+ })
+ });
+
+ let fs = FakeFs::new(cx.background());
+ let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http, cx));
+ let channel_store =
+ cx.add_model(|cx| ChannelStore::new(client.clone(), user_store.clone(), cx));
+ let app_state = Arc::new(workspace::AppState {
+ client: client.clone(),
+ user_store: user_store.clone(),
+ channel_store: channel_store.clone(),
+ languages: Arc::new(LanguageRegistry::test()),
+ fs: fs.clone(),
+ build_window_options: |_, _, _| Default::default(),
+ initialize_workspace: |_, _, _, _| Task::ready(Ok(())),
+ background_actions: || &[],
+ });
+
+ cx.update(|cx| {
+ theme::init((), cx);
+ Project::init(&client, cx);
+ client::init(&client, cx);
+ language::init(cx);
+ editor::init_settings(cx);
+ workspace::init(app_state.clone(), cx);
+ audio::init((), cx);
+ call::init(client.clone(), user_store.clone(), cx);
+ channel::init(&client);
+ });
+
+ client
+ .authenticate_and_connect(false, &cx.to_async())
+ .await
+ .unwrap();
+
+ let client = TestClient {
+ app_state,
+ username: name.to_string(),
+ state: Default::default(),
+ };
+ client.wait_for_current_user(cx).await;
+ client
+ }
+
+ pub fn disconnect_client(&self, peer_id: PeerId) {
+ self.connection_killers
+ .lock()
+ .remove(&peer_id)
+ .unwrap()
+ .store(true, SeqCst);
+ }
+
+ pub fn forbid_connections(&self) {
+ self.forbid_connections.store(true, SeqCst);
+ }
+
+ pub fn allow_connections(&self) {
+ self.forbid_connections.store(false, SeqCst);
+ }
+
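+    /// Make every pair of the given clients mutual contacts by exchanging and
+    /// accepting contact requests.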
+ pub async fn make_contacts(&self, clients: &mut [(&TestClient, &mut TestAppContext)]) {
+ for ix in 1..clients.len() {
+ let (left, right) = clients.split_at_mut(ix);
+ let (client_a, cx_a) = left.last_mut().unwrap();
+ for (client_b, cx_b) in right {
+ client_a
+ .app_state
+ .user_store
+ .update(*cx_a, |store, cx| {
+ store.request_contact(client_b.user_id().unwrap(), cx)
+ })
+ .await
+ .unwrap();
+ cx_a.foreground().run_until_parked();
+ client_b
+ .app_state
+ .user_store
+ .update(*cx_b, |store, cx| {
+ store.respond_to_contact_request(client_a.user_id().unwrap(), true, cx)
+ })
+ .await
+ .unwrap();
+ }
+ }
+ }
+
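+    /// Create a channel administered by `admin`, then invite each of
+    /// `members` and accept on their behalf. Returns the channel id.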
+ pub async fn make_channel(
+ &self,
+ channel: &str,
+ admin: (&TestClient, &mut TestAppContext),
+ members: &mut [(&TestClient, &mut TestAppContext)],
+ ) -> u64 {
+ let (admin_client, admin_cx) = admin;
+ let channel_id = admin_client
+ .app_state
+ .channel_store
+ .update(admin_cx, |channel_store, cx| {
+ channel_store.create_channel(channel, None, cx)
+ })
+ .await
+ .unwrap();
+
+ for (member_client, member_cx) in members {
+ admin_client
+ .app_state
+ .channel_store
+ .update(admin_cx, |channel_store, cx| {
+ channel_store.invite_member(
+ channel_id,
+ member_client.user_id().unwrap(),
+ false,
+ cx,
+ )
+ })
+ .await
+ .unwrap();
+
+ admin_cx.foreground().run_until_parked();
+
+ member_client
+ .app_state
+ .channel_store
+ .update(*member_cx, |channels, _| {
+ channels.respond_to_channel_invite(channel_id, true)
+ })
+ .await
+ .unwrap();
+ }
+
+ channel_id
+ }
+
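+    /// Place all of the given clients in a single room: the first client
+    /// calls each of the others, who accept the incoming call.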
+ pub async fn create_room(&self, clients: &mut [(&TestClient, &mut TestAppContext)]) {
+ self.make_contacts(clients).await;
+
+ let (left, right) = clients.split_at_mut(1);
+ let (_client_a, cx_a) = &mut left[0];
+ let active_call_a = cx_a.read(ActiveCall::global);
+
+ for (client_b, cx_b) in right {
+ let user_id_b = client_b.current_user_id(*cx_b).to_proto();
+ active_call_a
+ .update(*cx_a, |call, cx| call.invite(user_id_b, None, cx))
+ .await
+ .unwrap();
+
+ cx_b.foreground().run_until_parked();
+ let active_call_b = cx_b.read(ActiveCall::global);
+ active_call_b
+ .update(*cx_b, |call, cx| call.accept_incoming(cx))
+ .await
+ .unwrap();
+ }
+ }
+
+ pub async fn build_app_state(
+ test_db: &TestDb,
+ fake_server: &live_kit_client::TestServer,
+ ) -> Arc<AppState> {
+ Arc::new(AppState {
+ db: test_db.db().clone(),
+ live_kit_client: Some(Arc::new(fake_server.create_api_client())),
+ config: Default::default(),
+ })
+ }
+}
+
+impl Deref for TestServer {
+ type Target = Server;
+
+ fn deref(&self) -> &Self::Target {
+ &self.server
+ }
+}
+
+impl Drop for TestServer {
+ fn drop(&mut self) {
+ self.server.teardown();
+ self.test_live_kit_server.teardown().unwrap();
+ }
+}
+
+impl Deref for TestClient {
+ type Target = Arc<Client>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.app_state.client
+ }
+}
+
+impl TestClient {
+ pub fn fs(&self) -> &FakeFs {
+ self.app_state.fs.as_fake()
+ }
+
+ pub fn channel_store(&self) -> &ModelHandle<ChannelStore> {
+ &self.app_state.channel_store
+ }
+
+ pub fn user_store(&self) -> &ModelHandle<UserStore> {
+ &self.app_state.user_store
+ }
+
+ pub fn language_registry(&self) -> &Arc<LanguageRegistry> {
+ &self.app_state.languages
+ }
+
+ pub fn client(&self) -> &Arc<Client> {
+ &self.app_state.client
+ }
+
+ pub fn current_user_id(&self, cx: &TestAppContext) -> UserId {
+ UserId::from_proto(
+ self.app_state
+ .user_store
+ .read_with(cx, |user_store, _| user_store.current_user().unwrap().id),
+ )
+ }
+
+ pub async fn wait_for_current_user(&self, cx: &TestAppContext) {
+ let mut authed_user = self
+ .app_state
+ .user_store
+ .read_with(cx, |user_store, _| user_store.watch_current_user());
+ while authed_user.next().await.unwrap().is_none() {}
+ }
+
+ pub async fn clear_contacts(&self, cx: &mut TestAppContext) {
+ self.app_state
+ .user_store
+ .update(cx, |store, _| store.clear_contacts())
+ .await;
+ }
+
+ pub fn local_projects<'a>(&'a self) -> impl Deref<Target = Vec<ModelHandle<Project>>> + 'a {
+ Ref::map(self.state.borrow(), |state| &state.local_projects)
+ }
+
+ pub fn remote_projects<'a>(&'a self) -> impl Deref<Target = Vec<ModelHandle<Project>>> + 'a {
+ Ref::map(self.state.borrow(), |state| &state.remote_projects)
+ }
+
+ pub fn local_projects_mut<'a>(
+ &'a self,
+ ) -> impl DerefMut<Target = Vec<ModelHandle<Project>>> + 'a {
+ RefMut::map(self.state.borrow_mut(), |state| &mut state.local_projects)
+ }
+
+ pub fn remote_projects_mut<'a>(
+ &'a self,
+ ) -> impl DerefMut<Target = Vec<ModelHandle<Project>>> + 'a {
+ RefMut::map(self.state.borrow_mut(), |state| &mut state.remote_projects)
+ }
+
+ pub fn buffers_for_project<'a>(
+ &'a self,
+ project: &ModelHandle<Project>,
+ ) -> impl DerefMut<Target = HashSet<ModelHandle<language::Buffer>>> + 'a {
+ RefMut::map(self.state.borrow_mut(), |state| {
+ state.buffers.entry(project.clone()).or_default()
+ })
+ }
+
+ pub fn buffers<'a>(
+ &'a self,
+ ) -> impl DerefMut<Target = HashMap<ModelHandle<Project>, HashSet<ModelHandle<language::Buffer>>>> + 'a
+ {
+ RefMut::map(self.state.borrow_mut(), |state| &mut state.buffers)
+ }
+
+ pub fn summarize_contacts(&self, cx: &TestAppContext) -> ContactsSummary {
+ self.app_state
+ .user_store
+ .read_with(cx, |store, _| ContactsSummary {
+ current: store
+ .contacts()
+ .iter()
+ .map(|contact| contact.user.github_login.clone())
+ .collect(),
+ outgoing_requests: store
+ .outgoing_contact_requests()
+ .iter()
+ .map(|user| user.github_login.clone())
+ .collect(),
+ incoming_requests: store
+ .incoming_contact_requests()
+ .iter()
+ .map(|user| user.github_login.clone())
+ .collect(),
+ })
+ }
+
+ pub async fn build_local_project(
+ &self,
+ root_path: impl AsRef<Path>,
+ cx: &mut TestAppContext,
+ ) -> (ModelHandle<Project>, WorktreeId) {
+ let project = cx.update(|cx| {
+ Project::local(
+ self.client().clone(),
+ self.app_state.user_store.clone(),
+ self.app_state.languages.clone(),
+ self.app_state.fs.clone(),
+ cx,
+ )
+ });
+ let (worktree, _) = project
+ .update(cx, |p, cx| {
+ p.find_or_create_local_worktree(root_path, true, cx)
+ })
+ .await
+ .unwrap();
+ worktree
+ .read_with(cx, |tree, _| tree.as_local().unwrap().scan_complete())
+ .await;
+ (project, worktree.read_with(cx, |tree, _| tree.id()))
+ }
+
+ pub async fn build_remote_project(
+ &self,
+ host_project_id: u64,
+ guest_cx: &mut TestAppContext,
+ ) -> ModelHandle<Project> {
+ let active_call = guest_cx.read(ActiveCall::global);
+ let room = active_call.read_with(guest_cx, |call, _| call.room().unwrap().clone());
+ room.update(guest_cx, |room, cx| {
+ room.join_project(
+ host_project_id,
+ self.app_state.languages.clone(),
+ self.app_state.fs.clone(),
+ cx,
+ )
+ })
+ .await
+ .unwrap()
+ }
+
+ pub fn build_workspace(
+ &self,
+ project: &ModelHandle<Project>,
+ cx: &mut TestAppContext,
+ ) -> WindowHandle<Workspace> {
+ cx.add_window(|cx| Workspace::new(0, project.clone(), self.app_state.clone(), cx))
+ }
+}
+
+impl Drop for TestClient {
+ fn drop(&mut self) {
+ self.app_state.client.teardown();
+ }
+}
@@ -37,8 +37,14 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream {
Some("seed") => starting_seed = parse_int(&meta.lit)?,
Some("on_failure") => {
if let Lit::Str(name) = meta.lit {
- let ident = Ident::new(&name.value(), name.span());
- on_failure_fn_name = quote!(Some(#ident));
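+ // Allow `on_failure` to name a function by path (e.g.
+ // "crate::tests::save_randomized_test_plan"), not just by a bare
+ // identifier in scope.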
+ let mut path = syn::Path {
+ leading_colon: None,
+ segments: Default::default(),
+ };
+ for part in name.value().split("::") {
+ path.segments.push(Ident::new(part, name.span()).into());
+ }
+ on_failure_fn_name = quote!(Some(#path));
} else {
return Err(TokenStream::from(
syn::Error::new(