From a74c5073a480c9c6133e64bdc29bac2aba0b9fc1 Mon Sep 17 00:00:00 2001
From: Max Brunsfeld <maxbrunsfeld@gmail.com>
Date: Mon, 2 Jan 2023 17:24:03 -0800
Subject: [PATCH 01/80] Start work on allowing random collab test to be
 minimized

Represent operations as an explicit enum.
---
 crates/collab/src/tests.rs                    |  55 +-
 .../src/tests/randomized_integration_tests.rs | 574 ++++++++++++------
 crates/gpui/src/app/test_app_context.rs       |   1 +
 3 files changed, 438 insertions(+), 192 deletions(-)

diff --git a/crates/collab/src/tests.rs b/crates/collab/src/tests.rs
index 8dc29f3d606fdeaf0f74867863b197980dc5c43b..8b52c7ddcf1830f0d379e8a89db970669f85285c 100644
--- a/crates/collab/src/tests.rs
+++ b/crates/collab/src/tests.rs
@@ -21,8 +21,9 @@ use parking_lot::Mutex;
 use project::{Project, WorktreeId};
 use settings::Settings;
 use std::{
+    cell::{Ref, RefCell, RefMut},
     env,
-    ops::Deref,
+    ops::{Deref, DerefMut},
     path::{Path, PathBuf},
     sync::{
         atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
@@ -218,13 +219,10 @@ impl TestServer {
         let client = TestClient {
             client,
             username: name.to_string(),
-            local_projects: Default::default(),
-            remote_projects: Default::default(),
-            next_root_dir_id: 0,
+            state: Default::default(),
             user_store,
             fs,
             language_registry: Arc::new(LanguageRegistry::test()),
-            buffers: Default::default(),
         };
         client.wait_for_current_user(cx).await;
         client
@@ -323,13 +321,18 @@ impl Drop for TestServer {
 struct TestClient {
     client: Arc<Client>,
     username: String,
-    local_projects: Vec<ModelHandle<Project>>,
-    remote_projects: Vec<ModelHandle<Project>>,
-    next_root_dir_id: usize,
+    state: RefCell<TestClientState>,
     pub user_store: ModelHandle<UserStore>,
     language_registry: Arc<LanguageRegistry>,
     fs: Arc<FakeFs>,
+}
+
+#[derive(Default)]
+struct TestClientState {
+    local_projects: Vec<ModelHandle<Project>>,
+    remote_projects: Vec<ModelHandle<Project>>,
     buffers: HashMap<ModelHandle<Project>, HashSet<ModelHandle<language::Buffer>>>,
+    next_root_dir_id: usize,
 }
 
 impl Deref for TestClient {
@@ -367,6 +370,38 @@ impl TestClient {
         .await;
     }
 
+    fn local_projects<'a>(&'a self) -> impl Deref<Target = Vec<ModelHandle<Project>>> + 'a {
+        Ref::map(self.state.borrow(), |state| &state.local_projects)
+    }
+
+    fn remote_projects<'a>(&'a self) -> impl Deref<Target = Vec<ModelHandle<Project>>> + 'a {
+        Ref::map(self.state.borrow(), |state| &state.remote_projects)
+    }
+
+    fn local_projects_mut<'a>(&'a self) -> impl DerefMut<Target = Vec<ModelHandle<Project>>> + 'a {
+        RefMut::map(self.state.borrow_mut(), |state| &mut state.local_projects)
+    }
+
+    fn remote_projects_mut<'a>(&'a self) -> impl DerefMut<Target = Vec<ModelHandle<Project>>> + 'a {
+        RefMut::map(self.state.borrow_mut(), |state| &mut state.remote_projects)
+    }
+
+    fn buffers_for_project<'a>(
+        &'a self,
+        project: &ModelHandle<Project>,
+    ) -> impl DerefMut<Target = HashSet<ModelHandle<language::Buffer>>> + 'a {
+        RefMut::map(self.state.borrow_mut(), |state| {
+            state.buffers.entry(project.clone()).or_default()
+        })
+    }
+
+    fn buffers<'a>(
+        &'a self,
+    ) -> impl DerefMut<Target = HashMap<ModelHandle<Project>, HashSet<ModelHandle<language::Buffer>>>> + 'a
+    {
+        RefMut::map(self.state.borrow_mut(), |state| &mut state.buffers)
+    }
+
     fn summarize_contacts(&self, cx: &TestAppContext) -> ContactsSummary {
         self.user_store.read_with(cx, |store, _| ContactsSummary {
             current: store
@@ -449,11 +484,11 @@ impl TestClient {
         })
     }
 
-    fn create_new_root_dir(&mut self) -> PathBuf {
+    fn create_new_root_dir(&self) -> PathBuf {
         format!(
             "/{}-root-{}",
             self.username,
-            util::post_inc(&mut self.next_root_dir_id)
+            util::post_inc(&mut self.state.borrow_mut().next_root_dir_id)
         )
         .into()
     }

diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs
index a42d4f7d3237242a43f11a7242e32566b44cc2af..d9d1c1c8e48f7a15522d2f2ae6e132b04c56097e 100644
--- a/crates/collab/src/tests/randomized_integration_tests.rs
+++ b/crates/collab/src/tests/randomized_integration_tests.rs
@@ -1,5 +1,5 @@
 use crate::{
-    db::{self, NewUserParams},
+    db::{self, NewUserParams, UserId},
     rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT},
     tests::{TestClient, TestServer},
 };
@@ -15,16 +15,190 @@ use lsp::FakeLanguageServer;
 use parking_lot::Mutex;
 use project::{search::SearchQuery, Project};
 use rand::prelude::*;
-use std::{env, path::PathBuf, sync::Arc};
+use std::{env, path::PathBuf, rc::Rc, sync::Arc};
+
+struct TestPlan {
+    rng: StdRng,
+    allow_server_restarts: bool,
+    allow_client_reconnection: bool,
+    allow_client_disconnection: bool,
+}
+
+#[derive(Debug)]
+enum Operation {
+    AddConnection {
+        user_id: UserId,
+    },
+    RemoveConnection {
+        user_id: UserId,
+    },
+    BounceConnection {
+        user_id: UserId,
+    },
+    RestartServer,
+    RunUntilParked,
+    MutateClient {
+        user_id: UserId,
+        operation: ClientOperation,
+    },
+}
+
+#[derive(Debug)]
+enum ClientOperation {
+    AcceptIncomingCall,
+    RejectIncomingCall,
+    LeaveCall,
+    InviteContactToCall { user_id: UserId },
+    OpenLocalProject { root: PathBuf },
+    OpenRemoteProject { host_id: UserId, root: String },
+    AddWorktreeToProject { id: u64, new_path: PathBuf },
+    CloseProject { id: u64 },
+}
+
+impl TestPlan {
+    fn next_operation(
+        &mut self,
+        clients: &[(Rc<TestClient>, TestAppContext)],
+        offline_users: &[(UserId, String)],
+    ) -> Operation {
+        let operation = loop {
+            break match self.rng.gen_range(0..100) {
+                0..=9 if !offline_users.is_empty() => {
+                    let user_id = offline_users[self.rng.gen_range(0..offline_users.len())].0;
+                    Operation::AddConnection { user_id }
+                }
+                10..=14 if clients.len() > 1 && self.allow_client_disconnection => {
+                    let (client, cx) = &clients[self.rng.gen_range(0..clients.len())];
+                    let user_id = client.current_user_id(cx);
+                    Operation::RemoveConnection { user_id }
+                }
+                15..=19 if clients.len() > 1 && self.allow_client_reconnection => {
+                    let (client, cx) = &clients[self.rng.gen_range(0..clients.len())];
+                    let user_id = client.current_user_id(cx);
+                    Operation::BounceConnection { user_id }
+                }
+                20..=24 if self.allow_server_restarts => Operation::RestartServer,
+                25..=29 => Operation::RunUntilParked,
+                _ if !clients.is_empty() => {
+                    let ix = self.rng.gen_range(0..clients.len());
+                    let (client, cx) = &clients[ix];
+                    let user_id = client.current_user_id(cx);
+                    let operation = self.next_client_operation(clients, ix);
+                    Operation::MutateClient { user_id, operation }
+                }
+                _ => continue,
+            };
+        };
+        operation
+    }
+
+    fn next_client_operation(
+        &mut self,
+        clients: &[(Rc<TestClient>, TestAppContext)],
+        client_ix: usize,
+    ) -> ClientOperation {
+        let (client, cx) = &clients[client_ix];
+        let call = cx.read(ActiveCall::global);
+
+        loop {
+            match self.rng.gen_range(0..100) {
+                // Respond to an incoming call
+                0..=19 => {
+                    if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) {
+                        return if self.rng.gen_bool(0.7) {
+                            ClientOperation::AcceptIncomingCall
+                        } else {
+                            ClientOperation::RejectIncomingCall
+                        };
+                    }
+                }
+
+                // Invite a contact to the current call
+                20..=29 => {
+                    let available_contacts = client.user_store.read_with(cx, |user_store, _| {
+                        user_store
+                            .contacts()
+                            .iter()
+                            .filter(|contact| contact.online && !contact.busy)
+                            .cloned()
+                            .collect::<Vec<_>>()
+                    });
+                    if !available_contacts.is_empty() {
+                        let contact = available_contacts.choose(&mut self.rng).unwrap();
+                        return ClientOperation::InviteContactToCall {
+                            user_id: UserId(contact.user.id as i32),
+                        };
+                    }
+                }
+
+                // Leave the current call
+                30..=39 => {
+                    if self.allow_client_disconnection
+                        && call.read_with(cx, |call, _| call.room().is_some())
+                    {
+                        return ClientOperation::LeaveCall;
} + } + + // Open a remote project + 40..=49 => { + if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { + let remote_projects = room.read_with(cx, |room, _| { + room.remote_participants() + .values() + .flat_map(|participant| { + participant.projects.iter().map(|project| { + ( + UserId::from_proto(participant.user.id), + project.worktree_root_names[0].clone(), + ) + }) + }) + .collect::>() + }); + if !remote_projects.is_empty() { + let (host_id, root) = + remote_projects.choose(&mut self.rng).unwrap().clone(); + return ClientOperation::OpenRemoteProject { host_id, root }; + } + } + } + + // Open a local project + 50..=59 => { + let root = client.create_new_root_dir(); + return ClientOperation::OpenLocalProject { root }; + } + + // Add a worktree to a local project + 60..=69 if !client.local_projects().is_empty() => { + let project = client + .local_projects() + .choose(&mut self.rng) + .unwrap() + .clone(); + + // let paths = client.fs.paths().await; + // let path = paths.choose(&mut self.rng).unwrap(); + + // if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { + // // + // } + } + + _ => continue, + }; + } + } +} #[gpui::test(iterations = 100)] async fn test_random_collaboration( cx: &mut TestAppContext, deterministic: Arc, - rng: StdRng, + mut rng: StdRng, ) { deterministic.forbid_parking(); - let rng = Arc::new(Mutex::new(rng)); let max_peers = env::var("MAX_PEERS") .map(|i| i.parse().expect("invalid `MAX_PEERS` variable")) @@ -56,6 +230,13 @@ async fn test_random_collaboration( available_users.push((user_id, username)); } + let plan = Arc::new(Mutex::new(TestPlan { + allow_server_restarts: rng.gen_bool(0.7), + allow_client_reconnection: rng.gen_bool(0.7), + allow_client_disconnection: rng.gen_bool(0.1), + rng, + })); + for (ix, (user_id_a, _)) in available_users.iter().enumerate() { for (user_id_b, _) in &available_users[ix + 1..] 
{ server @@ -74,20 +255,19 @@ async fn test_random_collaboration( } let mut clients = Vec::new(); - let mut user_ids = Vec::new(); + let mut client_tasks = Vec::new(); let mut op_start_signals = Vec::new(); let mut next_entity_id = 100000; - let allow_server_restarts = rng.lock().gen_bool(0.7); - let allow_client_reconnection = rng.lock().gen_bool(0.7); - let allow_client_disconnection = rng.lock().gen_bool(0.1); - let mut operations = 0; - while operations < max_operations { - let distribution = rng.lock().gen_range(0..100); - match distribution { - 0..=19 if !available_users.is_empty() => { - let client_ix = rng.lock().gen_range(0..available_users.len()); - let (_, username) = available_users.remove(client_ix); + for _ in 0..max_operations { + let next_operation = plan.lock().next_operation(&clients, &available_users); + match next_operation { + Operation::AddConnection { user_id } => { + let user_ix = available_users + .iter() + .position(|(id, _)| *id == user_id) + .unwrap(); + let (_, username) = available_users.remove(user_ix); log::info!("Adding new connection for {}", username); next_entity_id += 100000; let mut client_cx = TestAppContext::new( @@ -102,47 +282,45 @@ async fn test_random_collaboration( ); let op_start_signal = futures::channel::mpsc::unbounded(); - let client = server.create_client(&mut client_cx, &username).await; - user_ids.push(client.current_user_id(&client_cx)); + let client = Rc::new(server.create_client(&mut client_cx, &username).await); op_start_signals.push(op_start_signal.0); - clients.push(client_cx.foreground().spawn(simulate_client( + clients.push((client.clone(), client_cx.clone())); + client_tasks.push(client_cx.foreground().spawn(simulate_client( client, op_start_signal.1, - allow_client_disconnection, - rng.clone(), + plan.clone(), client_cx, ))); log::info!("Added connection for {}", username); - operations += 1; } - 20..=24 if clients.len() > 1 && allow_client_disconnection => { - let client_ix = rng.lock().gen_range(1..clients.len()); - log::info!( - "Simulating full disconnection of user {}", - user_ids[client_ix] - ); - let removed_user_id = user_ids.remove(client_ix); + Operation::RemoveConnection { user_id } => { + log::info!("Simulating full disconnection of user {}", user_id); + let client_ix = clients + .iter() + .position(|(client, cx)| client.current_user_id(cx) == user_id) + .unwrap(); let user_connection_ids = server .connection_pool .lock() - .user_connection_ids(removed_user_id) + .user_connection_ids(user_id) .collect::>(); assert_eq!(user_connection_ids.len(), 1); let removed_peer_id = user_connection_ids[0].into(); - let client = clients.remove(client_ix); + let (client, mut client_cx) = clients.remove(client_ix); + let client_task = client_tasks.remove(client_ix); op_start_signals.remove(client_ix); server.forbid_connections(); server.disconnect_client(removed_peer_id); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); deterministic.start_waiting(); - log::info!("Waiting for user {} to exit...", removed_user_id); - let (client, mut client_cx) = client.await; + log::info!("Waiting for user {} to exit...", user_id); + client_task.await; deterministic.finish_waiting(); server.allow_connections(); - for project in &client.remote_projects { + for project in client.remote_projects().iter() { project.read_with(&client_cx, |project, _| { assert!( project.is_read_only(), @@ -151,14 +329,20 @@ async fn test_random_collaboration( ) }); } - for user_id in &user_ids { - let contacts = 
server.app_state.db.get_contacts(*user_id).await.unwrap(); + + for (client, cx) in &clients { + let contacts = server + .app_state + .db + .get_contacts(client.current_user_id(cx)) + .await + .unwrap(); let pool = server.connection_pool.lock(); for contact in contacts { - if let db::Contact::Accepted { user_id, .. } = contact { - if pool.is_user_online(user_id) { + if let db::Contact::Accepted { user_id: id, .. } = contact { + if pool.is_user_online(id) { assert_ne!( - user_id, removed_user_id, + id, user_id, "removed client is still a contact of another peer" ); } @@ -167,18 +351,14 @@ async fn test_random_collaboration( } log::info!("{} removed", client.username); - available_users.push((removed_user_id, client.username.clone())); + available_users.push((user_id, client.username.clone())); client_cx.update(|cx| { cx.clear_globals(); drop(client); }); - - operations += 1; } - 25..=29 if clients.len() > 1 && allow_client_reconnection => { - let client_ix = rng.lock().gen_range(1..clients.len()); - let user_id = user_ids[client_ix]; + Operation::BounceConnection { user_id } => { log::info!("Simulating temporary disconnection of user {}", user_id); let user_connection_ids = server .connection_pool @@ -189,10 +369,9 @@ async fn test_random_collaboration( let peer_id = user_connection_ids[0].into(); server.disconnect_client(peer_id); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); - operations += 1; } - 30..=34 if allow_server_restarts => { + Operation::RestartServer => { log::info!("Simulating server restart"); server.reset().await; deterministic.advance_clock(RECEIVE_TIMEOUT); @@ -208,39 +387,41 @@ async fn test_random_collaboration( assert_eq!(stale_room_ids, vec![]); } - _ if !op_start_signals.is_empty() => { - while operations < max_operations && rng.lock().gen_bool(0.7) { - op_start_signals - .choose(&mut *rng.lock()) - .unwrap() - .unbounded_send(()) - .unwrap(); - operations += 1; - } + Operation::RunUntilParked => { + deterministic.run_until_parked(); + } - if rng.lock().gen_bool(0.8) { - deterministic.run_until_parked(); - } + Operation::MutateClient { user_id, operation } => { + let client_ix = clients + .iter() + .position(|(client, cx)| client.current_user_id(cx) == user_id) + .unwrap(); + op_start_signals[client_ix] + .unbounded_send(operation) + .unwrap(); } - _ => {} } } drop(op_start_signals); deterministic.start_waiting(); - let clients = futures::future::join_all(clients).await; + futures::future::join_all(client_tasks).await; deterministic.finish_waiting(); deterministic.run_until_parked(); for (client, client_cx) in &clients { - for guest_project in &client.remote_projects { + for guest_project in client.remote_projects().iter() { guest_project.read_with(client_cx, |guest_project, cx| { let host_project = clients.iter().find_map(|(client, cx)| { - let project = client.local_projects.iter().find(|host_project| { - host_project.read_with(cx, |host_project, _| { - host_project.remote_id() == guest_project.remote_id() - }) - })?; + let project = client + .local_projects() + .iter() + .find(|host_project| { + host_project.read_with(cx, |host_project, _| { + host_project.remote_id() == guest_project.remote_id() + }) + })? 
+                        .clone();
                     Some((project, cx))
                 });
@@ -305,7 +486,8 @@ async fn test_random_collaboration(
            });
        }
 
-        for (guest_project, guest_buffers) in &client.buffers {
+        let buffers = client.buffers().clone();
+        for (guest_project, guest_buffers) in &buffers {
            let project_id = if guest_project.read_with(client_cx, |project, _| {
                project.is_local() || project.is_read_only()
            }) {
@@ -318,11 +500,15 @@ async fn test_random_collaboration(
            let guest_user_id = client.user_id().unwrap();
 
            let host_project = clients.iter().find_map(|(client, cx)| {
-                let project = client.local_projects.iter().find(|host_project| {
-                    host_project.read_with(cx, |host_project, _| {
-                        host_project.remote_id() == Some(project_id)
-                    })
-                })?;
+                let project = client
+                    .local_projects()
+                    .iter()
+                    .find(|host_project| {
+                        host_project.read_with(cx, |host_project, _| {
+                            host_project.remote_id() == Some(project_id)
+                        })
+                    })?
+                    .clone();
                Some((client.user_id().unwrap(), project, cx))
            });
@@ -398,12 +584,11 @@ async fn test_random_collaboration(
 }
 
 async fn simulate_client(
-    mut client: TestClient,
-    mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>,
-    can_hang_up: bool,
-    rng: Arc<Mutex<StdRng>>,
+    client: Rc<TestClient>,
+    mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<ClientOperation>,
+    plan: Arc<Mutex<TestPlan>>,
     mut cx: TestAppContext,
-) -> (TestClient, TestAppContext) {
+) {
    // Setup language server
    let mut language = Language::new(
        LanguageConfig {
@@ -418,7 +603,7 @@ async fn simulate_client(
            name: "the-fake-language-server",
            capabilities: lsp::LanguageServer::full_capabilities(),
            initializer: Some(Box::new({
-                let rng = rng.clone();
+                let plan = plan.clone();
                let fs = client.fs.clone();
                move |fake_server: &mut FakeLanguageServer| {
                    fake_server.handle_request::<lsp::request::Completion, _, _>(
@@ -460,16 +645,16 @@ async fn simulate_client(
                    fake_server.handle_request::<lsp::request::GotoDefinition, _, _>({
                        let fs = fs.clone();
-                        let rng = rng.clone();
+                        let plan = plan.clone();
                        move |_, _| {
                            let fs = fs.clone();
-                            let rng = rng.clone();
+                            let plan = plan.clone();
                            async move {
                                let files = fs.files().await;
-                                let mut rng = rng.lock();
-                                let count = rng.gen_range::<usize, _>(1..3);
+                                let mut plan = plan.lock();
+                                let count = plan.rng.gen_range::<usize, _>(1..3);
                                let files = (0..count)
-                                    .map(|_| files.choose(&mut *rng).unwrap())
+                                    .map(|_| files.choose(&mut plan.rng).unwrap())
                                    .collect::<Vec<_>>();
                                log::info!("LSP: Returning definitions in files {:?}", &files);
                                Ok(Some(lsp::GotoDefinitionResponse::Array(
                                    files
                                        .into_iter()
                                        .map(|file| lsp::Location {
                                            uri: lsp::Url::from_file_path(file).unwrap(),
                                            range: Default::default(),
                                        })
                                        .collect(),
                                )))
                            }
                        }
                    });
 
                    fake_server.handle_request::<lsp::request::DocumentHighlightRequest, _, _>({
-                        let rng = rng.clone();
+                        let plan = plan.clone();
                        move |_, _| {
                            let mut highlights = Vec::new();
-                            let highlight_count = rng.lock().gen_range(1..=5);
+                            let highlight_count = plan.lock().rng.gen_range(1..=5);
                            for _ in 0..highlight_count {
-                                let start_row = rng.lock().gen_range(0..100);
-                                let start_column = rng.lock().gen_range(0..100);
+                                let start_row = plan.lock().rng.gen_range(0..100);
+                                let start_column = plan.lock().rng.gen_range(0..100);
                                let start = PointUtf16::new(start_row, start_column);
-                                let end_row = rng.lock().gen_range(0..100);
-                                let end_column = rng.lock().gen_range(0..100);
+                                let end_row = plan.lock().rng.gen_range(0..100);
+                                let end_column = plan.lock().rng.gen_range(0..100);
                                let end = PointUtf16::new(end_row, end_column);
                                let range = if start > end { end..start } else { start..end };
                                highlights.push(lsp::DocumentHighlight {
@@ -517,50 +702,62 @@ async fn simulate_client(
    client.language_registry.add(Arc::new(language));
 
    while op_start_signal.next().await.is_some() {
-        if let Err(error) =
-            randomly_mutate_client(&mut client, can_hang_up, rng.clone(), &mut cx).await
-        {
+        if let Err(error) = randomly_mutate_client(&client, plan.clone(), &mut cx).await {
            log::error!("{} error: {:?}", client.username, error);
        }
 
        cx.background().simulate_random_delay().await;
    }
    log::info!("{}: done", client.username);
-
-    (client, cx)
 }
 
+// async fn apply_client_operation(
+//     client: &mut TestClient,
+//     plan: Arc<Mutex<TestPlan>>,
+//     operation: ClientOperation,
+//     cx: &mut TestAppContext,
+// ) -> Result<()> {
+//     match operation {
+//         ClientOperation::AcceptIncomingCall => todo!(),
+//         ClientOperation::RejectIncomingCall => todo!(),
+//         ClientOperation::OpenLocalProject { path } => todo!(),
+//         ClientOperation::AddWorktreeToProject {
+//             existing_path,
+//             new_path,
+//         } => todo!(),
+//         ClientOperation::CloseProject { existing_path } => todo!(),
+//     }
+// }
+
 async fn randomly_mutate_client(
-    client: &mut TestClient,
-    can_hang_up: bool,
-    rng: Arc<Mutex<StdRng>>,
+    client: &Rc<TestClient>,
+    plan: Arc<Mutex<TestPlan>>,
    cx: &mut TestAppContext,
 ) -> Result<()> {
-    let choice = rng.lock().gen_range(0..100);
+    let choice = plan.lock().rng.gen_range(0..100);
    match choice {
-        0..=19 => randomly_mutate_active_call(client, can_hang_up, &rng, cx).await?,
-        20..=49 => randomly_mutate_projects(client, &rng, cx).await?,
-        50..=59 if !client.local_projects.is_empty() || !client.remote_projects.is_empty() => {
-            randomly_mutate_worktrees(client, &rng, cx).await?;
+        0..=19 => randomly_mutate_active_call(client, &plan, cx).await?,
+        20..=49 => randomly_mutate_projects(client, &plan, cx).await?,
+        50..=59 if !client.local_projects().is_empty() || !client.remote_projects().is_empty() => {
+            randomly_mutate_worktrees(client, &plan, cx).await?;
        }
-        60..=84 if !client.local_projects.is_empty() || !client.remote_projects.is_empty() => {
-            randomly_query_and_mutate_buffers(client, &rng, cx).await?;
+        60..=84 if !client.local_projects().is_empty() || !client.remote_projects().is_empty() => {
+            randomly_query_and_mutate_buffers(client, &plan, cx).await?;
        }
-        _ => randomly_mutate_fs(client, &rng).await,
+        _ => randomly_mutate_fs(client, &plan).await,
    }
 
    Ok(())
 }
 
 async fn randomly_mutate_active_call(
-    client: &mut TestClient,
-    can_hang_up: bool,
-    rng: &Mutex<StdRng>,
+    client: &TestClient,
+    plan: &Arc<Mutex<TestPlan>>,
    cx: &mut TestAppContext,
 ) -> Result<()> {
    let active_call = cx.read(ActiveCall::global);
    if active_call.read_with(cx, |call, _| call.incoming().borrow().is_some()) {
-        if rng.lock().gen_bool(0.7) {
+        if plan.lock().rng.gen_bool(0.7) {
            log::info!("{}: accepting incoming call", client.username);
            active_call
                .update(cx, |call, cx| call.accept_incoming(cx))
                .await?;
        } else {
            log::info!("{}: declining incoming call", client.username);
            active_call.update(cx, |call, _| call.decline_incoming())?;
        }
    } else {
        let available_contacts = client.user_store.read_with(cx, |user_store, _| {
            user_store
                .contacts()
                .iter()
                .filter(|contact| contact.online && !contact.busy)
                .cloned()
                .collect::<Vec<_>>()
        });
 
-        let distribution = rng.lock().gen_range(0..100);
+        let distribution = plan.lock().rng.gen_range(0..100);
        match distribution {
            0..=29 if !available_contacts.is_empty() => {
-                let contact = available_contacts.choose(&mut *rng.lock()).unwrap();
+                let contact = available_contacts.choose(&mut plan.lock().rng).unwrap();
                log::info!(
                    "{}: inviting {}",
                    client.username,
                    contact.user.github_login
                );
                active_call
                    .update(cx, |call, cx| call.invite(contact.user.id, None, cx))
                    .await?;
            }
            30..=39
-                if can_hang_up && active_call.read_with(cx, |call, _| call.room().is_some()) =>
+                if plan.lock().allow_client_disconnection
+                    && active_call.read_with(cx, |call, _| call.room().is_some()) =>
            {
                log::info!("{}: hanging up", client.username);
                active_call.update(cx, |call, cx| call.hang_up(cx))?;
            }
            _ => {}
        }
    }
 
    Ok(())
 }
 
-async fn randomly_mutate_fs(client: &mut TestClient, rng: &Mutex<StdRng>) {
-    let is_dir = rng.lock().gen::<bool>();
+async fn randomly_mutate_fs(client: &TestClient, plan: &Arc<Mutex<TestPlan>>) {
+    let is_dir = plan.lock().rng.gen::<bool>();
    let mut new_path = client
        .fs
        .directories()
        .await
-        .choose(&mut *rng.lock())
+        .choose(&mut plan.lock().rng)
        .unwrap()
        .clone();
-    new_path.push(gen_file_name(rng));
+    new_path.push(gen_file_name(&mut plan.lock().rng));
    if is_dir {
        log::info!("{}: creating local dir at {:?}", client.username, new_path);
        client.fs.create_dir(&new_path).await.unwrap();
    } else {
@@ -630,8 +828,8 @@ async fn randomly_mutate_fs(client: &mut TestClient, rng: &Mutex<StdRng>) {
 }
 
 async fn randomly_mutate_projects(
-    client: &mut TestClient,
-    rng: &Mutex<StdRng>,
+    client: &TestClient,
+    plan: &Arc<Mutex<TestPlan>>,
    cx: &mut TestAppContext,
 ) -> Result<()> {
    let active_call = cx.read(ActiveCall::global);
@@ -647,10 +845,10 @@ async fn randomly_mutate_projects(
        Default::default()
    };
 
-    let project = if remote_projects.is_empty() || rng.lock().gen() {
-        if client.local_projects.is_empty() || rng.lock().gen() {
+    let project = if remote_projects.is_empty() || plan.lock().rng.gen() {
+        if client.local_projects().is_empty() || plan.lock().rng.gen() {
            let paths = client.fs.paths().await;
-            let local_project = if paths.is_empty() || rng.lock().gen() {
+            let local_project = if paths.is_empty() || plan.lock().rng.gen() {
                let root_path = client.create_new_root_dir();
                client.fs.create_dir(&root_path).await.unwrap();
                client
                    .fs
                    .create_file(&root_path.join("main.rs"), Default::default())
                    .await
                    .unwrap();
                log::info!(
                    "{}: opening local project at {:?}",
                    client.username,
                    root_path
                );
                client.build_local_project(root_path, cx).await.0
            } else {
-                let root_path = paths.choose(&mut *rng.lock()).unwrap();
+                let root_path = paths.choose(&mut plan.lock().rng).unwrap();
                log::info!(
                    "{}: opening local project at {:?}",
                    client.username,
                    root_path
                );
                client.build_local_project(root_path, cx).await.0
            };
-            client.local_projects.push(local_project.clone());
+            client.local_projects_mut().push(local_project.clone());
            local_project
        } else {
            client
-                .local_projects
-                .choose(&mut *rng.lock())
+                .local_projects()
+                .choose(&mut plan.lock().rng)
                .unwrap()
                .clone()
        }
    } else {
-        if client.remote_projects.is_empty() || rng.lock().gen() {
-            let remote_project_id = remote_projects.choose(&mut *rng.lock()).unwrap().id;
-            let remote_project = if let Some(project) =
-                client.remote_projects.iter().find(|project| {
+        if client.remote_projects().is_empty() || plan.lock().rng.gen() {
+            let remote_project_id = remote_projects.choose(&mut plan.lock().rng).unwrap().id;
+            let remote_projects = client.remote_projects().clone();
+            let remote_project = if let Some(project) = remote_projects
+                .iter()
+                .find(|project| {
                    project.read_with(cx, |project, _| {
                        project.remote_id() == Some(remote_project_id)
                    })
-                }) {
-                project.clone()
+                })
+                .cloned()
+            {
+                project
            } else {
                log::info!(
                    "{}: opening remote project {}",
                    client.username,
                    remote_project_id
                );
                let remote_project = Project::remote(
                    remote_project_id,
                    client.client.clone(),
                    client.user_store.clone(),
                    client.language_registry.clone(),
                    FakeFs::new(cx.background()),
                    cx.to_async(),
                )
                .await?;
-                client.remote_projects.push(remote_project.clone());
+                client.remote_projects_mut().push(remote_project.clone());
                remote_project
            };
 
            remote_project
        } else {
            client
-                .remote_projects
-                .choose(&mut *rng.lock())
+                .remote_projects()
+                .choose(&mut plan.lock().rng)
                .unwrap()
                .clone()
        }
    };
 
    if active_call.read_with(cx, |call, _| call.room().is_some())
        && project.read_with(cx, |project, _| project.is_local() && !project.is_shared())
    {
        match active_call
            .update(cx, |call, cx| call.share_project(project.clone(), cx))
            .await
        {
            Ok(project_id) => {
                log::info!("{}: shared project with id {}", client.username, project_id);
            }
            Err(error) => {
                log::error!("{}: error sharing project, {:?}", client.username, error);
            }
        }
    }
 
-    let choice = rng.lock().gen_range(0..100);
+    let choice = plan.lock().rng.gen_range(0..100);
    match choice {
        0..=19 if project.read_with(cx, |project, _| project.is_local()) => {
            let paths = client.fs.paths().await;
-            let path = paths.choose(&mut *rng.lock()).unwrap();
+            let path = paths.choose(&mut plan.lock().rng).unwrap();
            log::info!(
                "{}: finding/creating local worktree for path {:?}",
                client.username,
                path
            );
            project
                .update(cx, |project, cx| {
                    project.find_or_create_local_worktree(&path, true, cx)
                })
                .await
                .unwrap();
        }
        20..=24 if project.read_with(cx, |project, _| project.is_remote()) => {
            log::info!(
                "{}: closing remote project {}",
                client.username,
                project.read_with(cx, |project, _| project.remote_id().unwrap())
            );
 
            cx.update(|_| {
                client
-                    .remote_projects
+                    .remote_projects_mut()
                    .retain(|remote_project| *remote_project != project);
-                client.buffers.remove(&project);
+                client.buffers().remove(&project);
                drop(project);
            });
        }
        _ => {}
    }
 
    Ok(())
 }
 
 async fn randomly_mutate_worktrees(
-    client: &mut TestClient,
-    rng: &Mutex<StdRng>,
+    client: &TestClient,
+    plan: &Arc<Mutex<TestPlan>>,
    cx: &mut TestAppContext,
 ) -> Result<()> {
-    let project = choose_random_project(client, rng).unwrap();
+    let project = choose_random_project(client, &mut plan.lock().rng).unwrap();
    let Some(worktree) = project.read_with(cx, |project, cx| {
        project
            .worktrees(cx)
            .filter(|worktree| {
                let worktree = worktree.read(cx);
                worktree.is_visible()
                    && worktree.entries(false).any(|e| e.is_file())
                    && worktree.root_entry().map_or(false, |e| e.is_dir())
            })
-            .choose(&mut *rng.lock())
+            .choose(&mut plan.lock().rng)
    }) else {
        return Ok(())
    };
 
    let (worktree_id, worktree_root_name) = worktree.read_with(cx, |worktree, _| {
        (worktree.id(), worktree.root_name().to_string())
    });
 
-    let is_dir = rng.lock().gen::<bool>();
+    let is_dir = plan.lock().rng.gen::<bool>();
    let mut new_path = PathBuf::new();
-    new_path.push(gen_file_name(rng));
+    new_path.push(gen_file_name(&mut plan.lock().rng));
    if !is_dir {
        new_path.set_extension("rs");
    }
    log::info!(
        "{}: creating {:?} in worktree {} ({})",
        client.username,
        new_path,
        worktree_id,
        worktree_root_name,
    );
    project
        .update(cx, |project, cx| {
            project.create_entry((worktree_id, new_path), is_dir, cx)
        })
        .unwrap()
        .await?;
    Ok(())
 }
 
 async fn randomly_query_and_mutate_buffers(
-    client: &mut TestClient,
-    rng: &Mutex<StdRng>,
+    client: &TestClient,
+    plan: &Arc<Mutex<TestPlan>>,
    cx: &mut TestAppContext,
 ) -> Result<()> {
-    let project = choose_random_project(client, rng).unwrap();
-    let buffers = client.buffers.entry(project.clone()).or_default();
-    let buffer = if buffers.is_empty() || rng.lock().gen() {
+    let project = choose_random_project(client, &mut plan.lock().rng).unwrap();
+    let has_buffers_for_project = !client.buffers_for_project(&project).is_empty();
+    let buffer = if !has_buffers_for_project || plan.lock().rng.gen() {
        let Some(worktree) = project.read_with(cx, |project, cx| {
            project
                .worktrees(cx)
                .filter(|worktree| {
                    let worktree = worktree.read(cx);
                    worktree.is_visible() && worktree.entries(false).any(|e| e.is_file())
                })
-                .choose(&mut *rng.lock())
+                .choose(&mut plan.lock().rng)
        }) else {
            return Ok(());
        };
 
        let (worktree_root_name, project_path) = worktree.read_with(cx, |worktree, _| {
            let entry = worktree
                .entries(false)
                .filter(|e| e.is_file())
-                .choose(&mut *rng.lock())
+                .choose(&mut plan.lock().rng)
                .unwrap();
            (
                worktree.root_name().to_string(),
                (worktree.id(), entry.path.clone()),
            )
        });
        log::info!(
            "{}: opening path {:?} in worktree {} ({})",
            client.username,
            project_path.1,
            project_path.0,
            worktree_root_name,
        );
        let buffer = project
            .update(cx, |project, cx| {
                project.open_buffer(project_path.clone(), cx)
            })
            .await?;
        log::info!(
            "{}: opened path {:?} in worktree {} ({}) with buffer id {}",
            client.username,
            project_path.1,
            project_path.0,
            worktree_root_name,
            buffer.read_with(cx, |buffer, _| buffer.remote_id())
        );
-        buffers.insert(buffer.clone());
+        client.buffers_for_project(&project).insert(buffer.clone());
        buffer
    } else {
-        buffers.iter().choose(&mut *rng.lock()).unwrap().clone()
+        client
+            .buffers_for_project(&project)
+            .iter()
+            .choose(&mut plan.lock().rng)
+            .unwrap()
+            .clone()
    };
 
-    let choice = rng.lock().gen_range(0..100);
+    let choice = plan.lock().rng.gen_range(0..100);
    match choice {
        0..=9 => {
            cx.update(|cx| {
                log::info!(
                    "{}: dropping buffer {:?}",
                    client.username,
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
-                buffers.remove(&buffer);
+                client.buffers_for_project(&project).remove(&buffer);
                drop(buffer);
            });
        }
        10..=19 => {
            let completions = project.update(cx, |project, cx| {
                log::info!(
                    "{}: requesting completions for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
-                let offset = rng.lock().gen_range(0..=buffer.read(cx).len());
+                let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len());
                project.completions(&buffer, offset, cx)
            });
            let completions = cx.background().spawn(async move {
                completions
                    .await
                    .map_err(|err| anyhow!("completions request failed: {:?}", err))
            });
-            if rng.lock().gen_bool(0.3) {
+            if plan.lock().rng.gen_bool(0.3) {
                log::info!("{}: detaching completions request", client.username);
                cx.update(|cx| completions.detach_and_log_err(cx));
            } else {
                completions.await?;
            }
        }
        20..=24 => {
            let code_actions = project.update(cx, |project, cx| {
                log::info!(
                    "{}: requesting code actions for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
-                let range = buffer.read(cx).random_byte_range(0, &mut *rng.lock());
+                let range = buffer.read(cx).random_byte_range(0, &mut plan.lock().rng);
                project.code_actions(&buffer, range, cx)
            });
            let code_actions = cx.background().spawn(async move {
                code_actions
                    .await
                    .map_err(|err| anyhow!("code actions request failed: {:?}", err))
            });
-            if rng.lock().gen_bool(0.3) {
+            if plan.lock().rng.gen_bool(0.3) {
                log::info!("{}: detaching code actions request", client.username);
                cx.update(|cx| code_actions.detach_and_log_err(cx));
            } else {
                code_actions.await?;
            }
        }
        25..=29 => {
            let (requested_version, save) = buffer.update(cx, |buffer, cx| {
                log::info!(
                    "{}: saving buffer {} ({:?})",
                    client.username,
                    buffer.remote_id(),
                    buffer.file().unwrap().full_path(cx)
                );
                (buffer.version(), buffer.save(cx))
            });
            let save = cx.background().spawn(async move {
                let (saved_version, _, _) = save
                    .await
                    .map_err(|err| anyhow!("save request failed: {:?}", err))?;
                assert!(saved_version.observed_all(&requested_version));
                Ok::<_, anyhow::Error>(())
            });
-            if rng.lock().gen_bool(0.3) {
+            if plan.lock().rng.gen_bool(0.3) {
                log::info!("{}: detaching save request", client.username);
                cx.update(|cx| save.detach_and_log_err(cx));
            } else {
                save.await?;
            }
        }
        30..=34 => {
            let prepare_rename = project.update(cx, |project, cx| {
                log::info!(
                    "{}: preparing rename for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
-                let offset = rng.lock().gen_range(0..=buffer.read(cx).len());
+                let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len());
                project.prepare_rename(buffer, offset, cx)
            });
            let prepare_rename = cx.background().spawn(async move {
                prepare_rename
                    .await
                    .map_err(|err| anyhow!("prepare rename request failed: {:?}", err))
            });
-            if rng.lock().gen_bool(0.3) {
+            if plan.lock().rng.gen_bool(0.3) {
                log::info!("{}: detaching prepare rename request", client.username);
                cx.update(|cx| prepare_rename.detach_and_log_err(cx));
            } else {
                prepare_rename.await?;
            }
        }
        35..=49 => {
            let definitions = project.update(cx, |project, cx| {
                log::info!(
                    "{}: requesting definitions for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
-                let offset = rng.lock().gen_range(0..=buffer.read(cx).len());
+                let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len());
                project.definition(&buffer, offset, cx)
            });
            let definitions = cx.background().spawn(async move {
                definitions
                    .await
                    .map_err(|err| anyhow!("definitions request failed: {:?}", err))
            });
-            if rng.lock().gen_bool(0.3) {
+            if plan.lock().rng.gen_bool(0.3) {
                log::info!("{}: detaching definitions request", client.username);
                cx.update(|cx| definitions.detach_and_log_err(cx));
            } else {
-                buffers.extend(definitions.await?.into_iter().map(|loc| loc.target.buffer));
+                let definitions = definitions.await?;
+                client
+                    .buffers_for_project(&project)
+                    .extend(definitions.into_iter().map(|loc| loc.target.buffer));
            }
        }
        50..=54 => {
            let highlights = project.update(cx, |project, cx| {
                log::info!(
                    "{}: requesting highlights for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
-                let offset = rng.lock().gen_range(0..=buffer.read(cx).len());
+                let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len());
                project.document_highlights(&buffer, offset, cx)
            });
            let highlights = cx.background().spawn(async move {
                highlights
                    .await
                    .map_err(|err| anyhow!("highlights request failed: {:?}", err))
            });
-            if rng.lock().gen_bool(0.3) {
+            if plan.lock().rng.gen_bool(0.3) {
                log::info!("{}: detaching highlights request", client.username);
                cx.update(|cx| highlights.detach_and_log_err(cx));
            } else {
                highlights.await?;
            }
        }
        55..=59 => {
            let search = project.update(cx, |project, cx| {
-                let query = rng.lock().gen_range('a'..='z');
+                let query = plan.lock().rng.gen_range('a'..='z');
                log::info!("{}: project-wide search {:?}", client.username, query);
                project.search(SearchQuery::text(query, false, false), cx)
            });
            let search = cx.background().spawn(async move {
                search
                    .await
                    .map_err(|err| anyhow!("search request failed: {:?}", err))
            });
-            if rng.lock().gen_bool(0.3) {
+            if plan.lock().rng.gen_bool(0.3) {
                log::info!("{}: detaching search request", client.username);
                cx.update(|cx| search.detach_and_log_err(cx));
            } else {
-                buffers.extend(search.await?.into_keys());
+                let search = search.await?;
+                client
+                    .buffers_for_project(&project)
+                    .extend(search.into_keys());
            }
        }
        _ => {
            buffer.update(cx, |buffer, cx| {
                log::info!(
                    "{}: updating buffer {} ({:?})",
                    client.username,
                    buffer.remote_id(),
                    buffer.file().unwrap().full_path(cx)
                );
-                if rng.lock().gen_bool(0.7) {
-                    buffer.randomly_edit(&mut *rng.lock(), 5, cx);
+                if plan.lock().rng.gen_bool(0.7) {
+                    buffer.randomly_edit(&mut plan.lock().rng, 5, cx);
                } else {
-                    buffer.randomly_undo_redo(&mut *rng.lock(), cx);
+                    buffer.randomly_undo_redo(&mut plan.lock().rng, cx);
                }
            });
        }
    }
 
    Ok(())
 }
 
-fn choose_random_project(
-    client: &mut TestClient,
-    rng: &Mutex<StdRng>,
-) -> Option<ModelHandle<Project>> {
+fn choose_random_project(client: &TestClient, rng: &mut StdRng) -> Option<ModelHandle<Project>> {
    client
-        .local_projects
+        .local_projects()
        .iter()
-        .chain(&client.remote_projects)
-        .choose(&mut *rng.lock())
+        .chain(client.remote_projects().iter())
+        .choose(rng)
        .cloned()
 }
 
-fn gen_file_name(rng: &Mutex<StdRng>) -> String {
+fn gen_file_name(rng: &mut StdRng) -> String {
    let mut name = String::new();
    for _ in 0..10 {
-        let letter = rng.lock().gen_range('a'..='z');
+        let letter = rng.gen_range('a'..='z');
        name.push(letter);
    }
    name

diff --git a/crates/gpui/src/app/test_app_context.rs b/crates/gpui/src/app/test_app_context.rs
index 72f1f546fb8a32cb3c95fc932d2128fcbbc17cb7..d8586f753bd678b94ad9d4bc771c7d645f8403c4 100644
--- a/crates/gpui/src/app/test_app_context.rs
+++ b/crates/gpui/src/app/test_app_context.rs
@@ -27,6 +27,7 @@ use collections::BTreeMap;
 
 use super::{AsyncAppContext, RefCounts};
 
+#[derive(Clone)]
 pub struct TestAppContext {
     cx: Rc<RefCell<MutableAppContext>>,
     foreground_platform: Rc<platform::test::ForegroundPlatform>,

From ce8dd5a286eb604de88550321003c84101ca3b7b Mon Sep 17 00:00:00 2001
From: Max Brunsfeld <maxbrunsfeld@gmail.com>
Date: Tue, 3 Jan 2023 18:05:13 -0800
Subject: [PATCH 02/80] wip
---
 .../src/tests/randomized_integration_tests.rs | 71 ++++++++++++++-----
 1 file changed, 53 insertions(+), 18 deletions(-)

diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs
index d9d1c1c8e48f7a15522d2f2ae6e132b04c56097e..06c63cfde00a19e0de377782ea63fb5ea5de4c50 100644
--- a/crates/collab/src/tests/randomized_integration_tests.rs
+++ b/crates/collab/src/tests/randomized_integration_tests.rs
@@ -48,15 +48,27 @@ enum ClientOperation {
AcceptIncomingCall, RejectIncomingCall, LeaveCall, - InviteContactToCall { user_id: UserId }, - OpenLocalProject { root: PathBuf }, - OpenRemoteProject { host_id: UserId, root: String }, - AddWorktreeToProject { id: u64, new_path: PathBuf }, - CloseProject { id: u64 }, + InviteContactToCall { + user_id: UserId, + }, + OpenLocalProject { + first_root_path: PathBuf, + }, + OpenRemoteProject { + host_id: UserId, + first_root_name: String, + }, + AddWorktreeToProject { + first_root_path: PathBuf, + new_root_path: PathBuf, + }, + CloseProject { + id: u64, + }, } impl TestPlan { - fn next_operation( + async fn next_operation( &mut self, clients: &[(Rc, TestAppContext)], offline_users: &[(UserId, String)], @@ -83,7 +95,7 @@ impl TestPlan { let ix = self.rng.gen_range(0..clients.len()); let (client, cx) = &clients[ix]; let user_id = client.current_user_id(cx); - let operation = self.next_client_operation(clients, ix); + let operation = self.next_client_operation(clients, ix).await; Operation::MutateClient { user_id, operation } } _ => continue, @@ -92,7 +104,7 @@ impl TestPlan { operation } - fn next_client_operation( + async fn next_client_operation( &mut self, clients: &[(Rc, TestAppContext)], client_ix: usize, @@ -157,17 +169,25 @@ impl TestPlan { .collect::>() }); if !remote_projects.is_empty() { - let (host_id, root) = + let (host_id, first_root_name) = remote_projects.choose(&mut self.rng).unwrap().clone(); - return ClientOperation::OpenRemoteProject { host_id, root }; + return ClientOperation::OpenRemoteProject { + host_id, + first_root_name, + }; } } } // Open a local project 50..=59 => { - let root = client.create_new_root_dir(); - return ClientOperation::OpenLocalProject { root }; + let paths = client.fs.paths().await; + let first_root_path = if paths.is_empty() || self.rng.gen() { + client.create_new_root_dir() + } else { + paths.choose(&mut self.rng).unwrap().clone() + }; + return ClientOperation::OpenLocalProject { first_root_path }; } // Add a worktree to a local project @@ -178,12 +198,27 @@ impl TestPlan { .unwrap() .clone(); - // let paths = client.fs.paths().await; - // let path = paths.choose(&mut self.rng).unwrap(); + let first_root_path = project.read_with(cx, |project, cx| { + project + .visible_worktrees(cx) + .next() + .unwrap() + .read(cx) + .abs_path() + .to_path_buf() + }); - // if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { - // // - // } + let paths = client.fs.paths().await; + let new_root_path = if paths.is_empty() || self.rng.gen() { + client.create_new_root_dir() + } else { + paths.choose(&mut self.rng).unwrap().clone() + }; + + return ClientOperation::AddWorktreeToProject { + first_root_path, + new_root_path, + }; } _ => continue, @@ -260,7 +295,7 @@ async fn test_random_collaboration( let mut next_entity_id = 100000; for _ in 0..max_operations { - let next_operation = plan.lock().next_operation(&clients, &available_users); + let next_operation = plan.lock().next_operation(&clients, &available_users).await; match next_operation { Operation::AddConnection { user_id } => { let user_ix = available_users From f243633f3ef28e8f395d4ca3d423391dff0afc24 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 4 Jan 2023 15:16:06 -0800 Subject: [PATCH 03/80] Set up flow for mutating clients via explicit operation values --- crates/collab/src/tests.rs | 12 +- .../src/tests/randomized_integration_tests.rs | 1108 ++++++++++------- crates/text/src/text.rs | 20 +- 3 files changed, 649 insertions(+), 491 deletions(-) diff --git 
a/crates/collab/src/tests.rs b/crates/collab/src/tests.rs index 8b52c7ddcf1830f0d379e8a89db970669f85285c..67d363ddc239f304a2227a3e07abe052390d917f 100644 --- a/crates/collab/src/tests.rs +++ b/crates/collab/src/tests.rs @@ -24,7 +24,7 @@ use std::{ cell::{Ref, RefCell, RefMut}, env, ops::{Deref, DerefMut}, - path::{Path, PathBuf}, + path::Path, sync::{ atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst}, Arc, @@ -332,7 +332,6 @@ struct TestClientState { local_projects: Vec>, remote_projects: Vec>, buffers: HashMap, HashSet>>, - next_root_dir_id: usize, } impl Deref for TestClient { @@ -483,15 +482,6 @@ impl TestClient { ) }) } - - fn create_new_root_dir(&self) -> PathBuf { - format!( - "/{}-root-{}", - self.username, - util::post_inc(&mut self.state.borrow_mut().next_root_dir_id) - ) - .into() - } } impl Drop for TestClient { diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 06c63cfde00a19e0de377782ea63fb5ea5de4c50..64792cf422ea145ba65c262c76f4a8502bd5e4dd 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -7,7 +7,7 @@ use anyhow::{anyhow, Result}; use call::ActiveCall; use client::RECEIVE_TIMEOUT; use collections::BTreeMap; -use fs::{FakeFs, Fs as _}; +use fs::Fs as _; use futures::StreamExt as _; use gpui::{executor::Deterministic, ModelHandle, TestAppContext}; use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16}; @@ -15,217 +15,13 @@ use lsp::FakeLanguageServer; use parking_lot::Mutex; use project::{search::SearchQuery, Project}; use rand::prelude::*; -use std::{env, path::PathBuf, rc::Rc, sync::Arc}; - -struct TestPlan { - rng: StdRng, - allow_server_restarts: bool, - allow_client_reconnection: bool, - allow_client_disconnection: bool, -} - -#[derive(Debug)] -enum Operation { - AddConnection { - user_id: UserId, - }, - RemoveConnection { - user_id: UserId, - }, - BounceConnection { - user_id: UserId, - }, - RestartServer, - RunUntilParked, - MutateClient { - user_id: UserId, - operation: ClientOperation, - }, -} - -#[derive(Debug)] -enum ClientOperation { - AcceptIncomingCall, - RejectIncomingCall, - LeaveCall, - InviteContactToCall { - user_id: UserId, - }, - OpenLocalProject { - first_root_path: PathBuf, - }, - OpenRemoteProject { - host_id: UserId, - first_root_name: String, - }, - AddWorktreeToProject { - first_root_path: PathBuf, - new_root_path: PathBuf, - }, - CloseProject { - id: u64, - }, -} - -impl TestPlan { - async fn next_operation( - &mut self, - clients: &[(Rc, TestAppContext)], - offline_users: &[(UserId, String)], - ) -> Operation { - let operation = loop { - break match self.rng.gen_range(0..100) { - 0..=9 if !offline_users.is_empty() => { - let user_id = offline_users[self.rng.gen_range(0..offline_users.len())].0; - Operation::AddConnection { user_id } - } - 10..=14 if clients.len() > 1 && self.allow_client_disconnection => { - let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; - let user_id = client.current_user_id(cx); - Operation::RemoveConnection { user_id } - } - 15..=19 if clients.len() > 1 && self.allow_client_reconnection => { - let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; - let user_id = client.current_user_id(cx); - Operation::BounceConnection { user_id } - } - 20..=24 if self.allow_server_restarts => Operation::RestartServer, - 25..=29 => Operation::RunUntilParked, - _ if !clients.is_empty() => { - let ix = 
self.rng.gen_range(0..clients.len()); - let (client, cx) = &clients[ix]; - let user_id = client.current_user_id(cx); - let operation = self.next_client_operation(clients, ix).await; - Operation::MutateClient { user_id, operation } - } - _ => continue, - }; - }; - operation - } - - async fn next_client_operation( - &mut self, - clients: &[(Rc, TestAppContext)], - client_ix: usize, - ) -> ClientOperation { - let (client, cx) = &clients[client_ix]; - let call = cx.read(ActiveCall::global); - - loop { - match self.rng.gen_range(0..100) { - // Respond to an incoming call - 0..=19 => { - if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { - return if self.rng.gen_bool(0.7) { - ClientOperation::AcceptIncomingCall - } else { - ClientOperation::RejectIncomingCall - }; - } - } - - // Invite a contact to the current call - 20..=29 => { - let available_contacts = client.user_store.read_with(cx, |user_store, _| { - user_store - .contacts() - .iter() - .filter(|contact| contact.online && !contact.busy) - .cloned() - .collect::>() - }); - if !available_contacts.is_empty() { - let contact = available_contacts.choose(&mut self.rng).unwrap(); - return ClientOperation::InviteContactToCall { - user_id: UserId(contact.user.id as i32), - }; - } - } - - // Leave the current call - 30..=39 => { - if self.allow_client_disconnection - && call.read_with(cx, |call, _| call.room().is_some()) - { - return ClientOperation::LeaveCall; - } - } - - // Open a remote project - 40..=49 => { - if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { - let remote_projects = room.read_with(cx, |room, _| { - room.remote_participants() - .values() - .flat_map(|participant| { - participant.projects.iter().map(|project| { - ( - UserId::from_proto(participant.user.id), - project.worktree_root_names[0].clone(), - ) - }) - }) - .collect::>() - }); - if !remote_projects.is_empty() { - let (host_id, first_root_name) = - remote_projects.choose(&mut self.rng).unwrap().clone(); - return ClientOperation::OpenRemoteProject { - host_id, - first_root_name, - }; - } - } - } - - // Open a local project - 50..=59 => { - let paths = client.fs.paths().await; - let first_root_path = if paths.is_empty() || self.rng.gen() { - client.create_new_root_dir() - } else { - paths.choose(&mut self.rng).unwrap().clone() - }; - return ClientOperation::OpenLocalProject { first_root_path }; - } - - // Add a worktree to a local project - 60..=69 if !client.local_projects().is_empty() => { - let project = client - .local_projects() - .choose(&mut self.rng) - .unwrap() - .clone(); - - let first_root_path = project.read_with(cx, |project, cx| { - project - .visible_worktrees(cx) - .next() - .unwrap() - .read(cx) - .abs_path() - .to_path_buf() - }); - - let paths = client.fs.paths().await; - let new_root_path = if paths.is_empty() || self.rng.gen() { - client.create_new_root_dir() - } else { - paths.choose(&mut self.rng).unwrap().clone() - }; - - return ClientOperation::AddWorktreeToProject { - first_root_path, - new_root_path, - }; - } - - _ => continue, - }; - } - } -} +use std::{ + env, + ops::Range, + path::{Path, PathBuf}, + rc::Rc, + sync::Arc, +}; #[gpui::test(iterations = 100)] async fn test_random_collaboration( @@ -246,7 +42,7 @@ async fn test_random_collaboration( let mut server = TestServer::start(&deterministic).await; let db = server.app_state.db.clone(); - let mut available_users = Vec::new(); + let mut users = Vec::new(); for ix in 0..max_peers { let username = format!("user-{}", ix + 1); let user_id = db @@ -262,47 
+58,55 @@ async fn test_random_collaboration( .await .unwrap() .user_id; - available_users.push((user_id, username)); + users.push(UserTestPlan { + user_id, + username, + online: false, + next_root_id: 0, + }); } - let plan = Arc::new(Mutex::new(TestPlan { - allow_server_restarts: rng.gen_bool(0.7), - allow_client_reconnection: rng.gen_bool(0.7), - allow_client_disconnection: rng.gen_bool(0.1), - rng, - })); - - for (ix, (user_id_a, _)) in available_users.iter().enumerate() { - for (user_id_b, _) in &available_users[ix + 1..] { + for (ix, user_a) in users.iter().enumerate() { + for user_b in &users[ix + 1..] { server .app_state .db - .send_contact_request(*user_id_a, *user_id_b) + .send_contact_request(user_a.user_id, user_b.user_id) .await .unwrap(); server .app_state .db - .respond_to_contact_request(*user_id_b, *user_id_a, true) + .respond_to_contact_request(user_b.user_id, user_a.user_id, true) .await .unwrap(); } } + let plan = Arc::new(Mutex::new(TestPlan { + users, + allow_server_restarts: rng.gen_bool(0.7), + allow_client_reconnection: rng.gen_bool(0.7), + allow_client_disconnection: rng.gen_bool(0.1), + rng, + })); + let mut clients = Vec::new(); let mut client_tasks = Vec::new(); - let mut op_start_signals = Vec::new(); + let mut operation_channels = Vec::new(); let mut next_entity_id = 100000; - for _ in 0..max_operations { - let next_operation = plan.lock().next_operation(&clients, &available_users).await; + let mut i = 0; + while i < max_operations { + let next_operation = plan.lock().next_operation(&clients).await; match next_operation { Operation::AddConnection { user_id } => { - let user_ix = available_users - .iter() - .position(|(id, _)| *id == user_id) - .unwrap(); - let (_, username) = available_users.remove(user_ix); + let username = { + let mut plan = plan.lock(); + let mut user = plan.user(user_id); + user.online = true; + user.username.clone() + }; log::info!("Adding new connection for {}", username); next_entity_id += 100000; let mut client_cx = TestAppContext::new( @@ -316,18 +120,19 @@ async fn test_random_collaboration( cx.function_name.clone(), ); - let op_start_signal = futures::channel::mpsc::unbounded(); + let (operation_tx, operation_rx) = futures::channel::mpsc::unbounded(); let client = Rc::new(server.create_client(&mut client_cx, &username).await); - op_start_signals.push(op_start_signal.0); + operation_channels.push(operation_tx); clients.push((client.clone(), client_cx.clone())); client_tasks.push(client_cx.foreground().spawn(simulate_client( client, - op_start_signal.1, + operation_rx, plan.clone(), client_cx, ))); log::info!("Added connection for {}", username); + i += 1; } Operation::RemoveConnection { user_id } => { @@ -345,7 +150,7 @@ async fn test_random_collaboration( let removed_peer_id = user_connection_ids[0].into(); let (client, mut client_cx) = clients.remove(client_ix); let client_task = client_tasks.remove(client_ix); - op_start_signals.remove(client_ix); + operation_channels.remove(client_ix); server.forbid_connections(); server.disconnect_client(removed_peer_id); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); @@ -386,11 +191,12 @@ async fn test_random_collaboration( } log::info!("{} removed", client.username); - available_users.push((user_id, client.username.clone())); + plan.lock().user(user_id).online = false; client_cx.update(|cx| { cx.clear_globals(); drop(client); }); + i += 1; } Operation::BounceConnection { user_id } => { @@ -404,6 +210,7 @@ async fn test_random_collaboration( let peer_id = 
user_connection_ids[0].into(); server.disconnect_client(peer_id); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); + i += 1; } Operation::RestartServer => { @@ -420,25 +227,27 @@ async fn test_random_collaboration( .await .unwrap(); assert_eq!(stale_room_ids, vec![]); + i += 1; } Operation::RunUntilParked => { deterministic.run_until_parked(); } - Operation::MutateClient { user_id, operation } => { - let client_ix = clients - .iter() - .position(|(client, cx)| client.current_user_id(cx) == user_id) - .unwrap(); - op_start_signals[client_ix] - .unbounded_send(operation) - .unwrap(); + Operation::MutateClients(user_ids) => { + for user_id in user_ids { + let client_ix = clients + .iter() + .position(|(client, cx)| client.current_user_id(cx) == user_id) + .unwrap(); + operation_channels[client_ix].unbounded_send(()).unwrap(); + i += 1; + } } } } - drop(op_start_signals); + drop(operation_channels); deterministic.start_waiting(); futures::future::join_all(client_tasks).await; deterministic.finish_waiting(); @@ -618,9 +427,331 @@ async fn test_random_collaboration( } } +struct TestPlan { + rng: StdRng, + users: Vec, + allow_server_restarts: bool, + allow_client_reconnection: bool, + allow_client_disconnection: bool, +} + +struct UserTestPlan { + user_id: UserId, + username: String, + next_root_id: usize, + online: bool, +} + +#[derive(Debug)] +enum Operation { + AddConnection { user_id: UserId }, + RemoveConnection { user_id: UserId }, + BounceConnection { user_id: UserId }, + RestartServer, + RunUntilParked, + MutateClients(Vec), +} + +#[derive(Debug)] +enum ClientOperation { + AcceptIncomingCall, + RejectIncomingCall, + LeaveCall, + InviteContactToCall { + user_id: UserId, + }, + OpenLocalProject { + first_root_name: String, + }, + OpenRemoteProject { + host_id: UserId, + first_root_name: String, + }, + AddWorktreeToProject { + project_root_name: String, + new_root_path: PathBuf, + }, + CloseRemoteProject { + project_root_name: String, + }, + OpenBuffer { + project_root_name: String, + full_path: PathBuf, + }, + EditBuffer { + project_root_name: String, + full_path: PathBuf, + edits: Vec<(Range, Arc)>, + }, + Other, +} + +impl TestPlan { + async fn next_operation(&mut self, clients: &[(Rc, TestAppContext)]) -> Operation { + let operation = loop { + break match self.rng.gen_range(0..100) { + 0..=19 if clients.len() < self.users.len() => { + let user = self + .users + .iter() + .filter(|u| !u.online) + .choose(&mut self.rng) + .unwrap(); + Operation::AddConnection { + user_id: user.user_id, + } + } + 20..=24 if clients.len() > 1 && self.allow_client_disconnection => { + let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; + let user_id = client.current_user_id(cx); + Operation::RemoveConnection { user_id } + } + 25..=29 if clients.len() > 1 && self.allow_client_reconnection => { + let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; + let user_id = client.current_user_id(cx); + Operation::BounceConnection { user_id } + } + 30..=34 if self.allow_server_restarts && clients.len() > 1 => { + Operation::RestartServer + } + 35..=39 => Operation::RunUntilParked, + _ if !clients.is_empty() => { + let user_ids = (0..self.rng.gen_range(0..10)) + .map(|_| { + let ix = self.rng.gen_range(0..clients.len()); + let (client, cx) = &clients[ix]; + client.current_user_id(cx) + }) + .collect(); + Operation::MutateClients(user_ids) + } + _ => continue, + }; + }; + operation + } + + async fn next_client_operation( + &mut self, + client: &TestClient, + cx: 
&TestAppContext, + ) -> ClientOperation { + let user_id = client.current_user_id(cx); + let call = cx.read(ActiveCall::global); + let operation = loop { + match self.rng.gen_range(0..100) { + // Mutate the call + 0..=19 => match self.rng.gen_range(0..100_u32) { + // Respond to an incoming call + 0..=39 => { + if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { + break if self.rng.gen_bool(0.7) { + ClientOperation::AcceptIncomingCall + } else { + ClientOperation::RejectIncomingCall + }; + } + } + + // Invite a contact to the current call + 30..=89 => { + let available_contacts = + client.user_store.read_with(cx, |user_store, _| { + user_store + .contacts() + .iter() + .filter(|contact| contact.online && !contact.busy) + .cloned() + .collect::>() + }); + if !available_contacts.is_empty() { + let contact = available_contacts.choose(&mut self.rng).unwrap(); + break ClientOperation::InviteContactToCall { + user_id: UserId(contact.user.id as i32), + }; + } + } + + // Leave the current call + 90.. => { + if self.allow_client_disconnection + && call.read_with(cx, |call, _| call.room().is_some()) + { + break ClientOperation::LeaveCall; + } + } + }, + + // Mutate projects + 20..=39 => match self.rng.gen_range(0..100_u32) { + // Open a remote project + 0..=30 => { + if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { + let remote_projects = room.read_with(cx, |room, _| { + room.remote_participants() + .values() + .flat_map(|participant| { + participant.projects.iter().map(|project| { + ( + UserId::from_proto(participant.user.id), + project.worktree_root_names[0].clone(), + ) + }) + }) + .collect::>() + }); + if !remote_projects.is_empty() { + let (host_id, first_root_name) = + remote_projects.choose(&mut self.rng).unwrap().clone(); + break ClientOperation::OpenRemoteProject { + host_id, + first_root_name, + }; + } + } + } + + // Close a remote project + 31..=40 => { + if !client.remote_projects().is_empty() { + let project = client + .remote_projects() + .choose(&mut self.rng) + .unwrap() + .clone(); + let first_root_name = root_name_for_project(&project, cx); + break ClientOperation::CloseRemoteProject { + project_root_name: first_root_name, + }; + } + } + + // Open a local project + 41..=60 => { + let first_root_name = self.next_root_dir_name(user_id); + break ClientOperation::OpenLocalProject { first_root_name }; + } + + // Add a worktree to a local project + 61.. 
=> { + if !client.local_projects().is_empty() { + let project = client + .local_projects() + .choose(&mut self.rng) + .unwrap() + .clone(); + let project_root_name = root_name_for_project(&project, cx); + + let mut paths = client.fs.paths().await; + paths.remove(0); + let new_root_path = if paths.is_empty() || self.rng.gen() { + Path::new("/").join(&self.next_root_dir_name(user_id)) + } else { + paths.choose(&mut self.rng).unwrap().clone() + }; + + break ClientOperation::AddWorktreeToProject { + project_root_name, + new_root_path, + }; + } + } + }, + + // Mutate buffers + 40..=79 => { + let Some(project) = choose_random_project(client, &mut self.rng) else { continue }; + let project_root_name = root_name_for_project(&project, cx); + + match self.rng.gen_range(0..100_u32) { + // Manipulate an existing buffer + 0..=80 => { + let Some(buffer) = client + .buffers_for_project(&project) + .iter() + .choose(&mut self.rng) + .cloned() else { continue }; + + match self.rng.gen_range(0..100_u32) { + 0..=9 => { + let (full_path, edits) = buffer.read_with(cx, |buffer, cx| { + ( + buffer.file().unwrap().full_path(cx), + buffer.get_random_edits(&mut self.rng, 3), + ) + }); + break ClientOperation::EditBuffer { + project_root_name, + full_path, + edits, + }; + } + _ => {} + } + } + + // Open a buffer + 81.. => { + let worktree = project.read_with(cx, |project, cx| { + project + .worktrees(cx) + .filter(|worktree| { + let worktree = worktree.read(cx); + worktree.is_visible() + && worktree.entries(false).any(|e| e.is_file()) + }) + .choose(&mut self.rng) + }); + let Some(worktree) = worktree else { continue }; + let full_path = worktree.read_with(cx, |worktree, _| { + let entry = worktree + .entries(false) + .filter(|e| e.is_file()) + .choose(&mut self.rng) + .unwrap(); + if entry.path.as_ref() == Path::new("") { + Path::new(worktree.root_name()).into() + } else { + Path::new(worktree.root_name()).join(&entry.path) + } + }); + break ClientOperation::OpenBuffer { + project_root_name, + full_path, + }; + } + } + } + + _ => break ClientOperation::Other, + } + }; + operation + } + + fn next_root_dir_name(&mut self, user_id: UserId) -> String { + let user_ix = self + .users + .iter() + .position(|user| user.user_id == user_id) + .unwrap(); + let root_id = util::post_inc(&mut self.users[user_ix].next_root_id); + format!("dir-{user_id}-{root_id}") + } + + fn user(&mut self, user_id: UserId) -> &mut UserTestPlan { + let ix = self + .users + .iter() + .position(|user| user.user_id == user_id) + .unwrap(); + &mut self.users[ix] + } +} + async fn simulate_client( client: Rc, - mut op_start_signal: futures::channel::mpsc::UnboundedReceiver, + mut operation_rx: futures::channel::mpsc::UnboundedReceiver<()>, plan: Arc>, mut cx: TestAppContext, ) { @@ -736,8 +867,10 @@ async fn simulate_client( .await; client.language_registry.add(Arc::new(language)); - while op_start_signal.next().await.is_some() { - if let Err(error) = randomly_mutate_client(&client, plan.clone(), &mut cx).await { + while operation_rx.next().await.is_some() { + let operation = plan.lock().next_client_operation(&client, &cx).await; + if let Err(error) = apply_client_operation(&client, plan.clone(), operation, &mut cx).await + { log::error!("{} error: {:?}", client.username, error); } @@ -746,98 +879,274 @@ async fn simulate_client( log::info!("{}: done", client.username); } -// async fn apply_client_operation( -// client: &mut TestClient, -// plan: Arc>, -// operation: ClientOperation, -// cx: &mut TestAppContext, -// ) -> Result<()> { -// match 
operation { -// ClientOperation::AcceptIncomingCall => todo!(), -// ClientOperation::RejectIncomingCall => todo!(), -// ClientOperation::OpenLocalProject { path } => todo!(), -// ClientOperation::AddWorktreeToProject { -// existing_path, -// new_path, -// } => todo!(), -// ClientOperation::CloseProject { existing_path } => todo!(), -// } -// } - -async fn randomly_mutate_client( - client: &Rc, - plan: Arc>, - cx: &mut TestAppContext, -) -> Result<()> { - let choice = plan.lock().rng.gen_range(0..100); - match choice { - 0..=19 => randomly_mutate_active_call(client, &plan, cx).await?, - 20..=49 => randomly_mutate_projects(client, &plan, cx).await?, - 50..=59 if !client.local_projects().is_empty() || !client.remote_projects().is_empty() => { - randomly_mutate_worktrees(client, &plan, cx).await?; - } - 60..=84 if !client.local_projects().is_empty() || !client.remote_projects().is_empty() => { - randomly_query_and_mutate_buffers(client, &plan, cx).await?; - } - _ => randomly_mutate_fs(client, &plan).await, - } - - Ok(()) -} - -async fn randomly_mutate_active_call( +async fn apply_client_operation( client: &TestClient, - plan: &Arc>, + plan: Arc>, + operation: ClientOperation, cx: &mut TestAppContext, ) -> Result<()> { - let active_call = cx.read(ActiveCall::global); - if active_call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { - if plan.lock().rng.gen_bool(0.7) { + match operation { + ClientOperation::AcceptIncomingCall => { log::info!("{}: accepting incoming call", client.username); + let active_call = cx.read(ActiveCall::global); active_call .update(cx, |call, cx| call.accept_incoming(cx)) .await?; - } else { + } + + ClientOperation::RejectIncomingCall => { log::info!("{}: declining incoming call", client.username); + let active_call = cx.read(ActiveCall::global); active_call.update(cx, |call, _| call.decline_incoming())?; } - } else { - let available_contacts = client.user_store.read_with(cx, |user_store, _| { - user_store - .contacts() + + ClientOperation::LeaveCall => { + log::info!("{}: hanging up", client.username); + let active_call = cx.read(ActiveCall::global); + active_call.update(cx, |call, cx| call.hang_up(cx))?; + } + + ClientOperation::InviteContactToCall { user_id } => { + log::info!("{}: inviting {}", client.username, user_id,); + let active_call = cx.read(ActiveCall::global); + active_call + .update(cx, |call, cx| call.invite(user_id.to_proto(), None, cx)) + .await?; + } + + ClientOperation::OpenLocalProject { first_root_name } => { + log::info!( + "{}: opening local project at {:?}", + client.username, + first_root_name + ); + let root_path = Path::new("/").join(&first_root_name); + client.fs.create_dir(&root_path).await.unwrap(); + client + .fs + .create_file(&root_path.join("main.rs"), Default::default()) + .await + .unwrap(); + let project = client.build_local_project(root_path, cx).await.0; + + let active_call = cx.read(ActiveCall::global); + if active_call.read_with(cx, |call, _| call.room().is_some()) + && project.read_with(cx, |project, _| project.is_local() && !project.is_shared()) + { + match active_call + .update(cx, |call, cx| call.share_project(project.clone(), cx)) + .await + { + Ok(project_id) => { + log::info!( + "{}: shared project {} with id {}", + client.username, + first_root_name, + project_id + ); + } + Err(error) => { + log::error!( + "{}: error sharing project {}: {:?}", + client.username, + first_root_name, + error + ); + } + } + } + + client.local_projects_mut().push(project.clone()); + } + + ClientOperation::AddWorktreeToProject { + 
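// The commented-out stub above records the plan/apply split that this patch
// starts and later patches in the series complete: the planner only
// *describes* a mutation as a ClientOperation value (which is Debug, so it can
// be logged and eventually replayed when minimizing a failing seed), and the
// apply_client_operation dispatcher below carries it out, one match arm per
// variant.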
project_root_name, + new_root_path, + } => { + log::info!( + "{}: finding/creating local worktree at {:?} to project with root path {}", + client.username, + new_root_path, + project_root_name + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + if !client.fs.paths().await.contains(&new_root_path) { + client.fs.create_dir(&new_root_path).await.unwrap(); + } + project + .update(cx, |project, cx| { + project.find_or_create_local_worktree(&new_root_path, true, cx) + }) + .await + .unwrap(); + } + + ClientOperation::CloseRemoteProject { project_root_name } => { + log::info!( + "{}: dropping project with root path {}", + client.username, + project_root_name, + ); + let ix = project_ix_for_root_name(&*client.remote_projects(), &project_root_name, cx) + .expect("invalid project in test operation"); + client.remote_projects_mut().remove(ix); + } + + ClientOperation::OpenRemoteProject { + host_id, + first_root_name, + } => { + log::info!( + "{}: joining remote project of user {}, root name {}", + client.username, + host_id, + first_root_name, + ); + let active_call = cx.read(ActiveCall::global); + let project_id = active_call + .read_with(cx, |call, cx| { + let room = call.room().cloned()?; + let participant = room + .read(cx) + .remote_participants() + .get(&host_id.to_proto())?; + let project = participant + .projects + .iter() + .find(|project| project.worktree_root_names[0] == first_root_name)?; + Some(project.id) + }) + .expect("invalid project in test operation"); + let project = client.build_remote_project(project_id, cx).await; + client.remote_projects_mut().push(project); + } + + ClientOperation::OpenBuffer { + project_root_name, + full_path, + } => { + log::info!( + "{}: opening path {:?} in project {}", + client.username, + full_path, + project_root_name, + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let mut components = full_path.components(); + let root_name = components.next().unwrap().as_os_str().to_str().unwrap(); + let path = components.as_path(); + let worktree_id = project + .read_with(cx, |project, cx| { + project.worktrees(cx).find_map(|worktree| { + let worktree = worktree.read(cx); + if worktree.root_name() == root_name { + Some(worktree.id()) + } else { + None + } + }) + }) + .expect("invalid buffer path in test operation"); + let buffer = project + .update(cx, |project, cx| { + project.open_buffer((worktree_id, &path), cx) + }) + .await?; + client.buffers_for_project(&project).insert(buffer); + } + + ClientOperation::EditBuffer { + project_root_name, + full_path, + edits, + } => { + log::info!( + "{}: editing buffer {:?} in project {} with {:?}", + client.username, + full_path, + project_root_name, + edits + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let buffer = client + .buffers_for_project(&project) .iter() - .filter(|contact| contact.online && !contact.busy) + .find(|buffer| { + buffer.read_with(cx, |buffer, cx| { + buffer.file().unwrap().full_path(cx) == full_path + }) + }) .cloned() - .collect::>() - }); + .expect("invalid buffer path in test operation"); + buffer.update(cx, |buffer, cx| { + buffer.edit(edits, None, cx); + }); + } - let distribution = plan.lock().rng.gen_range(0..100); - match distribution { - 0..=29 if !available_contacts.is_empty() => { - let contact = available_contacts.choose(&mut plan.lock().rng).unwrap(); - log::info!( 
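// A note on error handling in the arms above: lookups use
// .expect("invalid project in test operation") rather than skipping, because
// the planner only emits operations that were valid against the state it saw.
// If apply cannot resolve a root name, the plan and the executed state have
// diverged — and that is itself the kind of bug this harness exists to
// surface loudly.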
- "{}: inviting {}", - client.username, - contact.user.github_login - ); - active_call - .update(cx, |call, cx| call.invite(contact.user.id, None, cx)) - .await?; - } - 30..=39 - if plan.lock().allow_client_disconnection - && active_call.read_with(cx, |call, _| call.room().is_some()) => - { - log::info!("{}: hanging up", client.username); - active_call.update(cx, |call, cx| call.hang_up(cx))?; + _ => { + let choice = plan.lock().rng.gen_range(0..100); + match choice { + 50..=59 + if !client.local_projects().is_empty() + || !client.remote_projects().is_empty() => + { + randomly_mutate_worktrees(client, &plan, cx).await?; + } + 60..=84 + if !client.local_projects().is_empty() + || !client.remote_projects().is_empty() => + { + randomly_query_and_mutate_buffers(client, &plan, cx).await?; + } + _ => randomly_mutate_fs(client, &plan).await, } - _ => {} } } - Ok(()) } +fn project_for_root_name( + client: &TestClient, + root_name: &str, + cx: &TestAppContext, +) -> Option> { + if let Some(ix) = project_ix_for_root_name(&*client.local_projects(), root_name, cx) { + return Some(client.local_projects()[ix].clone()); + } + if let Some(ix) = project_ix_for_root_name(&*client.remote_projects(), root_name, cx) { + return Some(client.remote_projects()[ix].clone()); + } + None +} + +fn project_ix_for_root_name( + projects: &[ModelHandle], + root_name: &str, + cx: &TestAppContext, +) -> Option { + projects.iter().position(|project| { + project.read_with(cx, |project, cx| { + let worktree = project.visible_worktrees(cx).next().unwrap(); + worktree.read(cx).root_name() == root_name + }) + }) +} + +fn root_name_for_project(project: &ModelHandle, cx: &TestAppContext) -> String { + project.read_with(cx, |project, cx| { + project + .visible_worktrees(cx) + .next() + .unwrap() + .read(cx) + .root_name() + .to_string() + }) +} + async fn randomly_mutate_fs(client: &TestClient, plan: &Arc>) { let is_dir = plan.lock().rng.gen::(); let mut new_path = client @@ -862,159 +1171,6 @@ async fn randomly_mutate_fs(client: &TestClient, plan: &Arc>) { } } -async fn randomly_mutate_projects( - client: &TestClient, - plan: &Arc>, - cx: &mut TestAppContext, -) -> Result<()> { - let active_call = cx.read(ActiveCall::global); - let remote_projects = - if let Some(room) = active_call.read_with(cx, |call, _| call.room().cloned()) { - room.read_with(cx, |room, _| { - room.remote_participants() - .values() - .flat_map(|participant| participant.projects.clone()) - .collect::>() - }) - } else { - Default::default() - }; - - let project = if remote_projects.is_empty() || plan.lock().rng.gen() { - if client.local_projects().is_empty() || plan.lock().rng.gen() { - let paths = client.fs.paths().await; - let local_project = if paths.is_empty() || plan.lock().rng.gen() { - let root_path = client.create_new_root_dir(); - client.fs.create_dir(&root_path).await.unwrap(); - client - .fs - .create_file(&root_path.join("main.rs"), Default::default()) - .await - .unwrap(); - log::info!( - "{}: opening local project at {:?}", - client.username, - root_path - ); - client.build_local_project(root_path, cx).await.0 - } else { - let root_path = paths.choose(&mut plan.lock().rng).unwrap(); - log::info!( - "{}: opening local project at {:?}", - client.username, - root_path - ); - client.build_local_project(root_path, cx).await.0 - }; - client.local_projects_mut().push(local_project.clone()); - local_project - } else { - client - .local_projects() - .choose(&mut plan.lock().rng) - .unwrap() - .clone() - } - } else { - if client.remote_projects().is_empty() 
|| plan.lock().rng.gen() { - let remote_project_id = remote_projects.choose(&mut plan.lock().rng).unwrap().id; - let remote_projects = client.remote_projects().clone(); - let remote_project = if let Some(project) = remote_projects - .iter() - .find(|project| { - project.read_with(cx, |project, _| { - project.remote_id() == Some(remote_project_id) - }) - }) - .cloned() - { - project - } else { - log::info!( - "{}: opening remote project {}", - client.username, - remote_project_id - ); - let call = cx.read(ActiveCall::global); - let room = call.read_with(cx, |call, _| call.room().unwrap().clone()); - let remote_project = room - .update(cx, |room, cx| { - room.join_project( - remote_project_id, - client.language_registry.clone(), - FakeFs::new(cx.background().clone()), - cx, - ) - }) - .await?; - client.remote_projects_mut().push(remote_project.clone()); - remote_project - }; - - remote_project - } else { - client - .remote_projects() - .choose(&mut plan.lock().rng) - .unwrap() - .clone() - } - }; - - if active_call.read_with(cx, |call, _| call.room().is_some()) - && project.read_with(cx, |project, _| project.is_local() && !project.is_shared()) - { - match active_call - .update(cx, |call, cx| call.share_project(project.clone(), cx)) - .await - { - Ok(project_id) => { - log::info!("{}: shared project with id {}", client.username, project_id); - } - Err(error) => { - log::error!("{}: error sharing project, {:?}", client.username, error); - } - } - } - - let choice = plan.lock().rng.gen_range(0..100); - match choice { - 0..=19 if project.read_with(cx, |project, _| project.is_local()) => { - let paths = client.fs.paths().await; - let path = paths.choose(&mut plan.lock().rng).unwrap(); - log::info!( - "{}: finding/creating local worktree for path {:?}", - client.username, - path - ); - project - .update(cx, |project, cx| { - project.find_or_create_local_worktree(&path, true, cx) - }) - .await - .unwrap(); - } - 20..=24 if project.read_with(cx, |project, _| project.is_remote()) => { - log::info!( - "{}: dropping remote project {}", - client.username, - project.read_with(cx, |project, _| project.remote_id().unwrap()) - ); - - cx.update(|_| { - client - .remote_projects_mut() - .retain(|remote_project| *remote_project != project); - client.buffers().remove(&project); - drop(project); - }); - } - _ => {} - } - - Ok(()) -} - async fn randomly_mutate_worktrees( client: &TestClient, plan: &Arc>, diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs index 914023f305b4ba27aee8359205f3befc5245503a..3bf6695cced14636e5ce503322e4071db044afcc 100644 --- a/crates/text/src/text.rs +++ b/crates/text/src/text.rs @@ -1429,12 +1429,11 @@ impl Buffer { start..end } - #[allow(clippy::type_complexity)] - pub fn randomly_edit( - &mut self, + pub fn get_random_edits( + &self, rng: &mut T, edit_count: usize, - ) -> (Vec<(Range, Arc)>, Operation) + ) -> Vec<(Range, Arc)> where T: rand::Rng, { @@ -1453,8 +1452,21 @@ impl Buffer { edits.push((range, new_text.into())); } + edits + } + #[allow(clippy::type_complexity)] + pub fn randomly_edit( + &mut self, + rng: &mut T, + edit_count: usize, + ) -> (Vec<(Range, Arc)>, Operation) + where + T: rand::Rng, + { + let mut edits = self.get_random_edits(rng, edit_count); log::info!("mutating buffer {} with {:?}", self.replica_id, edits); + let op = self.edit(edits.iter().cloned()); if let Operation::Edit(edit) = &op { assert_eq!(edits.len(), edit.new_text.len()); From f1b3692a354e42039156c735020fb5d46581f72b Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 4 Jan 2023 
18:46:31 -0800 Subject: [PATCH 04/80] Tweak operation rates --- .../src/tests/randomized_integration_tests.rs | 237 ++++++++++-------- 1 file changed, 133 insertions(+), 104 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 64792cf422ea145ba65c262c76f4a8502bd5e4dd..0db56549a6b89e8039d0ac8f81f62a0e145c9eba 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -230,11 +230,7 @@ async fn test_random_collaboration( i += 1; } - Operation::RunUntilParked => { - deterministic.run_until_parked(); - } - - Operation::MutateClients(user_ids) => { + Operation::MutateClients { user_ids, quiesce } => { for user_id in user_ids { let client_ix = clients .iter() @@ -243,6 +239,10 @@ async fn test_random_collaboration( operation_channels[client_ix].unbounded_send(()).unwrap(); i += 1; } + + if quiesce { + deterministic.run_until_parked(); + } } } } @@ -444,12 +444,20 @@ struct UserTestPlan { #[derive(Debug)] enum Operation { - AddConnection { user_id: UserId }, - RemoveConnection { user_id: UserId }, - BounceConnection { user_id: UserId }, + AddConnection { + user_id: UserId, + }, + RemoveConnection { + user_id: UserId, + }, + BounceConnection { + user_id: UserId, + }, RestartServer, - RunUntilParked, - MutateClients(Vec), + MutateClients { + user_ids: Vec, + quiesce: bool, + }, } #[derive(Debug)] @@ -490,7 +498,7 @@ impl TestPlan { async fn next_operation(&mut self, clients: &[(Rc, TestAppContext)]) -> Operation { let operation = loop { break match self.rng.gen_range(0..100) { - 0..=19 if clients.len() < self.users.len() => { + 0..=29 if clients.len() < self.users.len() => { let user = self .users .iter() @@ -501,20 +509,19 @@ impl TestPlan { user_id: user.user_id, } } - 20..=24 if clients.len() > 1 && self.allow_client_disconnection => { + 30..=34 if clients.len() > 1 && self.allow_client_disconnection => { let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; let user_id = client.current_user_id(cx); Operation::RemoveConnection { user_id } } - 25..=29 if clients.len() > 1 && self.allow_client_reconnection => { + 35..=39 if clients.len() > 1 && self.allow_client_reconnection => { let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; let user_id = client.current_user_id(cx); Operation::BounceConnection { user_id } } - 30..=34 if self.allow_server_restarts && clients.len() > 1 => { + 40..=44 if self.allow_server_restarts && clients.len() > 1 => { Operation::RestartServer } - 35..=39 => Operation::RunUntilParked, _ if !clients.is_empty() => { let user_ids = (0..self.rng.gen_range(0..10)) .map(|_| { @@ -523,7 +530,10 @@ impl TestPlan { client.current_user_id(cx) }) .collect(); - Operation::MutateClients(user_ids) + Operation::MutateClients { + user_ids, + quiesce: self.rng.gen(), + } } _ => continue, }; @@ -541,78 +551,95 @@ impl TestPlan { let operation = loop { match self.rng.gen_range(0..100) { // Mutate the call - 0..=19 => match self.rng.gen_range(0..100_u32) { + 0..=29 => { // Respond to an incoming call - 0..=39 => { - if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { - break if self.rng.gen_bool(0.7) { - ClientOperation::AcceptIncomingCall - } else { - ClientOperation::RejectIncomingCall - }; - } + if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { + break if self.rng.gen_bool(0.7) { + ClientOperation::AcceptIncomingCall + } else { + 
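// Earlier in this patch, MutateClients absorbs the old RunUntilParked variant:
// each batch of per-client () tokens is optionally followed by
// deterministic.run_until_parked(), with `quiesce` drawn via gen(). An
// un-quiesced batch lets operations from several clients race in flight, while
// a quiesced one gives the test a point at which every client must converge —
// and one token on a client's channel corresponds to exactly one client
// operation on the receiving side in simulate_client.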
ClientOperation::RejectIncomingCall + }; } - // Invite a contact to the current call - 30..=89 => { - let available_contacts = - client.user_store.read_with(cx, |user_store, _| { - user_store - .contacts() - .iter() - .filter(|contact| contact.online && !contact.busy) - .cloned() - .collect::>() - }); - if !available_contacts.is_empty() { - let contact = available_contacts.choose(&mut self.rng).unwrap(); - break ClientOperation::InviteContactToCall { - user_id: UserId(contact.user.id as i32), - }; + match self.rng.gen_range(0..100_u32) { + // Invite a contact to the current call + 0..=70 => { + let available_contacts = + client.user_store.read_with(cx, |user_store, _| { + user_store + .contacts() + .iter() + .filter(|contact| contact.online && !contact.busy) + .cloned() + .collect::>() + }); + if !available_contacts.is_empty() { + let contact = available_contacts.choose(&mut self.rng).unwrap(); + break ClientOperation::InviteContactToCall { + user_id: UserId(contact.user.id as i32), + }; + } } - } - // Leave the current call - 90.. => { - if self.allow_client_disconnection - && call.read_with(cx, |call, _| call.room().is_some()) - { - break ClientOperation::LeaveCall; + // Leave the current call + 71.. => { + if self.allow_client_disconnection + && call.read_with(cx, |call, _| call.room().is_some()) + { + break ClientOperation::LeaveCall; + } } } - }, + } // Mutate projects - 20..=39 => match self.rng.gen_range(0..100_u32) { - // Open a remote project - 0..=30 => { + 39..=59 => match self.rng.gen_range(0..100_u32) { + // Open a new project + 0..=70 => { + // Open a remote project if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { - let remote_projects = room.read_with(cx, |room, _| { + let existing_remote_project_ids = cx.read(|cx| { + client + .remote_projects() + .iter() + .map(|p| p.read(cx).remote_id().unwrap()) + .collect::>() + }); + let new_remote_projects = room.read_with(cx, |room, _| { room.remote_participants() .values() .flat_map(|participant| { - participant.projects.iter().map(|project| { - ( - UserId::from_proto(participant.user.id), - project.worktree_root_names[0].clone(), - ) + participant.projects.iter().filter_map(|project| { + if existing_remote_project_ids.contains(&project.id) { + None + } else { + Some(( + UserId::from_proto(participant.user.id), + project.worktree_root_names[0].clone(), + )) + } }) }) .collect::>() }); - if !remote_projects.is_empty() { + if !new_remote_projects.is_empty() { let (host_id, first_root_name) = - remote_projects.choose(&mut self.rng).unwrap().clone(); + new_remote_projects.choose(&mut self.rng).unwrap().clone(); break ClientOperation::OpenRemoteProject { host_id, first_root_name, }; } } + // Open a local project + else { + let first_root_name = self.next_root_dir_name(user_id); + break ClientOperation::OpenLocalProject { first_root_name }; + } } // Close a remote project - 31..=40 => { + 71..=80 => { if !client.remote_projects().is_empty() { let project = client .remote_projects() @@ -626,14 +653,8 @@ impl TestPlan { } } - // Open a local project - 41..=60 => { - let first_root_name = self.next_root_dir_name(user_id); - break ClientOperation::OpenLocalProject { first_root_name }; - } - // Add a worktree to a local project - 61.. => { + 81.. => { if !client.local_projects().is_empty() { let project = client .local_projects() @@ -659,7 +680,7 @@ impl TestPlan { }, // Mutate buffers - 40..=79 => { + 60.. 
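// Bucket arithmetic for the re-tuned distribution above: the call bucket is
// now 0..=29 and the project bucket 39..=59, so rolls of 30..=38 fall through
// to the catch-all Other arm rather than mutating projects — presumably
// 30..=59 was intended. With the buffer bucket below widened to 60..,
// ClientOperation::Other is reachable only through that nine-value gap.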
=> { let Some(project) = choose_random_project(client, &mut self.rng) else { continue }; let project_root_name = root_name_for_project(&project, cx); @@ -871,9 +892,8 @@ async fn simulate_client( let operation = plan.lock().next_client_operation(&client, &cx).await; if let Err(error) = apply_client_operation(&client, plan.clone(), operation, &mut cx).await { - log::error!("{} error: {:?}", client.username, error); + log::error!("{} error: {}", client.username, error); } - cx.background().simulate_random_delay().await; } log::info!("{}: done", client.username); @@ -928,34 +948,7 @@ async fn apply_client_operation( .await .unwrap(); let project = client.build_local_project(root_path, cx).await.0; - - let active_call = cx.read(ActiveCall::global); - if active_call.read_with(cx, |call, _| call.room().is_some()) - && project.read_with(cx, |project, _| project.is_local() && !project.is_shared()) - { - match active_call - .update(cx, |call, cx| call.share_project(project.clone(), cx)) - .await - { - Ok(project_id) => { - log::info!( - "{}: shared project {} with id {}", - client.username, - first_root_name, - project_id - ); - } - Err(error) => { - log::error!( - "{}: error sharing project {}: {:?}", - client.username, - first_root_name, - error - ); - } - } - } - + ensure_project_shared(&project, client, cx).await; client.local_projects_mut().push(project.clone()); } @@ -971,6 +964,7 @@ async fn apply_client_operation( ); let project = project_for_root_name(client, &project_root_name, cx) .expect("invalid project in test operation"); + ensure_project_shared(&project, client, cx).await; if !client.fs.paths().await.contains(&new_root_path) { client.fs.create_dir(&new_root_path).await.unwrap(); } @@ -984,13 +978,13 @@ async fn apply_client_operation( ClientOperation::CloseRemoteProject { project_root_name } => { log::info!( - "{}: dropping project with root path {}", + "{}: closing remote project with root path {}", client.username, project_root_name, ); let ix = project_ix_for_root_name(&*client.remote_projects(), &project_root_name, cx) .expect("invalid project in test operation"); - client.remote_projects_mut().remove(ix); + cx.update(|_| client.remote_projects_mut().remove(ix)); } ClientOperation::OpenRemoteProject { @@ -1027,13 +1021,14 @@ async fn apply_client_operation( full_path, } => { log::info!( - "{}: opening path {:?} in project {}", + "{}: opening buffer {:?} in project {}", client.username, full_path, project_root_name, ); let project = project_for_root_name(client, &project_root_name, cx) .expect("invalid project in test operation"); + // ensure_project_shared(&project, client, cx).await; let mut components = full_path.components(); let root_name = components.next().unwrap().as_os_str().to_str().unwrap(); let path = components.as_path(); @@ -1086,10 +1081,10 @@ async fn apply_client_operation( }); } - _ => { + ClientOperation::Other => { let choice = plan.lock().rng.gen_range(0..100); match choice { - 50..=59 + 0..=59 if !client.local_projects().is_empty() || !client.remote_projects().is_empty() => { @@ -1147,6 +1142,40 @@ fn root_name_for_project(project: &ModelHandle, cx: &TestAppContext) -> }) } +async fn ensure_project_shared( + project: &ModelHandle, + client: &TestClient, + cx: &mut TestAppContext, +) { + let first_root_name = root_name_for_project(project, cx); + let active_call = cx.read(ActiveCall::global); + if active_call.read_with(cx, |call, _| call.room().is_some()) + && project.read_with(cx, |project, _| project.is_local() && !project.is_shared()) + { + match 
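// ensure_project_shared, introduced here, is deliberately idempotent: it
// re-derives "should this project be shared?" from current state — in a call,
// project is local, not yet shared — instead of tracking a flag. That lets
// OpenLocalProject and AddWorktreeToProject call it unconditionally before
// touching the project, replacing the inline sharing block this patch deletes
// from the OpenLocalProject arm.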
active_call + .update(cx, |call, cx| call.share_project(project.clone(), cx)) + .await + { + Ok(project_id) => { + log::info!( + "{}: shared project {} with id {}", + client.username, + first_root_name, + project_id + ); + } + Err(error) => { + log::error!( + "{}: error sharing project {}: {:?}", + client.username, + first_root_name, + error + ); + } + } + } +} + async fn randomly_mutate_fs(client: &TestClient, plan: &Arc>) { let is_dir = plan.lock().rng.gen::(); let mut new_path = client From 210286da489c782f3441ff37af73dc64d2bdd3e0 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 6 Jan 2023 12:55:50 -0800 Subject: [PATCH 05/80] Make operations for all buffer manipulations --- .../src/tests/randomized_integration_tests.rs | 536 ++++++++---------- 1 file changed, 252 insertions(+), 284 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 0db56549a6b89e8039d0ac8f81f62a0e145c9eba..86ca673df42fb9728216e41450f08636fcb490a0 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -6,7 +6,7 @@ use crate::{ use anyhow::{anyhow, Result}; use call::ActiveCall; use client::RECEIVE_TIMEOUT; -use collections::BTreeMap; +use collections::{BTreeMap, HashSet}; use fs::Fs as _; use futures::StreamExt as _; use gpui::{executor::Deterministic, ModelHandle, TestAppContext}; @@ -486,14 +486,44 @@ enum ClientOperation { project_root_name: String, full_path: PathBuf, }, + SearchProject { + project_root_name: String, + query: String, + detach: bool, + }, EditBuffer { project_root_name: String, full_path: PathBuf, edits: Vec<(Range, Arc)>, }, + CloseBuffer { + project_root_name: String, + full_path: PathBuf, + }, + SaveBuffer { + project_root_name: String, + full_path: PathBuf, + detach: bool, + }, + RequestLspDataInBuffer { + project_root_name: String, + full_path: PathBuf, + offset: usize, + kind: LspRequestKind, + detach: bool, + }, Other, } +#[derive(Debug)] +enum LspRequestKind { + Rename, + Completion, + CodeAction, + Definition, + Highlights, +} + impl TestPlan { async fn next_operation(&mut self, clients: &[(Rc, TestAppContext)]) -> Operation { let operation = loop { @@ -679,27 +709,44 @@ impl TestPlan { } }, - // Mutate buffers + // Query and mutate buffers 60.. 
=> { let Some(project) = choose_random_project(client, &mut self.rng) else { continue }; let project_root_name = root_name_for_project(&project, cx); match self.rng.gen_range(0..100_u32) { // Manipulate an existing buffer - 0..=80 => { + 0..=70 => { let Some(buffer) = client .buffers_for_project(&project) .iter() .choose(&mut self.rng) .cloned() else { continue }; + let full_path = buffer + .read_with(cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); + match self.rng.gen_range(0..100_u32) { - 0..=9 => { - let (full_path, edits) = buffer.read_with(cx, |buffer, cx| { - ( - buffer.file().unwrap().full_path(cx), - buffer.get_random_edits(&mut self.rng, 3), - ) + // Close the buffer + 0..=15 => { + break ClientOperation::CloseBuffer { + project_root_name, + full_path, + }; + } + // Save the buffer + 16..=29 if buffer.read_with(cx, |b, _| b.is_dirty()) => { + let detach = self.rng.gen_bool(0.3); + break ClientOperation::SaveBuffer { + project_root_name, + full_path, + detach, + }; + } + // Edit the buffer + 30..=69 => { + let edits = buffer.read_with(cx, |buffer, _| { + buffer.get_random_edits(&mut self.rng, 3) }); break ClientOperation::EditBuffer { project_root_name, @@ -707,10 +754,42 @@ impl TestPlan { edits, }; } - _ => {} + // Make an LSP request + _ => { + let offset = buffer.read_with(cx, |buffer, _| { + buffer.clip_offset( + self.rng.gen_range(0..=buffer.len()), + language::Bias::Left, + ) + }); + let detach = self.rng.gen(); + break ClientOperation::RequestLspDataInBuffer { + project_root_name, + full_path, + offset, + kind: match self.rng.gen_range(0..5_u32) { + 0 => LspRequestKind::Rename, + 1 => LspRequestKind::Highlights, + 2 => LspRequestKind::Definition, + 3 => LspRequestKind::CodeAction, + 4.. => LspRequestKind::Completion, + }, + detach, + }; + } } } + 71..=80 => { + let query = self.rng.gen_range('a'..='z').to_string(); + let detach = self.rng.gen_bool(0.3); + break ClientOperation::SearchProject { + project_root_name, + query, + detach, + }; + } + // Open a buffer 81.. 
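// The detach flags sampled above decide how the apply side treats each
// request's future: saves and project searches detach with gen_bool(0.3), LSP
// requests with a fair gen(). A detached task keeps running with nobody
// observing its result, while an awaited one propagates errors into the test —
// both paths are worth exercising against a server that may disconnect
// mid-request.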
=> { let worktree = project.read_with(cx, |project, cx| { @@ -1066,21 +1145,159 @@ async fn apply_client_operation( ); let project = project_for_root_name(client, &project_root_name, cx) .expect("invalid project in test operation"); - let buffer = client - .buffers_for_project(&project) - .iter() - .find(|buffer| { - buffer.read_with(cx, |buffer, cx| { - buffer.file().unwrap().full_path(cx) == full_path - }) - }) - .cloned() - .expect("invalid buffer path in test operation"); + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); buffer.update(cx, |buffer, cx| { buffer.edit(edits, None, cx); }); } + ClientOperation::CloseBuffer { + project_root_name, + full_path, + } => { + log::info!( + "{}: dropping buffer {:?} in project {}", + client.username, + full_path, + project_root_name + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); + cx.update(|_| { + client.buffers_for_project(&project).remove(&buffer); + drop(buffer); + }); + } + + ClientOperation::SaveBuffer { + project_root_name, + full_path, + detach, + } => { + log::info!( + "{}: saving buffer {:?} in project {}{}", + client.username, + full_path, + project_root_name, + if detach { ", detaching" } else { ", awaiting" } + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); + let (requested_version, save) = + buffer.update(cx, |buffer, cx| (buffer.version(), buffer.save(cx))); + let save = cx.background().spawn(async move { + let (saved_version, _, _) = save + .await + .map_err(|err| anyhow!("save request failed: {:?}", err))?; + assert!(saved_version.observed_all(&requested_version)); + anyhow::Ok(()) + }); + if detach { + log::info!("{}: detaching save request", client.username); + cx.update(|cx| save.detach_and_log_err(cx)); + } else { + save.await?; + } + } + + ClientOperation::RequestLspDataInBuffer { + project_root_name, + full_path, + offset, + kind, + detach, + } => { + log::info!( + "{}: request LSP {:?} for buffer {:?} in project {}{}", + client.username, + kind, + full_path, + project_root_name, + if detach { ", detaching" } else { ", awaiting" } + ); + + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); + let request = match kind { + LspRequestKind::Rename => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.prepare_rename(buffer, offset, cx)) + .await?; + anyhow::Ok(()) + }), + LspRequestKind::Completion => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.completions(&buffer, offset, cx)) + .await?; + Ok(()) + }), + LspRequestKind::CodeAction => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.code_actions(&buffer, offset..offset, cx)) + .await?; + Ok(()) + }), + LspRequestKind::Definition => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.definition(&buffer, offset, cx)) + .await?; + Ok(()) + }), + 
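// Two notes on the apply arms in this hunk. The SaveBuffer arm captures
// buffer.version() *before* awaiting the save and then asserts
// saved_version.observed_all(&requested_version), i.e. the saved snapshot must
// contain every edit that existed when the save was requested. And each
// LspRequestKind arm follows one template — wrap the project call in cx.spawn
// so the future owns its handles, normalize to anyhow::Ok(()), then detach or
// await. (The SearchProject arm further below logs "detaching save request"
// where "search request" is meant — a copy-paste from the save arm.)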
LspRequestKind::Highlights => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.document_highlights(&buffer, offset, cx)) + .await?; + Ok(()) + }), + }; + if detach { + request.detach(); + } else { + request.await?; + } + } + + ClientOperation::SearchProject { + project_root_name, + query, + detach, + } => { + log::info!( + "{}: search project {} for {:?}{}", + client.username, + project_root_name, + query, + if detach { ", detaching" } else { ", awaiting" } + ); + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + let search = project.update(cx, |project, cx| { + project.search(SearchQuery::text(query, false, false), cx) + }); + let search = cx.background().spawn(async move { + search + .await + .map_err(|err| anyhow!("search request failed: {:?}", err)) + }); + if detach { + log::info!("{}: detaching save request", client.username); + cx.update(|cx| search.detach_and_log_err(cx)); + } else { + search.await?; + } + } + ClientOperation::Other => { let choice = plan.lock().rng.gen_range(0..100); match choice { @@ -1090,12 +1307,6 @@ async fn apply_client_operation( { randomly_mutate_worktrees(client, &plan, cx).await?; } - 60..=84 - if !client.local_projects().is_empty() - || !client.remote_projects().is_empty() => - { - randomly_query_and_mutate_buffers(client, &plan, cx).await?; - } _ => randomly_mutate_fs(client, &plan).await, } } @@ -1103,6 +1314,21 @@ async fn apply_client_operation( Ok(()) } +fn buffer_for_full_path( + buffers: &HashSet>, + full_path: &PathBuf, + cx: &TestAppContext, +) -> Option> { + buffers + .iter() + .find(|buffer| { + buffer.read_with(cx, |buffer, cx| { + buffer.file().unwrap().full_path(cx) == *full_path + }) + }) + .cloned() +} + fn project_for_root_name( client: &TestClient, root_name: &str, @@ -1246,264 +1472,6 @@ async fn randomly_mutate_worktrees( Ok(()) } -async fn randomly_query_and_mutate_buffers( - client: &TestClient, - plan: &Arc>, - cx: &mut TestAppContext, -) -> Result<()> { - let project = choose_random_project(client, &mut plan.lock().rng).unwrap(); - let has_buffers_for_project = !client.buffers_for_project(&project).is_empty(); - let buffer = if !has_buffers_for_project || plan.lock().rng.gen() { - let Some(worktree) = project.read_with(cx, |project, cx| { - project - .worktrees(cx) - .filter(|worktree| { - let worktree = worktree.read(cx); - worktree.is_visible() && worktree.entries(false).any(|e| e.is_file()) - }) - .choose(&mut plan.lock().rng) - }) else { - return Ok(()); - }; - - let (worktree_root_name, project_path) = worktree.read_with(cx, |worktree, _| { - let entry = worktree - .entries(false) - .filter(|e| e.is_file()) - .choose(&mut plan.lock().rng) - .unwrap(); - ( - worktree.root_name().to_string(), - (worktree.id(), entry.path.clone()), - ) - }); - log::info!( - "{}: opening path {:?} in worktree {} ({})", - client.username, - project_path.1, - project_path.0, - worktree_root_name, - ); - let buffer = project - .update(cx, |project, cx| { - project.open_buffer(project_path.clone(), cx) - }) - .await?; - log::info!( - "{}: opened path {:?} in worktree {} ({}) with buffer id {}", - client.username, - project_path.1, - project_path.0, - worktree_root_name, - buffer.read_with(cx, |buffer, _| buffer.remote_id()) - ); - client.buffers_for_project(&project).insert(buffer.clone()); - buffer - } else { - client - .buffers_for_project(&project) - .iter() - .choose(&mut plan.lock().rng) - .unwrap() - .clone() - }; - - let choice = 
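// ClientOperation::Other is the escape hatch of this migration: behavior not
// yet modeled as an explicit operation still runs through the legacy
// randomly_* helpers here. This patch shrinks it — buffer queries and
// mutations now arrive as typed operations, so randomly_query_and_mutate_buffers
// is deleted below and only worktree and filesystem mutation remain random.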
plan.lock().rng.gen_range(0..100); - match choice { - 0..=9 => { - cx.update(|cx| { - log::info!( - "{}: dropping buffer {:?}", - client.username, - buffer.read(cx).file().unwrap().full_path(cx) - ); - client.buffers_for_project(&project).remove(&buffer); - drop(buffer); - }); - } - 10..=19 => { - let completions = project.update(cx, |project, cx| { - log::info!( - "{}: requesting completions for buffer {} ({:?})", - client.username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len()); - project.completions(&buffer, offset, cx) - }); - let completions = cx.background().spawn(async move { - completions - .await - .map_err(|err| anyhow!("completions request failed: {:?}", err)) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching completions request", client.username); - cx.update(|cx| completions.detach_and_log_err(cx)); - } else { - completions.await?; - } - } - 20..=29 => { - let code_actions = project.update(cx, |project, cx| { - log::info!( - "{}: requesting code actions for buffer {} ({:?})", - client.username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let range = buffer.read(cx).random_byte_range(0, &mut plan.lock().rng); - project.code_actions(&buffer, range, cx) - }); - let code_actions = cx.background().spawn(async move { - code_actions - .await - .map_err(|err| anyhow!("code actions request failed: {:?}", err)) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching code actions request", client.username); - cx.update(|cx| code_actions.detach_and_log_err(cx)); - } else { - code_actions.await?; - } - } - 30..=39 if buffer.read_with(cx, |buffer, _| buffer.is_dirty()) => { - let (requested_version, save) = buffer.update(cx, |buffer, cx| { - log::info!( - "{}: saving buffer {} ({:?})", - client.username, - buffer.remote_id(), - buffer.file().unwrap().full_path(cx) - ); - (buffer.version(), buffer.save(cx)) - }); - let save = cx.background().spawn(async move { - let (saved_version, _, _) = save - .await - .map_err(|err| anyhow!("save request failed: {:?}", err))?; - assert!(saved_version.observed_all(&requested_version)); - Ok::<_, anyhow::Error>(()) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching save request", client.username); - cx.update(|cx| save.detach_and_log_err(cx)); - } else { - save.await?; - } - } - 40..=44 => { - let prepare_rename = project.update(cx, |project, cx| { - log::info!( - "{}: preparing rename for buffer {} ({:?})", - client.username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len()); - project.prepare_rename(buffer, offset, cx) - }); - let prepare_rename = cx.background().spawn(async move { - prepare_rename - .await - .map_err(|err| anyhow!("prepare rename request failed: {:?}", err)) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching prepare rename request", client.username); - cx.update(|cx| prepare_rename.detach_and_log_err(cx)); - } else { - prepare_rename.await?; - } - } - 45..=49 => { - let definitions = project.update(cx, |project, cx| { - log::info!( - "{}: requesting definitions for buffer {} ({:?})", - client.username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len()); - project.definition(&buffer, offset, cx) - }); - let definitions 
= cx.background().spawn(async move { - definitions - .await - .map_err(|err| anyhow!("definitions request failed: {:?}", err)) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching definitions request", client.username); - cx.update(|cx| definitions.detach_and_log_err(cx)); - } else { - let definitions = definitions.await?; - client - .buffers_for_project(&project) - .extend(definitions.into_iter().map(|loc| loc.target.buffer)); - } - } - 50..=54 => { - let highlights = project.update(cx, |project, cx| { - log::info!( - "{}: requesting highlights for buffer {} ({:?})", - client.username, - buffer.read(cx).remote_id(), - buffer.read(cx).file().unwrap().full_path(cx) - ); - let offset = plan.lock().rng.gen_range(0..=buffer.read(cx).len()); - project.document_highlights(&buffer, offset, cx) - }); - let highlights = cx.background().spawn(async move { - highlights - .await - .map_err(|err| anyhow!("highlights request failed: {:?}", err)) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching highlights request", client.username); - cx.update(|cx| highlights.detach_and_log_err(cx)); - } else { - highlights.await?; - } - } - 55..=59 => { - let search = project.update(cx, |project, cx| { - let query = plan.lock().rng.gen_range('a'..='z'); - log::info!("{}: project-wide search {:?}", client.username, query); - project.search(SearchQuery::text(query, false, false), cx) - }); - let search = cx.background().spawn(async move { - search - .await - .map_err(|err| anyhow!("search request failed: {:?}", err)) - }); - if plan.lock().rng.gen_bool(0.3) { - log::info!("{}: detaching search request", client.username); - cx.update(|cx| search.detach_and_log_err(cx)); - } else { - let search = search.await?; - client - .buffers_for_project(&project) - .extend(search.into_keys()); - } - } - _ => { - buffer.update(cx, |buffer, cx| { - log::info!( - "{}: updating buffer {} ({:?})", - client.username, - buffer.remote_id(), - buffer.file().unwrap().full_path(cx) - ); - if plan.lock().rng.gen_bool(0.7) { - buffer.randomly_edit(&mut plan.lock().rng, 5, cx); - } else { - buffer.randomly_undo_redo(&mut plan.lock().rng, cx); - } - }); - } - } - - Ok(()) -} - fn choose_random_project(client: &TestClient, rng: &mut StdRng) -> Option> { client .local_projects() From 99390a7237344e41918da4b9cc09034e6d04ffaa Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 6 Jan 2023 15:30:49 -0800 Subject: [PATCH 06/80] Represent all randomized test actions as operations --- .../src/tests/randomized_integration_tests.rs | 1831 +++++++++-------- 1 file changed, 953 insertions(+), 878 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 86ca673df42fb9728216e41450f08636fcb490a0..9cdc05833e331f5a9deb1f5ce691537c01815df9 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -13,7 +13,7 @@ use gpui::{executor::Deterministic, ModelHandle, TestAppContext}; use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16}; use lsp::FakeLanguageServer; use parking_lot::Mutex; -use project::{search::SearchQuery, Project}; +use project::{search::SearchQuery, Project, ProjectPath}; use rand::prelude::*; use std::{ env, @@ -22,6 +22,7 @@ use std::{ rc::Rc, sync::Arc, }; +use util::ResultExt; #[gpui::test(iterations = 100)] async fn test_random_collaboration( @@ -84,10 +85,12 @@ async fn test_random_collaboration( } let plan = 
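// Patch 06 (starting above) moves the operation budget into the plan itself:
// TestPlan gains operation_ix and max_operations, and the driver loop asks
// `let Some(next_operation) = plan.lock().next_operation(..).await else { break }`
// instead of counting iterations externally. One plausible shape for the
// budget check, assuming exhaustion simply yields None:
//
//     if self.operation_ix >= self.max_operations {
//         return None;
//     }
//     self.operation_ix += 1;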
Arc::new(Mutex::new(TestPlan { - users, allow_server_restarts: rng.gen_bool(0.7), allow_client_reconnection: rng.gen_bool(0.7), allow_client_disconnection: rng.gen_bool(0.1), + operation_ix: 0, + max_operations, + users, rng, })); @@ -96,9 +99,8 @@ async fn test_random_collaboration( let mut operation_channels = Vec::new(); let mut next_entity_id = 100000; - let mut i = 0; - while i < max_operations { - let next_operation = plan.lock().next_operation(&clients).await; + loop { + let Some(next_operation) = plan.lock().next_operation(&clients).await else { break }; match next_operation { Operation::AddConnection { user_id } => { let username = { @@ -132,7 +134,6 @@ async fn test_random_collaboration( ))); log::info!("Added connection for {}", username); - i += 1; } Operation::RemoveConnection { user_id } => { @@ -196,7 +197,6 @@ async fn test_random_collaboration( cx.clear_globals(); drop(client); }); - i += 1; } Operation::BounceConnection { user_id } => { @@ -210,7 +210,6 @@ async fn test_random_collaboration( let peer_id = user_connection_ids[0].into(); server.disconnect_client(peer_id); deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); - i += 1; } Operation::RestartServer => { @@ -227,7 +226,6 @@ async fn test_random_collaboration( .await .unwrap(); assert_eq!(stale_room_ids, vec![]); - i += 1; } Operation::MutateClients { user_ids, quiesce } => { @@ -237,7 +235,6 @@ async fn test_random_collaboration( .position(|(client, cx)| client.current_user_id(cx) == user_id) .unwrap(); operation_channels[client_ix].unbounded_send(()).unwrap(); - i += 1; } if quiesce { @@ -427,891 +424,1018 @@ async fn test_random_collaboration( } } -struct TestPlan { - rng: StdRng, - users: Vec, - allow_server_restarts: bool, - allow_client_reconnection: bool, - allow_client_disconnection: bool, -} +async fn apply_client_operation( + client: &TestClient, + operation: ClientOperation, + cx: &mut TestAppContext, +) -> Result<()> { + match operation { + ClientOperation::AcceptIncomingCall => { + log::info!("{}: accepting incoming call", client.username); -struct UserTestPlan { - user_id: UserId, - username: String, - next_root_id: usize, - online: bool, -} + let active_call = cx.read(ActiveCall::global); + active_call + .update(cx, |call, cx| call.accept_incoming(cx)) + .await?; + } -#[derive(Debug)] -enum Operation { - AddConnection { - user_id: UserId, - }, - RemoveConnection { - user_id: UserId, - }, - BounceConnection { - user_id: UserId, - }, - RestartServer, - MutateClients { - user_ids: Vec, - quiesce: bool, - }, -} + ClientOperation::RejectIncomingCall => { + log::info!("{}: declining incoming call", client.username); -#[derive(Debug)] -enum ClientOperation { - AcceptIncomingCall, - RejectIncomingCall, - LeaveCall, - InviteContactToCall { - user_id: UserId, - }, - OpenLocalProject { - first_root_name: String, - }, - OpenRemoteProject { - host_id: UserId, - first_root_name: String, - }, - AddWorktreeToProject { - project_root_name: String, - new_root_path: PathBuf, - }, - CloseRemoteProject { - project_root_name: String, - }, - OpenBuffer { - project_root_name: String, - full_path: PathBuf, - }, - SearchProject { - project_root_name: String, - query: String, - detach: bool, - }, - EditBuffer { - project_root_name: String, - full_path: PathBuf, - edits: Vec<(Range, Arc)>, - }, - CloseBuffer { - project_root_name: String, - full_path: PathBuf, - }, - SaveBuffer { - project_root_name: String, - full_path: PathBuf, - detach: bool, - }, - RequestLspDataInBuffer { - project_root_name: String, - 
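// By this point every ClientOperation variant is fully self-describing — root
// names, full paths, byte offsets, even literal edit ranges and replacement
// text — rather than holding live handles. That keeps the enum Debug-printable
// and is what makes a recorded operation sequence replayable when minimizing a
// failing seed.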
full_path: PathBuf, - offset: usize, - kind: LspRequestKind, - detach: bool, - }, - Other, -} + let active_call = cx.read(ActiveCall::global); + active_call.update(cx, |call, _| call.decline_incoming())?; + } -#[derive(Debug)] -enum LspRequestKind { - Rename, - Completion, - CodeAction, - Definition, - Highlights, -} + ClientOperation::LeaveCall => { + log::info!("{}: hanging up", client.username); -impl TestPlan { - async fn next_operation(&mut self, clients: &[(Rc, TestAppContext)]) -> Operation { - let operation = loop { - break match self.rng.gen_range(0..100) { - 0..=29 if clients.len() < self.users.len() => { - let user = self - .users - .iter() - .filter(|u| !u.online) - .choose(&mut self.rng) - .unwrap(); - Operation::AddConnection { - user_id: user.user_id, - } - } - 30..=34 if clients.len() > 1 && self.allow_client_disconnection => { - let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; - let user_id = client.current_user_id(cx); - Operation::RemoveConnection { user_id } - } - 35..=39 if clients.len() > 1 && self.allow_client_reconnection => { - let (client, cx) = &clients[self.rng.gen_range(0..clients.len())]; - let user_id = client.current_user_id(cx); - Operation::BounceConnection { user_id } - } - 40..=44 if self.allow_server_restarts && clients.len() > 1 => { - Operation::RestartServer - } - _ if !clients.is_empty() => { - let user_ids = (0..self.rng.gen_range(0..10)) - .map(|_| { - let ix = self.rng.gen_range(0..clients.len()); - let (client, cx) = &clients[ix]; - client.current_user_id(cx) - }) - .collect(); - Operation::MutateClients { - user_ids, - quiesce: self.rng.gen(), - } - } - _ => continue, - }; - }; - operation - } + let active_call = cx.read(ActiveCall::global); + active_call.update(cx, |call, cx| call.hang_up(cx))?; + } - async fn next_client_operation( - &mut self, - client: &TestClient, - cx: &TestAppContext, - ) -> ClientOperation { - let user_id = client.current_user_id(cx); - let call = cx.read(ActiveCall::global); - let operation = loop { - match self.rng.gen_range(0..100) { - // Mutate the call - 0..=29 => { - // Respond to an incoming call - if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) { - break if self.rng.gen_bool(0.7) { - ClientOperation::AcceptIncomingCall - } else { - ClientOperation::RejectIncomingCall - }; - } + ClientOperation::InviteContactToCall { user_id } => { + log::info!("{}: inviting {}", client.username, user_id,); - match self.rng.gen_range(0..100_u32) { - // Invite a contact to the current call - 0..=70 => { - let available_contacts = - client.user_store.read_with(cx, |user_store, _| { - user_store - .contacts() - .iter() - .filter(|contact| contact.online && !contact.busy) - .cloned() - .collect::>() - }); - if !available_contacts.is_empty() { - let contact = available_contacts.choose(&mut self.rng).unwrap(); - break ClientOperation::InviteContactToCall { - user_id: UserId(contact.user.id as i32), - }; - } - } + let active_call = cx.read(ActiveCall::global); + active_call + .update(cx, |call, cx| call.invite(user_id.to_proto(), None, cx)) + .await + .log_err(); + } - // Leave the current call - 71.. 
=> { - if self.allow_client_disconnection - && call.read_with(cx, |call, _| call.room().is_some()) - { - break ClientOperation::LeaveCall; - } - } - } - } + ClientOperation::OpenLocalProject { first_root_name } => { + log::info!( + "{}: opening local project at {:?}", + client.username, + first_root_name + ); - // Mutate projects - 39..=59 => match self.rng.gen_range(0..100_u32) { - // Open a new project - 0..=70 => { - // Open a remote project - if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { - let existing_remote_project_ids = cx.read(|cx| { - client - .remote_projects() - .iter() - .map(|p| p.read(cx).remote_id().unwrap()) - .collect::>() - }); - let new_remote_projects = room.read_with(cx, |room, _| { - room.remote_participants() - .values() - .flat_map(|participant| { - participant.projects.iter().filter_map(|project| { - if existing_remote_project_ids.contains(&project.id) { - None - } else { - Some(( - UserId::from_proto(participant.user.id), - project.worktree_root_names[0].clone(), - )) - } - }) - }) - .collect::>() - }); - if !new_remote_projects.is_empty() { - let (host_id, first_root_name) = - new_remote_projects.choose(&mut self.rng).unwrap().clone(); - break ClientOperation::OpenRemoteProject { - host_id, - first_root_name, - }; - } - } - // Open a local project - else { - let first_root_name = self.next_root_dir_name(user_id); - break ClientOperation::OpenLocalProject { first_root_name }; - } - } - - // Close a remote project - 71..=80 => { - if !client.remote_projects().is_empty() { - let project = client - .remote_projects() - .choose(&mut self.rng) - .unwrap() - .clone(); - let first_root_name = root_name_for_project(&project, cx); - break ClientOperation::CloseRemoteProject { - project_root_name: first_root_name, - }; - } - } + let root_path = Path::new("/").join(&first_root_name); + client.fs.create_dir(&root_path).await.unwrap(); + client + .fs + .create_file(&root_path.join("main.rs"), Default::default()) + .await + .unwrap(); + let project = client.build_local_project(root_path, cx).await.0; + ensure_project_shared(&project, client, cx).await; + client.local_projects_mut().push(project.clone()); + } - // Add a worktree to a local project - 81.. 
=> { - if !client.local_projects().is_empty() { - let project = client - .local_projects() - .choose(&mut self.rng) - .unwrap() - .clone(); - let project_root_name = root_name_for_project(&project, cx); + ClientOperation::AddWorktreeToProject { + project_root_name, + new_root_path, + } => { + log::info!( + "{}: finding/creating local worktree at {:?} to project with root path {}", + client.username, + new_root_path, + project_root_name + ); - let mut paths = client.fs.paths().await; - paths.remove(0); - let new_root_path = if paths.is_empty() || self.rng.gen() { - Path::new("/").join(&self.next_root_dir_name(user_id)) - } else { - paths.choose(&mut self.rng).unwrap().clone() - }; + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + ensure_project_shared(&project, client, cx).await; + if !client.fs.paths().await.contains(&new_root_path) { + client.fs.create_dir(&new_root_path).await.unwrap(); + } + project + .update(cx, |project, cx| { + project.find_or_create_local_worktree(&new_root_path, true, cx) + }) + .await + .unwrap(); + } - break ClientOperation::AddWorktreeToProject { - project_root_name, - new_root_path, - }; - } - } - }, + ClientOperation::CloseRemoteProject { project_root_name } => { + log::info!( + "{}: closing remote project with root path {}", + client.username, + project_root_name, + ); - // Query and mutate buffers - 60.. => { - let Some(project) = choose_random_project(client, &mut self.rng) else { continue }; - let project_root_name = root_name_for_project(&project, cx); + let ix = project_ix_for_root_name(&*client.remote_projects(), &project_root_name, cx) + .expect("invalid project in test operation"); + cx.update(|_| client.remote_projects_mut().remove(ix)); + } - match self.rng.gen_range(0..100_u32) { - // Manipulate an existing buffer - 0..=70 => { - let Some(buffer) = client - .buffers_for_project(&project) - .iter() - .choose(&mut self.rng) - .cloned() else { continue }; + ClientOperation::OpenRemoteProject { + host_id, + first_root_name, + } => { + log::info!( + "{}: joining remote project of user {}, root name {}", + client.username, + host_id, + first_root_name, + ); - let full_path = buffer - .read_with(cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); + let active_call = cx.read(ActiveCall::global); + let project_id = active_call + .read_with(cx, |call, cx| { + let room = call.room().cloned()?; + let participant = room + .read(cx) + .remote_participants() + .get(&host_id.to_proto())?; + let project = participant + .projects + .iter() + .find(|project| project.worktree_root_names[0] == first_root_name)?; + Some(project.id) + }) + .expect("invalid project in test operation"); + let project = client.build_remote_project(project_id, cx).await; + client.remote_projects_mut().push(project); + } - match self.rng.gen_range(0..100_u32) { - // Close the buffer - 0..=15 => { - break ClientOperation::CloseBuffer { - project_root_name, - full_path, - }; - } - // Save the buffer - 16..=29 if buffer.read_with(cx, |b, _| b.is_dirty()) => { - let detach = self.rng.gen_bool(0.3); - break ClientOperation::SaveBuffer { - project_root_name, - full_path, - detach, - }; - } - // Edit the buffer - 30..=69 => { - let edits = buffer.read_with(cx, |buffer, _| { - buffer.get_random_edits(&mut self.rng, 3) - }); - break ClientOperation::EditBuffer { - project_root_name, - full_path, - edits, - }; - } - // Make an LSP request - _ => { - let offset = buffer.read_with(cx, |buffer, _| { - buffer.clip_offset( - 
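// clip_offset below matters because gen_range(0..=buffer.len()) is a raw byte
// offset and can land inside a multi-byte character; Bias::Left snaps it to
// the nearest char boundary at or before the draw, so the LSP-style requests
// built from it never split a UTF-8 code point. E.g. in "héllo" (6 bytes), a
// raw offset of 2 falls inside 'é' and clips to 1.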
self.rng.gen_range(0..=buffer.len()), - language::Bias::Left, - ) - }); - let detach = self.rng.gen(); - break ClientOperation::RequestLspDataInBuffer { - project_root_name, - full_path, - offset, - kind: match self.rng.gen_range(0..5_u32) { - 0 => LspRequestKind::Rename, - 1 => LspRequestKind::Highlights, - 2 => LspRequestKind::Definition, - 3 => LspRequestKind::CodeAction, - 4.. => LspRequestKind::Completion, - }, - detach, - }; - } - } - } + ClientOperation::CreateWorktreeEntry { + project_root_name, + is_local, + full_path, + is_dir, + } => { + log::info!( + "{}: creating {} at path {:?} in {} project {}", + client.username, + if is_dir { "dir" } else { "file" }, + full_path, + if is_local { "local" } else { "remote" }, + project_root_name, + ); - 71..=80 => { - let query = self.rng.gen_range('a'..='z').to_string(); - let detach = self.rng.gen_bool(0.3); - break ClientOperation::SearchProject { - project_root_name, - query, - detach, - }; - } + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + ensure_project_shared(&project, client, cx).await; + let project_path = project_path_for_full_path(&project, &full_path, cx) + .expect("invalid worktree path in test operation"); + project + .update(cx, |p, cx| p.create_entry(project_path, is_dir, cx)) + .unwrap() + .await?; + } - // Open a buffer - 81.. => { - let worktree = project.read_with(cx, |project, cx| { - project - .worktrees(cx) - .filter(|worktree| { - let worktree = worktree.read(cx); - worktree.is_visible() - && worktree.entries(false).any(|e| e.is_file()) - }) - .choose(&mut self.rng) - }); - let Some(worktree) = worktree else { continue }; - let full_path = worktree.read_with(cx, |worktree, _| { - let entry = worktree - .entries(false) - .filter(|e| e.is_file()) - .choose(&mut self.rng) - .unwrap(); - if entry.path.as_ref() == Path::new("") { - Path::new(worktree.root_name()).into() - } else { - Path::new(worktree.root_name()).join(&entry.path) - } - }); - break ClientOperation::OpenBuffer { - project_root_name, - full_path, - }; - } - } - } + ClientOperation::OpenBuffer { + project_root_name, + is_local, + full_path, + } => { + log::info!( + "{}: opening buffer {:?} in {} project {}", + client.username, + full_path, + if is_local { "local" } else { "remote" }, + project_root_name, + ); - _ => break ClientOperation::Other, - } - }; - operation - } + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); + ensure_project_shared(&project, client, cx).await; + let project_path = project_path_for_full_path(&project, &full_path, cx) + .expect("invalid buffer path in test operation"); + let buffer = project + .update(cx, |project, cx| project.open_buffer(project_path, cx)) + .await?; + client.buffers_for_project(&project).insert(buffer); + } - fn next_root_dir_name(&mut self, user_id: UserId) -> String { - let user_ix = self - .users - .iter() - .position(|user| user.user_id == user_id) - .unwrap(); - let root_id = util::post_inc(&mut self.users[user_ix].next_root_id); - format!("dir-{user_id}-{root_id}") - } - - fn user(&mut self, user_id: UserId) -> &mut UserTestPlan { - let ix = self - .users - .iter() - .position(|user| user.user_id == user_id) - .unwrap(); - &mut self.users[ix] - } -} - -async fn simulate_client( - client: Rc, - mut operation_rx: futures::channel::mpsc::UnboundedReceiver<()>, - plan: Arc>, - mut cx: TestAppContext, -) { - // Setup language server - let mut language = Language::new( - 
LanguageConfig { - name: "Rust".into(), - path_suffixes: vec!["rs".to_string()], - ..Default::default() - }, - None, - ); - let _fake_language_servers = language - .set_fake_lsp_adapter(Arc::new(FakeLspAdapter { - name: "the-fake-language-server", - capabilities: lsp::LanguageServer::full_capabilities(), - initializer: Some(Box::new({ - let plan = plan.clone(); - let fs = client.fs.clone(); - move |fake_server: &mut FakeLanguageServer| { - fake_server.handle_request::( - |_, _| async move { - Ok(Some(lsp::CompletionResponse::Array(vec![ - lsp::CompletionItem { - text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { - range: lsp::Range::new( - lsp::Position::new(0, 0), - lsp::Position::new(0, 0), - ), - new_text: "the-new-text".to_string(), - })), - ..Default::default() - }, - ]))) - }, - ); - - fake_server.handle_request::( - |_, _| async move { - Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction( - lsp::CodeAction { - title: "the-code-action".to_string(), - ..Default::default() - }, - )])) - }, - ); - - fake_server.handle_request::( - |params, _| async move { - Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new( - params.position, - params.position, - )))) - }, - ); - - fake_server.handle_request::({ - let fs = fs.clone(); - let plan = plan.clone(); - move |_, _| { - let fs = fs.clone(); - let plan = plan.clone(); - async move { - let files = fs.files().await; - let mut plan = plan.lock(); - let count = plan.rng.gen_range::(1..3); - let files = (0..count) - .map(|_| files.choose(&mut plan.rng).unwrap()) - .collect::>(); - log::info!("LSP: Returning definitions in files {:?}", &files); - Ok(Some(lsp::GotoDefinitionResponse::Array( - files - .into_iter() - .map(|file| lsp::Location { - uri: lsp::Url::from_file_path(file).unwrap(), - range: Default::default(), - }) - .collect(), - ))) - } - } - }); - - fake_server.handle_request::({ - let plan = plan.clone(); - move |_, _| { - let mut highlights = Vec::new(); - let highlight_count = plan.lock().rng.gen_range(1..=5); - for _ in 0..highlight_count { - let start_row = plan.lock().rng.gen_range(0..100); - let start_column = plan.lock().rng.gen_range(0..100); - let start = PointUtf16::new(start_row, start_column); - let end_row = plan.lock().rng.gen_range(0..100); - let end_column = plan.lock().rng.gen_range(0..100); - let end = PointUtf16::new(end_row, end_column); - let range = if start > end { end..start } else { start..end }; - highlights.push(lsp::DocumentHighlight { - range: range_to_lsp(range.clone()), - kind: Some(lsp::DocumentHighlightKind::READ), - }); - } - highlights.sort_unstable_by_key(|highlight| { - (highlight.range.start, highlight.range.end) - }); - async move { Ok(Some(highlights)) } - } - }); - } - })), - ..Default::default() - })) - .await; - client.language_registry.add(Arc::new(language)); - - while operation_rx.next().await.is_some() { - let operation = plan.lock().next_client_operation(&client, &cx).await; - if let Err(error) = apply_client_operation(&client, plan.clone(), operation, &mut cx).await - { - log::error!("{} error: {}", client.username, error); - } - cx.background().simulate_random_delay().await; - } - log::info!("{}: done", client.username); -} - -async fn apply_client_operation( - client: &TestClient, - plan: Arc>, - operation: ClientOperation, - cx: &mut TestAppContext, -) -> Result<()> { - match operation { - ClientOperation::AcceptIncomingCall => { - log::info!("{}: accepting incoming call", client.username); - let active_call = cx.read(ActiveCall::global); - active_call - .update(cx, 
|call, cx| call.accept_incoming(cx)) - .await?; - } - - ClientOperation::RejectIncomingCall => { - log::info!("{}: declining incoming call", client.username); - let active_call = cx.read(ActiveCall::global); - active_call.update(cx, |call, _| call.decline_incoming())?; - } - - ClientOperation::LeaveCall => { - log::info!("{}: hanging up", client.username); - let active_call = cx.read(ActiveCall::global); - active_call.update(cx, |call, cx| call.hang_up(cx))?; - } - - ClientOperation::InviteContactToCall { user_id } => { - log::info!("{}: inviting {}", client.username, user_id,); - let active_call = cx.read(ActiveCall::global); - active_call - .update(cx, |call, cx| call.invite(user_id.to_proto(), None, cx)) - .await?; - } - - ClientOperation::OpenLocalProject { first_root_name } => { + ClientOperation::EditBuffer { + project_root_name, + is_local, + full_path, + edits, + } => { log::info!( - "{}: opening local project at {:?}", + "{}: editing buffer {:?} in {} project {} with {:?}", client.username, - first_root_name + full_path, + if is_local { "local" } else { "remote" }, + project_root_name, + edits ); - let root_path = Path::new("/").join(&first_root_name); - client.fs.create_dir(&root_path).await.unwrap(); - client - .fs - .create_file(&root_path.join("main.rs"), Default::default()) - .await - .unwrap(); - let project = client.build_local_project(root_path, cx).await.0; + + let project = project_for_root_name(client, &project_root_name, cx) + .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; - client.local_projects_mut().push(project.clone()); + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); + buffer.update(cx, |buffer, cx| { + buffer.edit(edits, None, cx); + }); } - ClientOperation::AddWorktreeToProject { + ClientOperation::CloseBuffer { project_root_name, - new_root_path, + is_local, + full_path, } => { log::info!( - "{}: finding/creating local worktree at {:?} to project with root path {}", + "{}: dropping buffer {:?} in {} project {}", client.username, - new_root_path, + full_path, + if is_local { "local" } else { "remote" }, project_root_name ); + let project = project_for_root_name(client, &project_root_name, cx) .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; - if !client.fs.paths().await.contains(&new_root_path) { - client.fs.create_dir(&new_root_path).await.unwrap(); - } - project - .update(cx, |project, cx| { - project.find_or_create_local_worktree(&new_root_path, true, cx) - }) - .await - .unwrap(); + let buffer = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) + .expect("invalid buffer path in test operation"); + cx.update(|_| { + client.buffers_for_project(&project).remove(&buffer); + drop(buffer); + }); } - ClientOperation::CloseRemoteProject { project_root_name } => { + ClientOperation::SaveBuffer { + project_root_name, + is_local, + full_path, + detach, + } => { log::info!( - "{}: closing remote project with root path {}", + "{}: saving buffer {:?} in {} project {}{}", client.username, + full_path, + if is_local { "local" } else { "remote" }, project_root_name, + if detach { ", detaching" } else { ", awaiting" } ); - let ix = project_ix_for_root_name(&*client.remote_projects(), &project_root_name, cx) - .expect("invalid project in test operation"); - cx.update(|_| client.remote_projects_mut().remove(ix)); - } - ClientOperation::OpenRemoteProject { - 
host_id, - first_root_name, - } => { - log::info!( - "{}: joining remote project of user {}, root name {}", - client.username, - host_id, - first_root_name, - ); - let active_call = cx.read(ActiveCall::global); - let project_id = active_call - .read_with(cx, |call, cx| { - let room = call.room().cloned()?; - let participant = room - .read(cx) - .remote_participants() - .get(&host_id.to_proto())?; - let project = participant - .projects - .iter() - .find(|project| project.worktree_root_names[0] == first_root_name)?; - Some(project.id) - }) - .expect("invalid project in test operation"); - let project = client.build_remote_project(project_id, cx).await; - client.remote_projects_mut().push(project); - } - - ClientOperation::OpenBuffer { - project_root_name, - full_path, - } => { - log::info!( - "{}: opening buffer {:?} in project {}", - client.username, - full_path, - project_root_name, - ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); - // ensure_project_shared(&project, client, cx).await; - let mut components = full_path.components(); - let root_name = components.next().unwrap().as_os_str().to_str().unwrap(); - let path = components.as_path(); - let worktree_id = project - .read_with(cx, |project, cx| { - project.worktrees(cx).find_map(|worktree| { - let worktree = worktree.read(cx); - if worktree.root_name() == root_name { - Some(worktree.id()) - } else { - None - } - }) - }) - .expect("invalid buffer path in test operation"); - let buffer = project - .update(cx, |project, cx| { - project.open_buffer((worktree_id, &path), cx) - }) - .await?; - client.buffers_for_project(&project).insert(buffer); - } - - ClientOperation::EditBuffer { - project_root_name, - full_path, - edits, - } => { - log::info!( - "{}: editing buffer {:?} in project {} with {:?}", - client.username, - full_path, - project_root_name, - edits - ); let project = project_for_root_name(client, &project_root_name, cx) .expect("invalid project in test operation"); + ensure_project_shared(&project, client, cx).await; let buffer = buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) .expect("invalid buffer path in test operation"); - buffer.update(cx, |buffer, cx| { - buffer.edit(edits, None, cx); + let (requested_version, save) = + buffer.update(cx, |buffer, cx| (buffer.version(), buffer.save(cx))); + let save = cx.background().spawn(async move { + let (saved_version, _, _) = save + .await + .map_err(|err| anyhow!("save request failed: {:?}", err))?; + assert!(saved_version.observed_all(&requested_version)); + anyhow::Ok(()) }); + if detach { + log::info!("{}: detaching save request", client.username); + cx.update(|cx| save.detach_and_log_err(cx)); + } else { + save.await?; + } } - ClientOperation::CloseBuffer { + ClientOperation::RequestLspDataInBuffer { project_root_name, + is_local, full_path, + offset, + kind, + detach, } => { log::info!( - "{}: dropping buffer {:?} in project {}", + "{}: request LSP {:?} for buffer {:?} in {} project {}{}", client.username, + kind, full_path, - project_root_name + if is_local { "local" } else { "remote" }, + project_root_name, + if detach { ", detaching" } else { ", awaiting" } ); + let project = project_for_root_name(client, &project_root_name, cx) .expect("invalid project in test operation"); let buffer = buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) .expect("invalid buffer path in test operation"); - cx.update(|_| { - 
client.buffers_for_project(&project).remove(&buffer); - drop(buffer); - }); + let request = match kind { + LspRequestKind::Rename => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.prepare_rename(buffer, offset, cx)) + .await?; + anyhow::Ok(()) + }), + LspRequestKind::Completion => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.completions(&buffer, offset, cx)) + .await?; + Ok(()) + }), + LspRequestKind::CodeAction => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.code_actions(&buffer, offset..offset, cx)) + .await?; + Ok(()) + }), + LspRequestKind::Definition => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.definition(&buffer, offset, cx)) + .await?; + Ok(()) + }), + LspRequestKind::Highlights => cx.spawn(|mut cx| async move { + project + .update(&mut cx, |p, cx| p.document_highlights(&buffer, offset, cx)) + .await?; + Ok(()) + }), + }; + if detach { + request.detach(); + } else { + request.await?; + } } - ClientOperation::SaveBuffer { + ClientOperation::SearchProject { project_root_name, - full_path, + query, detach, } => { log::info!( - "{}: saving buffer {:?} in project {}{}", + "{}: search project {} for {:?}{}", client.username, - full_path, project_root_name, + query, if detach { ", detaching" } else { ", awaiting" } ); let project = project_for_root_name(client, &project_root_name, cx) .expect("invalid project in test operation"); - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); - let (requested_version, save) = - buffer.update(cx, |buffer, cx| (buffer.version(), buffer.save(cx))); - let save = cx.background().spawn(async move { - let (saved_version, _, _) = save + let search = project.update(cx, |project, cx| { + project.search(SearchQuery::text(query, false, false), cx) + }); + let search = cx.background().spawn(async move { + search .await - .map_err(|err| anyhow!("save request failed: {:?}", err))?; - assert!(saved_version.observed_all(&requested_version)); - anyhow::Ok(()) + .map_err(|err| anyhow!("search request failed: {:?}", err)) }); if detach { log::info!("{}: detaching save request", client.username); - cx.update(|cx| save.detach_and_log_err(cx)); + cx.update(|cx| search.detach_and_log_err(cx)); } else { - save.await?; + search.await?; + } + } + + ClientOperation::CreateFsEntry { path, is_dir } => { + log::info!( + "{}: creating {} at {:?}", + client.username, + if is_dir { "dir" } else { "file" }, + path + ); + if is_dir { + client.fs.create_dir(&path).await.unwrap(); + } else { + client + .fs + .create_file(&path, Default::default()) + .await + .unwrap(); + } + } + } + Ok(()) +} + +struct TestPlan { + rng: StdRng, + max_operations: usize, + operation_ix: usize, + users: Vec, + allow_server_restarts: bool, + allow_client_reconnection: bool, + allow_client_disconnection: bool, +} + +struct UserTestPlan { + user_id: UserId, + username: String, + next_root_id: usize, + online: bool, +} + +#[derive(Debug)] +enum Operation { + AddConnection { + user_id: UserId, + }, + RemoveConnection { + user_id: UserId, + }, + BounceConnection { + user_id: UserId, + }, + RestartServer, + MutateClients { + user_ids: Vec, + quiesce: bool, + }, +} + +#[derive(Debug)] +enum ClientOperation { + AcceptIncomingCall, + RejectIncomingCall, + LeaveCall, + InviteContactToCall { + user_id: UserId, + }, + OpenLocalProject { + first_root_name: String, + }, + OpenRemoteProject { + host_id: UserId, + first_root_name: 
String,
+    },
+    AddWorktreeToProject {
+        project_root_name: String,
+        new_root_path: PathBuf,
+    },
+    CloseRemoteProject {
+        project_root_name: String,
+    },
+    OpenBuffer {
+        project_root_name: String,
+        is_local: bool,
+        full_path: PathBuf,
+    },
+    SearchProject {
+        project_root_name: String,
+        query: String,
+        detach: bool,
+    },
+    EditBuffer {
+        project_root_name: String,
+        is_local: bool,
+        full_path: PathBuf,
+        edits: Vec<(Range<usize>, Arc<str>)>,
+    },
+    CloseBuffer {
+        project_root_name: String,
+        is_local: bool,
+        full_path: PathBuf,
+    },
+    SaveBuffer {
+        project_root_name: String,
+        is_local: bool,
+        full_path: PathBuf,
+        detach: bool,
+    },
+    RequestLspDataInBuffer {
+        project_root_name: String,
+        is_local: bool,
+        full_path: PathBuf,
+        offset: usize,
+        kind: LspRequestKind,
+        detach: bool,
+    },
+    CreateWorktreeEntry {
+        project_root_name: String,
+        is_local: bool,
+        full_path: PathBuf,
+        is_dir: bool,
+    },
+    CreateFsEntry {
+        path: PathBuf,
+        is_dir: bool,
+    },
+}
+
+#[derive(Debug)]
+enum LspRequestKind {
+    Rename,
+    Completion,
+    CodeAction,
+    Definition,
+    Highlights,
+}
+
+impl TestPlan {
+    async fn next_operation(
+        &mut self,
+        clients: &[(Rc<TestClient>, TestAppContext)],
+    ) -> Option<Operation> {
+        if self.operation_ix == self.max_operations {
+            return None;
+        }
+
+        let operation = loop {
+            break match self.rng.gen_range(0..100) {
+                0..=29 if clients.len() < self.users.len() => {
+                    let user = self
+                        .users
+                        .iter()
+                        .filter(|u| !u.online)
+                        .choose(&mut self.rng)
+                        .unwrap();
+                    self.operation_ix += 1;
+                    Operation::AddConnection {
+                        user_id: user.user_id,
+                    }
+                }
+                30..=34 if clients.len() > 1 && self.allow_client_disconnection => {
+                    let (client, cx) = &clients[self.rng.gen_range(0..clients.len())];
+                    let user_id = client.current_user_id(cx);
+                    self.operation_ix += 1;
+                    Operation::RemoveConnection { user_id }
+                }
+                35..=39 if clients.len() > 1 && self.allow_client_reconnection => {
+                    let (client, cx) = &clients[self.rng.gen_range(0..clients.len())];
+                    let user_id = client.current_user_id(cx);
+                    self.operation_ix += 1;
+                    Operation::BounceConnection { user_id }
+                }
+                40..=44 if self.allow_server_restarts && clients.len() > 1 => {
+                    self.operation_ix += 1;
+                    Operation::RestartServer
+                }
+                _ if !clients.is_empty() => {
+                    let count = self
+                        .rng
+                        .gen_range(1..10)
+                        .min(self.max_operations - self.operation_ix);
+                    let user_ids = (0..count)
+                        .map(|_| {
+                            let ix = self.rng.gen_range(0..clients.len());
+                            let (client, cx) = &clients[ix];
+                            client.current_user_id(cx)
+                        })
+                        .collect();
+                    Operation::MutateClients {
+                        user_ids,
+                        quiesce: self.rng.gen(),
+                    }
+                }
+                _ => continue,
+            };
+        };
+        Some(operation)
+    }
+
+    async fn next_client_operation(
+        &mut self,
+        client: &TestClient,
+        cx: &TestAppContext,
+    ) -> Option<ClientOperation> {
+        if self.operation_ix == self.max_operations {
+            return None;
+        }
+
+        let user_id = client.current_user_id(cx);
+        let call = cx.read(ActiveCall::global);
+        let operation = loop {
+            match self.rng.gen_range(0..100_u32) {
+                // Mutate the call
+                0..=29 => {
+                    // Respond to an incoming call
+                    if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) {
+                        break if self.rng.gen_bool(0.7) {
+                            ClientOperation::AcceptIncomingCall
+                        } else {
+                            ClientOperation::RejectIncomingCall
+                        };
+                    }
+
+                    match self.rng.gen_range(0..100_u32) {
+                        // Invite a contact to the current call
+                        0..=70 => {
+                            let available_contacts =
+                                client.user_store.read_with(cx, |user_store, _| {
+                                    user_store
+                                        .contacts()
+                                        .iter()
+                                        .filter(|contact| contact.online && !contact.busy)
+                                        .cloned()
+                                        .collect::<Vec<_>>()
+                                });
+                            if
!available_contacts.is_empty() { + let contact = available_contacts.choose(&mut self.rng).unwrap(); + break ClientOperation::InviteContactToCall { + user_id: UserId(contact.user.id as i32), + }; + } + } + + // Leave the current call + 71.. => { + if self.allow_client_disconnection + && call.read_with(cx, |call, _| call.room().is_some()) + { + break ClientOperation::LeaveCall; + } + } + } + } + + // Mutate projects + 30..=59 => match self.rng.gen_range(0..100_u32) { + // Open a new project + 0..=70 => { + // Open a remote project + if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) { + let existing_remote_project_ids = cx.read(|cx| { + client + .remote_projects() + .iter() + .map(|p| p.read(cx).remote_id().unwrap()) + .collect::>() + }); + let new_remote_projects = room.read_with(cx, |room, _| { + room.remote_participants() + .values() + .flat_map(|participant| { + participant.projects.iter().filter_map(|project| { + if existing_remote_project_ids.contains(&project.id) { + None + } else { + Some(( + UserId::from_proto(participant.user.id), + project.worktree_root_names[0].clone(), + )) + } + }) + }) + .collect::>() + }); + if !new_remote_projects.is_empty() { + let (host_id, first_root_name) = + new_remote_projects.choose(&mut self.rng).unwrap().clone(); + break ClientOperation::OpenRemoteProject { + host_id, + first_root_name, + }; + } + } + // Open a local project + else { + let first_root_name = self.next_root_dir_name(user_id); + break ClientOperation::OpenLocalProject { first_root_name }; + } + } + + // Close a remote project + 71..=80 => { + if !client.remote_projects().is_empty() { + let project = client + .remote_projects() + .choose(&mut self.rng) + .unwrap() + .clone(); + let first_root_name = root_name_for_project(&project, cx); + break ClientOperation::CloseRemoteProject { + project_root_name: first_root_name, + }; + } + } + + // Mutate project worktrees + 81.. 
=> match self.rng.gen_range(0..100_u32) { + // Add a worktree to a local project + 0..=50 => { + let Some(project) = client + .local_projects() + .choose(&mut self.rng) + .cloned() else { continue }; + let project_root_name = root_name_for_project(&project, cx); + let mut paths = client.fs.paths().await; + paths.remove(0); + let new_root_path = if paths.is_empty() || self.rng.gen() { + Path::new("/").join(&self.next_root_dir_name(user_id)) + } else { + paths.choose(&mut self.rng).unwrap().clone() + }; + break ClientOperation::AddWorktreeToProject { + project_root_name, + new_root_path, + }; + } + + // Add an entry to a worktree + _ => { + let Some(project) = choose_random_project(client, &mut self.rng) else { continue }; + let project_root_name = root_name_for_project(&project, cx); + let is_local = project.read_with(cx, |project, _| project.is_local()); + let worktree = project.read_with(cx, |project, cx| { + project + .worktrees(cx) + .filter(|worktree| { + let worktree = worktree.read(cx); + worktree.is_visible() + && worktree.entries(false).any(|e| e.is_file()) + && worktree.root_entry().map_or(false, |e| e.is_dir()) + }) + .choose(&mut self.rng) + }); + let Some(worktree) = worktree else { continue }; + let is_dir = self.rng.gen::(); + let mut full_path = + worktree.read_with(cx, |w, _| PathBuf::from(w.root_name())); + full_path.push(gen_file_name(&mut self.rng)); + if !is_dir { + full_path.set_extension("rs"); + } + break ClientOperation::CreateWorktreeEntry { + project_root_name, + is_local, + full_path, + is_dir, + }; + } + }, + }, + + // Query and mutate buffers + 60..=95 => { + let Some(project) = choose_random_project(client, &mut self.rng) else { continue }; + let project_root_name = root_name_for_project(&project, cx); + let is_local = project.read_with(cx, |project, _| project.is_local()); + + match self.rng.gen_range(0..100_u32) { + // Manipulate an existing buffer + 0..=70 => { + let Some(buffer) = client + .buffers_for_project(&project) + .iter() + .choose(&mut self.rng) + .cloned() else { continue }; + + let full_path = buffer + .read_with(cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); + + match self.rng.gen_range(0..100_u32) { + // Close the buffer + 0..=15 => { + break ClientOperation::CloseBuffer { + project_root_name, + is_local, + full_path, + }; + } + // Save the buffer + 16..=29 if buffer.read_with(cx, |b, _| b.is_dirty()) => { + let detach = self.rng.gen_bool(0.3); + break ClientOperation::SaveBuffer { + project_root_name, + is_local, + full_path, + detach, + }; + } + // Edit the buffer + 30..=69 => { + let edits = buffer.read_with(cx, |buffer, _| { + buffer.get_random_edits(&mut self.rng, 3) + }); + break ClientOperation::EditBuffer { + project_root_name, + is_local, + full_path, + edits, + }; + } + // Make an LSP request + _ => { + let offset = buffer.read_with(cx, |buffer, _| { + buffer.clip_offset( + self.rng.gen_range(0..=buffer.len()), + language::Bias::Left, + ) + }); + let detach = self.rng.gen(); + break ClientOperation::RequestLspDataInBuffer { + project_root_name, + full_path, + offset, + is_local, + kind: match self.rng.gen_range(0..5_u32) { + 0 => LspRequestKind::Rename, + 1 => LspRequestKind::Highlights, + 2 => LspRequestKind::Definition, + 3 => LspRequestKind::CodeAction, + 4.. 
=> LspRequestKind::Completion, + }, + detach, + }; + } + } + } + + 71..=80 => { + let query = self.rng.gen_range('a'..='z').to_string(); + let detach = self.rng.gen_bool(0.3); + break ClientOperation::SearchProject { + project_root_name, + query, + detach, + }; + } + + // Open a buffer + 81.. => { + let worktree = project.read_with(cx, |project, cx| { + project + .worktrees(cx) + .filter(|worktree| { + let worktree = worktree.read(cx); + worktree.is_visible() + && worktree.entries(false).any(|e| e.is_file()) + }) + .choose(&mut self.rng) + }); + let Some(worktree) = worktree else { continue }; + let full_path = worktree.read_with(cx, |worktree, _| { + let entry = worktree + .entries(false) + .filter(|e| e.is_file()) + .choose(&mut self.rng) + .unwrap(); + if entry.path.as_ref() == Path::new("") { + Path::new(worktree.root_name()).into() + } else { + Path::new(worktree.root_name()).join(&entry.path) + } + }); + break ClientOperation::OpenBuffer { + project_root_name, + is_local, + full_path, + }; + } + } + } + + // Create a file or directory + 96.. => { + let is_dir = self.rng.gen::(); + let mut path = client + .fs + .directories() + .await + .choose(&mut self.rng) + .unwrap() + .clone(); + path.push(gen_file_name(&mut self.rng)); + if !is_dir { + path.set_extension("rs"); + } + break ClientOperation::CreateFsEntry { path, is_dir }; + } } - } + }; + self.operation_ix += 1; + Some(operation) + } - ClientOperation::RequestLspDataInBuffer { - project_root_name, - full_path, - offset, - kind, - detach, - } => { - log::info!( - "{}: request LSP {:?} for buffer {:?} in project {}{}", - client.username, - kind, - full_path, - project_root_name, - if detach { ", detaching" } else { ", awaiting" } - ); + fn next_root_dir_name(&mut self, user_id: UserId) -> String { + let user_ix = self + .users + .iter() + .position(|user| user.user_id == user_id) + .unwrap(); + let root_id = util::post_inc(&mut self.users[user_ix].next_root_id); + format!("dir-{user_id}-{root_id}") + } - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); - let request = match kind { - LspRequestKind::Rename => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.prepare_rename(buffer, offset, cx)) - .await?; - anyhow::Ok(()) - }), - LspRequestKind::Completion => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.completions(&buffer, offset, cx)) - .await?; - Ok(()) - }), - LspRequestKind::CodeAction => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.code_actions(&buffer, offset..offset, cx)) - .await?; - Ok(()) - }), - LspRequestKind::Definition => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.definition(&buffer, offset, cx)) - .await?; - Ok(()) - }), - LspRequestKind::Highlights => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.document_highlights(&buffer, offset, cx)) - .await?; - Ok(()) - }), - }; - if detach { - request.detach(); - } else { - request.await?; - } - } + fn user(&mut self, user_id: UserId) -> &mut UserTestPlan { + let ix = self + .users + .iter() + .position(|user| user.user_id == user_id) + .unwrap(); + &mut self.users[ix] + } +} - ClientOperation::SearchProject { - project_root_name, - query, - detach, - } => { - log::info!( - "{}: search project {} for {:?}{}", - client.username, - project_root_name, 
-                query,
-                if detach { ", detaching" } else { ", awaiting" }
-            );
-            let project = project_for_root_name(client, &project_root_name, cx)
-                .expect("invalid project in test operation");
-            let search = project.update(cx, |project, cx| {
-                project.search(SearchQuery::text(query, false, false), cx)
-            });
-            let search = cx.background().spawn(async move {
-                search
-                    .await
-                    .map_err(|err| anyhow!("search request failed: {:?}", err))
-            });
-            if detach {
-                log::info!("{}: detaching save request", client.username);
-                cx.update(|cx| search.detach_and_log_err(cx));
-            } else {
-                search.await?;
-            }
-        }
 
+async fn simulate_client(
+    client: Rc<TestClient>,
+    mut operation_rx: futures::channel::mpsc::UnboundedReceiver<()>,
+    plan: Arc<Mutex<TestPlan>>,
+    mut cx: TestAppContext,
+) {
+    // Setup language server
+    let mut language = Language::new(
+        LanguageConfig {
+            name: "Rust".into(),
+            path_suffixes: vec!["rs".to_string()],
+            ..Default::default()
+        },
+        None,
+    );
+    let _fake_language_servers = language
+        .set_fake_lsp_adapter(Arc::new(FakeLspAdapter {
+            name: "the-fake-language-server",
+            capabilities: lsp::LanguageServer::full_capabilities(),
+            initializer: Some(Box::new({
+                let plan = plan.clone();
+                let fs = client.fs.clone();
+                move |fake_server: &mut FakeLanguageServer| {
+                    fake_server.handle_request::<lsp::request::Completion, _, _>(
+                        |_, _| async move {
+                            Ok(Some(lsp::CompletionResponse::Array(vec![
+                                lsp::CompletionItem {
+                                    text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit {
+                                        range: lsp::Range::new(
+                                            lsp::Position::new(0, 0),
+                                            lsp::Position::new(0, 0),
+                                        ),
+                                        new_text: "the-new-text".to_string(),
+                                    })),
+                                    ..Default::default()
+                                },
+                            ])))
+                        },
+                    );
+
+                    fake_server.handle_request::<lsp::request::CodeActionRequest, _, _>(
+                        |_, _| async move {
+                            Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction(
+                                lsp::CodeAction {
+                                    title: "the-code-action".to_string(),
+                                    ..Default::default()
+                                },
+                            )]))
+                        },
+                    );
+
+                    fake_server.handle_request::<lsp::request::PrepareRenameRequest, _, _>(
+                        |params, _| async move {
+                            Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new(
+                                params.position,
+                                params.position,
+                            ))))
+                        },
+                    );
+
+                    fake_server.handle_request::<lsp::request::GotoDefinition, _, _>({
+                        let fs = fs.clone();
+                        let plan = plan.clone();
+                        move |_, _| {
+                            let fs = fs.clone();
+                            let plan = plan.clone();
+                            async move {
+                                let files = fs.files().await;
+                                let mut plan = plan.lock();
+                                let count = plan.rng.gen_range::<usize, _>(1..3);
+                                let files = (0..count)
+                                    .map(|_| files.choose(&mut plan.rng).unwrap())
+                                    .collect::<Vec<_>>();
+                                log::info!("LSP: Returning definitions in files {:?}", &files);
+                                Ok(Some(lsp::GotoDefinitionResponse::Array(
+                                    files
+                                        .into_iter()
+                                        .map(|file| lsp::Location {
+                                            uri: lsp::Url::from_file_path(file).unwrap(),
+                                            range: Default::default(),
+                                        })
+                                        .collect(),
+                                )))
+                            }
+                        }
+                    });
 
-        ClientOperation::Other => {
-            let choice = plan.lock().rng.gen_range(0..100);
-            match choice {
-                0..=59
-                    if !client.local_projects().is_empty()
-                        || !client.remote_projects().is_empty() =>
-                {
-                    randomly_mutate_worktrees(client, &plan, cx).await?;
+                    fake_server.handle_request::<lsp::request::DocumentHighlightRequest, _, _>({
+                        let plan = plan.clone();
+                        move |_, _| {
+                            let mut highlights = Vec::new();
+                            let highlight_count = plan.lock().rng.gen_range(1..=5);
+                            for _ in 0..highlight_count {
+                                let start_row = plan.lock().rng.gen_range(0..100);
+                                let start_column = plan.lock().rng.gen_range(0..100);
+                                let start = PointUtf16::new(start_row, start_column);
+                                let end_row = plan.lock().rng.gen_range(0..100);
+                                let end_column = plan.lock().rng.gen_range(0..100);
+                                let end = PointUtf16::new(end_row, end_column);
+                                let range = if start > end { end..start } else { start..end };
+                                highlights.push(lsp::DocumentHighlight {
+                                    range: range_to_lsp(range.clone()),
+                                    kind: Some(lsp::DocumentHighlightKind::READ),
+                                });
+                            }
+                            highlights.sort_unstable_by_key(|highlight| {
+                                (highlight.range.start, highlight.range.end)
+                            });
+                            async move { Ok(Some(highlights)) }
+                        }
+                    });
                 }
-                _ => randomly_mutate_fs(client, &plan).await,
-            }
+            })),
+            ..Default::default()
+        }))
+        .await;
+    client.language_registry.add(Arc::new(language));
+
+    while operation_rx.next().await.is_some() {
+        let Some(operation) = plan.lock().next_client_operation(&client, &cx).await else { break };
+        if let Err(error) = apply_client_operation(&client, operation, &mut cx).await {
+            log::error!("{} error: {}", client.username, error);
         }
-    Ok(())
+        cx.background().simulate_random_delay().await;
     }
+    log::info!("{}: done", client.username);
 }
 
 fn buffer_for_full_path(
@@ -1368,6 +1492,27 @@ fn root_name_for_project(project: &ModelHandle<Project>, cx: &TestAppContext) -> String {
     })
 }
 
+fn project_path_for_full_path(
+    project: &ModelHandle<Project>,
+    full_path: &Path,
+    cx: &TestAppContext,
+) -> Option<ProjectPath> {
+    let mut components = full_path.components();
+    let root_name = components.next().unwrap().as_os_str().to_str().unwrap();
+    let path = components.as_path().into();
+    let worktree_id = project.read_with(cx, |project, cx| {
+        project.worktrees(cx).find_map(|worktree| {
+            let worktree = worktree.read(cx);
+            if worktree.root_name() == root_name {
+                Some(worktree.id())
+            } else {
+                None
+            }
+        })
+    })?;
+    Some(ProjectPath { worktree_id, path })
+}
+
 async fn ensure_project_shared(
     project: &ModelHandle<Project>,
     client: &TestClient,
@@ -1402,76 +1547,6 @@ async fn ensure_project_shared(
     }
 }
 
-async fn randomly_mutate_fs(client: &TestClient, plan: &Arc<Mutex<TestPlan>>) {
-    let is_dir = plan.lock().rng.gen::<bool>();
-    let mut new_path = client
-        .fs
-        .directories()
-        .await
-        .choose(&mut plan.lock().rng)
-        .unwrap()
-        .clone();
-    new_path.push(gen_file_name(&mut plan.lock().rng));
-    if is_dir {
-        log::info!("{}: creating local dir at {:?}", client.username, new_path);
-        client.fs.create_dir(&new_path).await.unwrap();
-    } else {
-        new_path.set_extension("rs");
-        log::info!("{}: creating local file at {:?}", client.username, new_path);
-        client
-            .fs
-            .create_file(&new_path, Default::default())
-            .await
-            .unwrap();
-    }
-}
-
-async fn randomly_mutate_worktrees(
-    client: &TestClient,
-    plan: &Arc<Mutex<TestPlan>>,
-    cx: &mut TestAppContext,
-) -> Result<()> {
-    let project = choose_random_project(client, &mut plan.lock().rng).unwrap();
-    let Some(worktree) = project.read_with(cx, |project, cx| {
-        project
-            .worktrees(cx)
-            .filter(|worktree| {
-                let worktree = worktree.read(cx);
-                worktree.is_visible()
-                    && worktree.entries(false).any(|e| e.is_file())
-                    && worktree.root_entry().map_or(false, |e| e.is_dir())
-            })
-            .choose(&mut plan.lock().rng)
-    }) else {
-        return Ok(())
-    };
-
-    let (worktree_id, worktree_root_name) = worktree.read_with(cx, |worktree, _| {
-        (worktree.id(), worktree.root_name().to_string())
-    });
-
-    let is_dir = plan.lock().rng.gen::<bool>();
-    let mut new_path = PathBuf::new();
-    new_path.push(gen_file_name(&mut plan.lock().rng));
-    if !is_dir {
-        new_path.set_extension("rs");
-    }
-    log::info!(
-        "{}: creating {:?} in worktree {} ({})",
-        client.username,
-        new_path,
-        worktree_id,
-        worktree_root_name,
-    );
-    project
-        .update(cx, |project, cx| {
-            project.create_entry((worktree_id, new_path), is_dir, cx)
-        })
-        .unwrap()
-        .await?;
-    Ok(())
-}
-
 fn choose_random_project(client: &TestClient, rng: &mut StdRng) -> Option<ModelHandle<Project>> {
     client
        .local_projects()

From 2351f2bd0cedc851e08da6423234d2baab286640 Mon Sep 17 00:00:00 2001
From: Max Brunsfeld
Date: Fri, 6 Jan 2023 15:40:42 -0800
Subject: [PATCH 07/80] Tolerate failure to join remote projects in randomized
 test

---
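Note: `Room::join_project` returns a task whose failure is now propagated with
`?` rather than unwrapped, so a join that races with the host disconnecting or
unsharing fails just that one random operation instead of panicking the whole
test. A minimal sketch of the tolerant call shape (identifiers follow the diff
below; the precise return type is an assumption, not taken from this patch):

    // Sketch: join_project is assumed to yield a Task<Result<ModelHandle<Project>>>,
    // so a failed join surfaces as an Err instead of a panic.
    let project = room
        .update(cx, |room, cx| {
            room.join_project(
                project_id,
                client.language_registry.clone(),
                FakeFs::new(cx.background().clone()),
                cx,
            )
        })
        .await?; // tolerated: the host may have left before the join completed
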
 .../src/tests/randomized_integration_tests.rs | 26 ++++++++++++-------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs
index 9cdc05833e331f5a9deb1f5ce691537c01815df9..8ed290bcf833f9ba8cd713a985cfa0adabdecb1d 100644
--- a/crates/collab/src/tests/randomized_integration_tests.rs
+++ b/crates/collab/src/tests/randomized_integration_tests.rs
@@ -7,7 +7,7 @@ use anyhow::{anyhow, Result};
 use call::ActiveCall;
 use client::RECEIVE_TIMEOUT;
 use collections::{BTreeMap, HashSet};
-use fs::Fs as _;
+use fs::{FakeFs, Fs as _};
 use futures::StreamExt as _;
 use gpui::{executor::Deterministic, ModelHandle, TestAppContext};
 use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16};
 use lsp::FakeLanguageServer;
 use parking_lot::Mutex;
@@ -531,22 +531,30 @@ async fn apply_client_operation(
             );
 
             let active_call = cx.read(ActiveCall::global);
-            let project_id = active_call
-                .read_with(cx, |call, cx| {
+            let project = active_call
+                .update(cx, |call, cx| {
                     let room = call.room().cloned()?;
                     let participant = room
                         .read(cx)
                         .remote_participants()
                         .get(&host_id.to_proto())?;
-                    let project = participant
+                    let project_id = participant
                         .projects
                         .iter()
-                        .find(|project| project.worktree_root_names[0] == first_root_name)?;
-                    Some(project.id)
+                        .find(|project| project.worktree_root_names[0] == first_root_name)?
+                        .id;
+                    Some(room.update(cx, |room, cx| {
+                        room.join_project(
+                            project_id,
+                            client.language_registry.clone(),
+                            FakeFs::new(cx.background().clone()),
+                            cx,
+                        )
+                    }))
                 })
-                .expect("invalid project in test operation");
-            let project = client.build_remote_project(project_id, cx).await;
-            client.remote_projects_mut().push(project);
+                .expect("invalid project in test operation")
+                .await?;
+            client.remote_projects_mut().push(project.clone());
         }
 
         ClientOperation::CreateWorktreeEntry {

From c503ba00b63b9b04d167b9b17ce0ac20c0584e9b Mon Sep 17 00:00:00 2001
From: Max Brunsfeld
Date: Fri, 6 Jan 2023 17:12:15 -0800
Subject: [PATCH 08/80] Add env vars to store and load test plan from JSON
 files

---
 .../src/tests/randomized_integration_tests.rs | 178 +++++++++++++++---
 1 file changed, 153 insertions(+), 25 deletions(-)

diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs
index 8ed290bcf833f9ba8cd713a985cfa0adabdecb1d..243d275e13dc39b554afdea21224ea984829cab8 100644
--- a/crates/collab/src/tests/randomized_integration_tests.rs
+++ b/crates/collab/src/tests/randomized_integration_tests.rs
@@ -15,6 +15,7 @@ use lsp::FakeLanguageServer;
 use parking_lot::Mutex;
 use project::{search::SearchQuery, Project, ProjectPath};
 use rand::prelude::*;
+use serde::{Deserialize, Serialize};
 use std::{
     env,
     ops::Range,
     path::{Path, PathBuf},
     rc::Rc,
@@ -28,18 +29,20 @@ async fn test_random_collaboration(
     cx: &mut TestAppContext,
     deterministic: Arc<Deterministic>,
-    mut rng: StdRng,
+    rng: StdRng,
 ) {
     deterministic.forbid_parking();
 
     let max_peers = env::var("MAX_PEERS")
         .map(|i| i.parse().expect("invalid `MAX_PEERS` variable"))
-        .unwrap_or(5);
-
+        .unwrap_or(3);
     let max_operations = env::var("OPERATIONS")
         .map(|i| i.parse().expect("invalid `OPERATIONS` variable"))
         .unwrap_or(10);
 
+    let plan_load_path = path_env_var("LOAD_PLAN");
+    let plan_save_path = path_env_var("SAVE_PLAN");
+
     let mut server = TestServer::start(&deterministic).await;
     let db =
server.app_state.db.clone(); @@ -64,6 +67,7 @@ async fn test_random_collaboration( username, online: false, next_root_id: 0, + operation_ix: 0, }); } @@ -84,15 +88,12 @@ async fn test_random_collaboration( } } - let plan = Arc::new(Mutex::new(TestPlan { - allow_server_restarts: rng.gen_bool(0.7), - allow_client_reconnection: rng.gen_bool(0.7), - allow_client_disconnection: rng.gen_bool(0.1), - operation_ix: 0, - max_operations, - users, - rng, - })); + let plan = Arc::new(Mutex::new(TestPlan::new(rng, users, max_operations))); + + if let Some(path) = &plan_load_path { + eprintln!("loaded plan from path {:?}", path); + plan.lock().load(path); + } let mut clients = Vec::new(); let mut client_tasks = Vec::new(); @@ -250,6 +251,11 @@ async fn test_random_collaboration( deterministic.finish_waiting(); deterministic.run_until_parked(); + if let Some(path) = &plan_save_path { + eprintln!("saved test plan to path {:?}", path); + plan.lock().save(path); + } + for (client, client_cx) in &clients { for guest_project in client.remote_projects().iter() { guest_project.read_with(client_cx, |guest_project, cx| { @@ -760,12 +766,14 @@ async fn apply_client_operation( ClientOperation::SearchProject { project_root_name, + is_local, query, detach, } => { log::info!( - "{}: search project {} for {:?}{}", + "{}: search {} project {} for {:?}{}", client.username, + if is_local { "local" } else { "remote" }, project_root_name, query, if detach { ", detaching" } else { ", awaiting" } @@ -811,6 +819,8 @@ async fn apply_client_operation( struct TestPlan { rng: StdRng, + replay: bool, + stored_operations: Vec, max_operations: usize, operation_ix: usize, users: Vec, @@ -823,10 +833,21 @@ struct UserTestPlan { user_id: UserId, username: String, next_root_id: usize, + operation_ix: usize, online: bool, } -#[derive(Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(untagged)] +enum StoredOperation { + Server(Operation), + Client { + user_id: UserId, + operation: ClientOperation, + }, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] enum Operation { AddConnection { user_id: UserId, @@ -844,7 +865,7 @@ enum Operation { }, } -#[derive(Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] enum ClientOperation { AcceptIncomingCall, RejectIncomingCall, @@ -873,6 +894,7 @@ enum ClientOperation { }, SearchProject { project_root_name: String, + is_local: bool, query: String, detach: bool, }, @@ -913,7 +935,7 @@ enum ClientOperation { }, } -#[derive(Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] enum LspRequestKind { Rename, Completion, @@ -923,15 +945,109 @@ enum LspRequestKind { } impl TestPlan { + fn new(mut rng: StdRng, users: Vec, max_operations: usize) -> Self { + Self { + replay: false, + allow_server_restarts: rng.gen_bool(0.7), + allow_client_reconnection: rng.gen_bool(0.7), + allow_client_disconnection: rng.gen_bool(0.1), + stored_operations: Vec::new(), + operation_ix: 0, + max_operations, + users, + rng, + } + } + + fn load(&mut self, path: &Path) { + let json = std::fs::read_to_string(path).unwrap(); + self.replay = true; + self.stored_operations = serde_json::from_str(&json).unwrap(); + } + + fn save(&mut self, path: &Path) { + // Format each operation as one line + let mut json = Vec::new(); + json.push(b'['); + for (i, stored_operation) in self.stored_operations.iter().enumerate() { + if i > 0 { + json.push(b','); + } + json.extend_from_slice(b"\n "); + serde_json::to_writer(&mut json, stored_operation).unwrap(); + } + json.extend_from_slice(b"\n]\n"); + std::fs::write(path, 
&json).unwrap();
+    }
+
     async fn next_operation(
         &mut self,
         clients: &[(Rc<TestClient>, TestAppContext)],
     ) -> Option<Operation> {
+        if self.replay {
+            while let Some(stored_operation) = self.stored_operations.get(self.operation_ix) {
+                self.operation_ix += 1;
+                if let StoredOperation::Server(operation) = stored_operation {
+                    return Some(operation.clone());
+                }
+            }
+            None
+        } else {
+            let operation = self.generate_operation(clients).await;
+            if let Some(operation) = &operation {
+                self.stored_operations
+                    .push(StoredOperation::Server(operation.clone()))
+            }
+            operation
+        }
+    }
+
+    async fn next_client_operation(
+        &mut self,
+        client: &TestClient,
+        cx: &TestAppContext,
+    ) -> Option<ClientOperation> {
+        let current_user_id = client.current_user_id(cx);
+        let user_ix = self
+            .users
+            .iter()
+            .position(|user| user.user_id == current_user_id)
+            .unwrap();
+        let user_plan = &mut self.users[user_ix];
+
+        if self.replay {
+            while let Some(stored_operation) = self.stored_operations.get(user_plan.operation_ix) {
+                user_plan.operation_ix += 1;
+                if let StoredOperation::Client { user_id, operation } = stored_operation {
+                    if user_id == &current_user_id {
+                        return Some(operation.clone());
+                    }
+                }
+            }
+            None
+        } else {
+            let operation = self
+                .generate_client_operation(current_user_id, client, cx)
+                .await;
+            if let Some(operation) = &operation {
+                self.stored_operations.push(StoredOperation::Client {
+                    user_id: current_user_id,
+                    operation: operation.clone(),
+                })
+            }
+            operation
+        }
+    }
+
+    async fn generate_operation(
+        &mut self,
+        clients: &[(Rc<TestClient>, TestAppContext)],
     ) -> Option<Operation> {
         if self.operation_ix == self.max_operations {
             return None;
         }
 
-        let operation = loop {
+        Some(loop {
             break match self.rng.gen_range(0..100) {
                 0..=29 if clients.len() < self.users.len() => {
                     let user = self
                 }
                 _ => continue,
             };
-        };
-        Some(operation)
+        })
     }
 
-    async fn next_client_operation(
+    async fn generate_client_operation(
         &mut self,
+        user_id: UserId,
         client: &TestClient,
         cx: &TestAppContext,
     ) -> Option<ClientOperation> {
         if self.operation_ix == self.max_operations {
             return None;
         }
 
-        let user_id = client.current_user_id(cx);
+        self.operation_ix += 1;
         let call = cx.read(ActiveCall::global);
-        let operation = loop {
+        Some(loop {
             match self.rng.gen_range(0..100_u32) {
                 // Mutate the call
                 0..=29 => {
@@ -1237,6 +1353,7 @@ impl TestPlan {
                         let detach = self.rng.gen_bool(0.3);
                         break ClientOperation::SearchProject {
                             project_root_name,
+                            is_local,
                             query,
                             detach,
                         };
@@ -1293,9 +1410,7 @@ impl TestPlan {
                     break ClientOperation::CreateFsEntry { path, is_dir };
                 }
             }
-        };
-        self.operation_ix += 1;
-        Some(operation)
+        })
     }
 
     fn next_root_dir_name(&mut self, user_id: UserId) -> String {
@@ -1572,3 +1687,16 @@ fn gen_file_name(rng: &mut StdRng) -> String {
     }
     name
 }
+
+fn path_env_var(name: &str) -> Option<PathBuf> {
+    let value = env::var(name).ok()?;
+    let mut path = PathBuf::from(value);
+    if path.is_relative() {
+        let mut abs_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+        abs_path.pop();
+        abs_path.pop();
+        abs_path.push(path);
+        path = abs_path
+    }
+    Some(path)
+}

From 3e3a703b60096e3e5287ddf33cff57839f80d339 Mon Sep 17 00:00:00 2001
From: Max Brunsfeld
Date: Mon, 9 Jan 2023 11:36:53 -0800
Subject: [PATCH 09/80] Skip inapplicable operations when running an edited
 test plan

---
 .../src/tests/randomized_integration_tests.rs | 633 +++++++++++-------
 1 file changed, 375 insertions(+), 258 deletions(-)

diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs
index
243d275e13dc39b554afdea21224ea984829cab8..4d87ca9ccc62389675821e0d6dfcb70c3a64dcba 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -7,9 +7,10 @@ use anyhow::{anyhow, Result}; use call::ActiveCall; use client::RECEIVE_TIMEOUT; use collections::{BTreeMap, HashSet}; +use editor::Bias; use fs::{FakeFs, Fs as _}; use futures::StreamExt as _; -use gpui::{executor::Deterministic, ModelHandle, TestAppContext}; +use gpui::{executor::Deterministic, ModelHandle, Task, TestAppContext}; use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16}; use lsp::FakeLanguageServer; use parking_lot::Mutex; @@ -21,7 +22,10 @@ use std::{ ops::Range, path::{Path, PathBuf}, rc::Rc, - sync::Arc, + sync::{ + atomic::{AtomicBool, Ordering::SeqCst}, + Arc, + }, }; use util::ResultExt; @@ -101,147 +105,21 @@ async fn test_random_collaboration( let mut next_entity_id = 100000; loop { - let Some(next_operation) = plan.lock().next_operation(&clients).await else { break }; - match next_operation { - Operation::AddConnection { user_id } => { - let username = { - let mut plan = plan.lock(); - let mut user = plan.user(user_id); - user.online = true; - user.username.clone() - }; - log::info!("Adding new connection for {}", username); - next_entity_id += 100000; - let mut client_cx = TestAppContext::new( - cx.foreground_platform(), - cx.platform(), - deterministic.build_foreground(next_entity_id), - deterministic.build_background(), - cx.font_cache(), - cx.leak_detector(), - next_entity_id, - cx.function_name.clone(), - ); - - let (operation_tx, operation_rx) = futures::channel::mpsc::unbounded(); - let client = Rc::new(server.create_client(&mut client_cx, &username).await); - operation_channels.push(operation_tx); - clients.push((client.clone(), client_cx.clone())); - client_tasks.push(client_cx.foreground().spawn(simulate_client( - client, - operation_rx, - plan.clone(), - client_cx, - ))); - - log::info!("Added connection for {}", username); - } - - Operation::RemoveConnection { user_id } => { - log::info!("Simulating full disconnection of user {}", user_id); - let client_ix = clients - .iter() - .position(|(client, cx)| client.current_user_id(cx) == user_id) - .unwrap(); - let user_connection_ids = server - .connection_pool - .lock() - .user_connection_ids(user_id) - .collect::>(); - assert_eq!(user_connection_ids.len(), 1); - let removed_peer_id = user_connection_ids[0].into(); - let (client, mut client_cx) = clients.remove(client_ix); - let client_task = client_tasks.remove(client_ix); - operation_channels.remove(client_ix); - server.forbid_connections(); - server.disconnect_client(removed_peer_id); - deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); - deterministic.start_waiting(); - log::info!("Waiting for user {} to exit...", user_id); - client_task.await; - deterministic.finish_waiting(); - server.allow_connections(); - - for project in client.remote_projects().iter() { - project.read_with(&client_cx, |project, _| { - assert!( - project.is_read_only(), - "project {:?} should be read only", - project.remote_id() - ) - }); - } - - for (client, cx) in &clients { - let contacts = server - .app_state - .db - .get_contacts(client.current_user_id(cx)) - .await - .unwrap(); - let pool = server.connection_pool.lock(); - for contact in contacts { - if let db::Contact::Accepted { user_id: id, .. 
} = contact { - if pool.is_user_online(id) { - assert_ne!( - id, user_id, - "removed client is still a contact of another peer" - ); - } - } - } - } - - log::info!("{} removed", client.username); - plan.lock().user(user_id).online = false; - client_cx.update(|cx| { - cx.clear_globals(); - drop(client); - }); - } - - Operation::BounceConnection { user_id } => { - log::info!("Simulating temporary disconnection of user {}", user_id); - let user_connection_ids = server - .connection_pool - .lock() - .user_connection_ids(user_id) - .collect::>(); - assert_eq!(user_connection_ids.len(), 1); - let peer_id = user_connection_ids[0].into(); - server.disconnect_client(peer_id); - deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT); - } - - Operation::RestartServer => { - log::info!("Simulating server restart"); - server.reset().await; - deterministic.advance_clock(RECEIVE_TIMEOUT); - server.start().await.unwrap(); - deterministic.advance_clock(CLEANUP_TIMEOUT); - let environment = &server.app_state.config.zed_environment; - let stale_room_ids = server - .app_state - .db - .stale_room_ids(environment, server.id()) - .await - .unwrap(); - assert_eq!(stale_room_ids, vec![]); - } - - Operation::MutateClients { user_ids, quiesce } => { - for user_id in user_ids { - let client_ix = clients - .iter() - .position(|(client, cx)| client.current_user_id(cx) == user_id) - .unwrap(); - operation_channels[client_ix].unbounded_send(()).unwrap(); - } - - if quiesce { - deterministic.run_until_parked(); - } - } + let Some((next_operation, skipped)) = plan.lock().next_server_operation(&clients) else { break }; + let applied = apply_server_operation( + deterministic.clone(), + &mut server, + &mut clients, + &mut client_tasks, + &mut operation_channels, + &mut next_entity_id, + plan.clone(), + next_operation, + cx, + ) + .await; + if !applied { + skipped.store(false, SeqCst); } } @@ -430,39 +308,216 @@ async fn test_random_collaboration( } } +async fn apply_server_operation( + deterministic: Arc, + server: &mut TestServer, + clients: &mut Vec<(Rc, TestAppContext)>, + client_tasks: &mut Vec>, + operation_channels: &mut Vec>, + next_entity_id: &mut usize, + plan: Arc>, + operation: Operation, + cx: &mut TestAppContext, +) -> bool { + match operation { + Operation::AddConnection { user_id } => { + let username; + { + let mut plan = plan.lock(); + let mut user = plan.user(user_id); + if user.online { + return false; + } + user.online = true; + username = user.username.clone(); + }; + log::info!("Adding new connection for {}", username); + *next_entity_id += 100000; + let mut client_cx = TestAppContext::new( + cx.foreground_platform(), + cx.platform(), + deterministic.build_foreground(*next_entity_id), + deterministic.build_background(), + cx.font_cache(), + cx.leak_detector(), + *next_entity_id, + cx.function_name.clone(), + ); + + let (operation_tx, operation_rx) = futures::channel::mpsc::unbounded(); + let client = Rc::new(server.create_client(&mut client_cx, &username).await); + operation_channels.push(operation_tx); + clients.push((client.clone(), client_cx.clone())); + client_tasks.push(client_cx.foreground().spawn(simulate_client( + client, + operation_rx, + plan.clone(), + client_cx, + ))); + + log::info!("Added connection for {}", username); + } + + Operation::RemoveConnection { user_id } => { + log::info!("Simulating full disconnection of user {}", user_id); + let client_ix = clients + .iter() + .position(|(client, cx)| client.current_user_id(cx) == user_id); + let Some(client_ix) = client_ix else { 
return false };
+            let user_connection_ids = server
+                .connection_pool
+                .lock()
+                .user_connection_ids(user_id)
+                .collect::<Vec<_>>();
+            assert_eq!(user_connection_ids.len(), 1);
+            let removed_peer_id = user_connection_ids[0].into();
+            let (client, mut client_cx) = clients.remove(client_ix);
+            let client_task = client_tasks.remove(client_ix);
+            operation_channels.remove(client_ix);
+            server.forbid_connections();
+            server.disconnect_client(removed_peer_id);
+            deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
+            deterministic.start_waiting();
+            log::info!("Waiting for user {} to exit...", user_id);
+            client_task.await;
+            deterministic.finish_waiting();
+            server.allow_connections();
+
+            for project in client.remote_projects().iter() {
+                project.read_with(&client_cx, |project, _| {
+                    assert!(
+                        project.is_read_only(),
+                        "project {:?} should be read only",
+                        project.remote_id()
+                    )
+                });
+            }
+
+            for (client, cx) in clients {
+                let contacts = server
+                    .app_state
+                    .db
+                    .get_contacts(client.current_user_id(cx))
+                    .await
+                    .unwrap();
+                let pool = server.connection_pool.lock();
+                for contact in contacts {
+                    if let db::Contact::Accepted { user_id: id, .. } = contact {
+                        if pool.is_user_online(id) {
+                            assert_ne!(
+                                id, user_id,
+                                "removed client is still a contact of another peer"
+                            );
+                        }
+                    }
+                }
+            }
+
+            log::info!("{} removed", client.username);
+            plan.lock().user(user_id).online = false;
+            client_cx.update(|cx| {
+                cx.clear_globals();
+                drop(client);
+            });
+        }
+
+        Operation::BounceConnection { user_id } => {
+            log::info!("Simulating temporary disconnection of user {}", user_id);
+            let user_connection_ids = server
+                .connection_pool
+                .lock()
+                .user_connection_ids(user_id)
+                .collect::<Vec<_>>();
+            if user_connection_ids.is_empty() {
+                return false;
+            }
+            assert_eq!(user_connection_ids.len(), 1);
+            let peer_id = user_connection_ids[0].into();
+            server.disconnect_client(peer_id);
+            deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
+        }
+
+        Operation::RestartServer => {
+            log::info!("Simulating server restart");
+            server.reset().await;
+            deterministic.advance_clock(RECEIVE_TIMEOUT);
+            server.start().await.unwrap();
+            deterministic.advance_clock(CLEANUP_TIMEOUT);
+            let environment = &server.app_state.config.zed_environment;
+            let stale_room_ids = server
+                .app_state
+                .db
+                .stale_room_ids(environment, server.id())
+                .await
+                .unwrap();
+            assert_eq!(stale_room_ids, vec![]);
+        }
+
+        Operation::MutateClients { user_ids, quiesce } => {
+            let mut applied = false;
+            for user_id in user_ids {
+                let client_ix = clients
+                    .iter()
+                    .position(|(client, cx)| client.current_user_id(cx) == user_id);
+                let Some(client_ix) = client_ix else { continue };
+                applied = true;
+                // The client may already have shut down; tolerate a closed channel.
+                operation_channels[client_ix].unbounded_send(()).ok();
+            }
+
+            if quiesce && applied {
+                deterministic.run_until_parked();
+            }
+
+            return applied;
+        }
+    }
+    true
+}
+
 async fn apply_client_operation(
     client: &TestClient,
     operation: ClientOperation,
     cx: &mut TestAppContext,
-) -> Result<()> {
+) -> Result<bool> {
+    let active_call = cx.read(ActiveCall::global);
     match operation {
         ClientOperation::AcceptIncomingCall => {
-            log::info!("{}: accepting incoming call", client.username);
-            let active_call = cx.read(ActiveCall::global);
+            if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) {
+                return Ok(false);
+            }
+
+            log::info!("{}: accepting incoming call", client.username);
             active_call
                 .update(cx, |call, cx| call.accept_incoming(cx))
                 .await?;
         }
        ClientOperation::RejectIncomingCall => {
-            log::info!("{}: declining incoming call", client.username);
             let active_call = cx.read(ActiveCall::global);
+            if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) {
+                return Ok(false);
+            }
+
+            log::info!("{}: declining incoming call", client.username);
             active_call.update(cx, |call, _| call.decline_incoming())?;
         }
 
         ClientOperation::LeaveCall => {
-            log::info!("{}: hanging up", client.username);
             let active_call = cx.read(ActiveCall::global);
+            if active_call.read_with(cx, |call, _| call.room().is_none()) {
+                return Ok(false);
+            }
+
+            log::info!("{}: hanging up", client.username);
             active_call.update(cx, |call, cx| call.hang_up(cx))?;
         }
 
         ClientOperation::InviteContactToCall { user_id } => {
-            log::info!("{}: inviting {}", client.username, user_id,);
             let active_call = cx.read(ActiveCall::global);
+
+            log::info!("{}: inviting {}", client.username, user_id,);
             active_call
                 .update(cx, |call, cx| call.invite(user_id.to_proto(), None, cx))
                 .await
@@ -492,6 +547,10 @@ async fn apply_client_operation(
             project_root_name,
             new_root_path,
         } => {
+            let Some(project) = project_for_root_name(client, &project_root_name, cx) else {
+                return Ok(false)
+            };
+
             log::info!(
                 "{}: finding/creating local worktree at {:?} to project with root path {}",
                 client.username,
                 new_root_path,
                 project_root_name
             );
 
-            let project = project_for_root_name(client, &project_root_name, cx)
-                .expect("invalid project in test operation");
             ensure_project_shared(&project, client, cx).await;
             if !client.fs.paths().await.contains(&new_root_path) {
                 client.fs.create_dir(&new_root_path).await.unwrap();
             }
             project
@@ -514,21 +571,56 @@
         }
 
         ClientOperation::CloseRemoteProject { project_root_name } => {
+            let Some(project) = project_for_root_name(client, &project_root_name, cx) else {
+                return Ok(false)
+            };
+
             log::info!(
                 "{}: closing remote project with root path {}",
                 client.username,
                 project_root_name,
             );
 
-            let ix = project_ix_for_root_name(&*client.remote_projects(), &project_root_name, cx)
-                .expect("invalid project in test operation");
-            cx.update(|_| client.remote_projects_mut().remove(ix));
+            let ix = client
+                .remote_projects()
+                .iter()
+                .position(|p| p == &project)
+                .unwrap();
+            cx.update(|_| {
+                client.remote_projects_mut().remove(ix);
+                drop(project);
+            });
         }
 
         ClientOperation::OpenRemoteProject {
             host_id,
             first_root_name,
         } => {
+            let active_call = cx.read(ActiveCall::global);
+            let project = active_call.update(cx, |call, cx| {
+                let room = call.room().cloned()?;
+                let participant = room
+                    .read(cx)
+                    .remote_participants()
+                    .get(&host_id.to_proto())?;
+                let project_id = participant
+                    .projects
+                    .iter()
+                    .find(|project| project.worktree_root_names[0] == first_root_name)?
+                    .id;
+                Some(room.update(cx, |room, cx| {
+                    room.join_project(
+                        project_id,
+                        client.language_registry.clone(),
+                        FakeFs::new(cx.background().clone()),
+                        cx,
+                    )
+                }))
+            });
+            let Some(project) = project else {
+                return Ok(false)
+            };
+
             log::info!(
                 "{}: joining remote project of user {}, root name {}",
                 client.username,
                 host_id,
                 first_root_name,
             );
 
-            let active_call = cx.read(ActiveCall::global);
-            let project = active_call
-                .update(cx, |call, cx| {
-                    let room = call.room().cloned()?;
-                    let participant = room
-                        .read(cx)
-                        .remote_participants()
-                        .get(&host_id.to_proto())?;
-                    let project_id = participant
-                        .projects
-                        .iter()
-                        .find(|project| project.worktree_root_names[0] == first_root_name)?
- .id; - Some(room.update(cx, |room, cx| { - room.join_project( - project_id, - client.language_registry.clone(), - FakeFs::new(cx.background().clone()), - cx, - ) - })) - }) - .expect("invalid project in test operation") - .await?; + let project = project.await?; client.remote_projects_mut().push(project.clone()); } @@ -569,6 +638,13 @@ async fn apply_client_operation( full_path, is_dir, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + let Some(project_path) = project_path_for_full_path(&project, &full_path, cx) else { + return Ok(false); + }; + log::info!( "{}: creating {} at path {:?} in {} project {}", client.username, @@ -578,11 +654,7 @@ async fn apply_client_operation( project_root_name, ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; - let project_path = project_path_for_full_path(&project, &full_path, cx) - .expect("invalid worktree path in test operation"); project .update(cx, |p, cx| p.create_entry(project_path, is_dir, cx)) .unwrap() @@ -594,6 +666,13 @@ async fn apply_client_operation( is_local, full_path, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + let Some(project_path) = project_path_for_full_path(&project, &full_path, cx) else { + return Ok(false); + }; + log::info!( "{}: opening buffer {:?} in {} project {}", client.username, @@ -602,11 +681,7 @@ async fn apply_client_operation( project_root_name, ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; - let project_path = project_path_for_full_path(&project, &full_path, cx) - .expect("invalid buffer path in test operation"); let buffer = project .update(cx, |project, cx| project.open_buffer(project_path, cx)) .await?; @@ -619,6 +694,14 @@ async fn apply_client_operation( full_path, edits, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + let Some(buffer) = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { + return Ok(false); + }; + log::info!( "{}: editing buffer {:?} in {} project {} with {:?}", client.username, @@ -628,14 +711,18 @@ async fn apply_client_operation( edits ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); buffer.update(cx, |buffer, cx| { - buffer.edit(edits, None, cx); + let snapshot = buffer.snapshot(); + buffer.edit( + edits.into_iter().map(|(range, text)| { + let start = snapshot.clip_offset(range.start, Bias::Left); + let end = snapshot.clip_offset(range.end, Bias::Right); + (start..end, text) + }), + None, + cx, + ); }); } @@ -644,20 +731,23 @@ async fn apply_client_operation( is_local, full_path, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + let Some(buffer) = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { + return Ok(false); + }; + log::info!( - "{}: dropping buffer {:?} in {} project {}", + "{}: closing buffer {:?} in {} project {}", client.username, 
full_path, if is_local { "local" } else { "remote" }, project_root_name ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); cx.update(|_| { client.buffers_for_project(&project).remove(&buffer); drop(buffer); @@ -670,6 +760,14 @@ async fn apply_client_operation( full_path, detach, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + let Some(buffer) = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { + return Ok(false); + }; + log::info!( "{}: saving buffer {:?} in {} project {}{}", client.username, @@ -679,12 +777,7 @@ async fn apply_client_operation( if detach { ", detaching" } else { ", awaiting" } ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); ensure_project_shared(&project, client, cx).await; - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); let (requested_version, save) = buffer.update(cx, |buffer, cx| (buffer.version(), buffer.save(cx))); let save = cx.background().spawn(async move { @@ -710,6 +803,14 @@ async fn apply_client_operation( kind, detach, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + let Some(buffer) = + buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { + return Ok(false); + }; + log::info!( "{}: request LSP {:?} for buffer {:?} in {} project {}{}", client.username, @@ -720,11 +821,7 @@ async fn apply_client_operation( if detach { ", detaching" } else { ", awaiting" } ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); - let buffer = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) - .expect("invalid buffer path in test operation"); + let offset = buffer.read_with(cx, |b, _| b.clip_offset(offset, Bias::Left)); let request = match kind { LspRequestKind::Rename => cx.spawn(|mut cx| async move { project @@ -770,6 +867,10 @@ async fn apply_client_operation( query, detach, } => { + let Some(project) = project_for_root_name(client, &project_root_name, cx) else { + return Ok(false); + }; + log::info!( "{}: search {} project {} for {:?}{}", client.username, @@ -778,8 +879,7 @@ async fn apply_client_operation( query, if detach { ", detaching" } else { ", awaiting" } ); - let project = project_for_root_name(client, &project_root_name, cx) - .expect("invalid project in test operation"); + let search = project.update(cx, |project, cx| { project.search(SearchQuery::text(query, false, false), cx) }); @@ -797,12 +897,17 @@ async fn apply_client_operation( } ClientOperation::CreateFsEntry { path, is_dir } => { + if client.fs.metadata(&path.parent().unwrap()).await?.is_none() { + return Ok(false); + } + log::info!( "{}: creating {} at {:?}", client.username, if is_dir { "dir" } else { "file" }, path ); + if is_dir { client.fs.create_dir(&path).await.unwrap(); } else { @@ -814,13 +919,13 @@ async fn apply_client_operation( } } } - Ok(()) + Ok(true) } struct TestPlan { rng: StdRng, replay: bool, - stored_operations: Vec, + stored_operations: Vec<(StoredOperation, 
Arc)>, max_operations: usize, operation_ix: usize, users: Vec, @@ -962,51 +1067,57 @@ impl TestPlan { fn load(&mut self, path: &Path) { let json = std::fs::read_to_string(path).unwrap(); self.replay = true; - self.stored_operations = serde_json::from_str(&json).unwrap(); + let stored_operations: Vec = serde_json::from_str(&json).unwrap(); + self.stored_operations = stored_operations + .into_iter() + .map(|operation| (operation, Arc::new(AtomicBool::new(false)))) + .collect() } fn save(&mut self, path: &Path) { // Format each operation as one line let mut json = Vec::new(); json.push(b'['); - for (i, stored_operation) in self.stored_operations.iter().enumerate() { - if i > 0 { + for (operation, skipped) in &self.stored_operations { + if skipped.load(SeqCst) { + continue; + } + if json.len() > 1 { json.push(b','); } json.extend_from_slice(b"\n "); - serde_json::to_writer(&mut json, stored_operation).unwrap(); + serde_json::to_writer(&mut json, operation).unwrap(); } json.extend_from_slice(b"\n]\n"); std::fs::write(path, &json).unwrap(); } - async fn next_operation( + fn next_server_operation( &mut self, clients: &[(Rc, TestAppContext)], - ) -> Option { + ) -> Option<(Operation, Arc)> { if self.replay { while let Some(stored_operation) = self.stored_operations.get(self.operation_ix) { self.operation_ix += 1; - if let StoredOperation::Server(operation) = stored_operation { - return Some(operation.clone()); + if let (StoredOperation::Server(operation), skipped) = stored_operation { + return Some((operation.clone(), skipped.clone())); } } None } else { - let operation = self.generate_operation(clients).await; - if let Some(operation) = &operation { - self.stored_operations - .push(StoredOperation::Server(operation.clone())) - } - operation + let operation = self.generate_server_operation(clients)?; + let skipped = Arc::new(AtomicBool::new(false)); + self.stored_operations + .push((StoredOperation::Server(operation.clone()), skipped.clone())); + Some((operation, skipped)) } } - async fn next_client_operation( + fn next_client_operation( &mut self, client: &TestClient, cx: &TestAppContext, - ) -> Option { + ) -> Option<(ClientOperation, Arc)> { let current_user_id = client.current_user_id(cx); let user_ix = self .users @@ -1018,28 +1129,29 @@ impl TestPlan { if self.replay { while let Some(stored_operation) = self.stored_operations.get(user_plan.operation_ix) { user_plan.operation_ix += 1; - if let StoredOperation::Client { user_id, operation } = stored_operation { + if let (StoredOperation::Client { user_id, operation }, skipped) = stored_operation + { if user_id == ¤t_user_id { - return Some(operation.clone()); + return Some((operation.clone(), skipped.clone())); } } } None } else { - let operation = self - .generate_client_operation(current_user_id, client, cx) - .await; - if let Some(operation) = &operation { - self.stored_operations.push(StoredOperation::Client { + let operation = self.generate_client_operation(current_user_id, client, cx)?; + let skipped = Arc::new(AtomicBool::new(false)); + self.stored_operations.push(( + StoredOperation::Client { user_id: current_user_id, operation: operation.clone(), - }) - } - operation + }, + skipped.clone(), + )); + Some((operation, skipped)) } } - async fn generate_operation( + fn generate_server_operation( &mut self, clients: &[(Rc, TestAppContext)], ) -> Option { @@ -1091,7 +1203,7 @@ impl TestPlan { .collect(); Operation::MutateClients { user_ids, - quiesce: self.rng.gen(), + quiesce: self.rng.gen_bool(0.7), } } _ => continue, @@ -1099,7 +1211,7 @@ 
impl TestPlan {
         })
     }
 
-    async fn generate_client_operation(
+    fn generate_client_operation(
         &mut self,
         user_id: UserId,
         client: &TestClient,
@@ -1221,11 +1333,11 @@ impl TestPlan {
                 // Add a worktree to a local project
                 0..=50 => {
                     let Some(project) = client
-                        .local_projects()
-                        .choose(&mut self.rng)
-                        .cloned() else { continue };
+                        .local_projects()
+                        .choose(&mut self.rng)
+                        .cloned() else { continue };
                     let project_root_name = root_name_for_project(&project, cx);
-                    let mut paths = client.fs.paths().await;
+                    let mut paths = cx.background().block(client.fs.paths());
                     paths.remove(0);
                     let new_root_path = if paths.is_empty() || self.rng.gen() {
                         Path::new("/").join(&self.next_root_dir_name(user_id))
@@ -1396,10 +1508,9 @@ impl TestPlan {
             // Create a file or directory
             96.. => {
                 let is_dir = self.rng.gen::<bool>();
-                let mut path = client
-                    .fs
-                    .directories()
-                    .await
+                let mut path = cx
+                    .background()
+                    .block(client.fs.directories())
                     .choose(&mut self.rng)
                     .unwrap()
                     .clone();
@@ -1501,10 +1612,9 @@ async fn simulate_client(
         let plan = plan.clone();
         async move {
             let files = fs.files().await;
-            let mut plan = plan.lock();
-            let count = plan.rng.gen_range::<usize, _>(1..3);
+            let count = plan.lock().rng.gen_range::<usize, _>(1..3);
             let files = (0..count)
-                .map(|_| files.choose(&mut plan.rng).unwrap())
+                .map(|_| files.choose(&mut plan.lock().rng).unwrap())
                 .collect::<Vec<_>>();
             log::info!("LSP: Returning definitions in files {:?}", &files);
             Ok(Some(lsp::GotoDefinitionResponse::Array(
@@ -1552,9 +1662,16 @@ async fn simulate_client(
     client.language_registry.add(Arc::new(language));
 
     while operation_rx.next().await.is_some() {
-        let Some(operation) = plan.lock().next_client_operation(&client, &cx).await else { break };
-        if let Err(error) = apply_client_operation(&client, operation, &mut cx).await {
-            log::error!("{} error: {}", client.username, error);
+        let Some((operation, skipped)) = plan.lock().next_client_operation(&client, &cx) else { break };
+        match apply_client_operation(&client, operation, &mut cx).await {
+            Err(error) => {
+                log::error!("{} error: {}", client.username, error);
+            }
+            Ok(applied) => {
+                if !applied {
+                    skipped.store(true, SeqCst);
+                }
+            }
         }
        cx.background().simulate_random_delay().await;
    }

From 576a9bb92cd59cb147d9b85ff838a6d39f3dda40 Mon Sep 17 00:00:00 2001
From: Max Brunsfeld
Date: Mon, 9 Jan 2023 14:49:36 -0800
Subject: [PATCH 10/80] Drop project's buffers when closing a remote project

---
 crates/collab/src/tests/randomized_integration_tests.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs
index 34ae96e66563b5815178aeaf8c93de015934f59f..cd51a2e1f8196ed140c9c96fd841f22cf09aebcf 100644
--- a/crates/collab/src/tests/randomized_integration_tests.rs
+++ b/crates/collab/src/tests/randomized_integration_tests.rs
@@ -597,6 +597,7 @@ async fn apply_client_operation(
             .unwrap();
         cx.update(|_| {
             client.remote_projects_mut().remove(ix);
+            client.buffers().retain(|buffers_project, _| buffers_project != &project);
             drop(project);
         });
     }

From a3c7416218d04abeeb29b0920c5e87b02ed3ff26 Mon Sep 17 00:00:00 2001
From: Max Brunsfeld
Date: Thu, 12 Jan 2023 12:33:23 -0800
Subject: [PATCH 11/80] Don't include user ids with MutateClients ops in
 serialized test plans

---
 .../src/tests/randomized_integration_tests.rs | 66 +++++++++++++++----
 1 file changed, 55 insertions(+), 11 deletions(-)

diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs
index
cd51a2e1f8196ed140c9c96fd841f22cf09aebcf..0c2f7ce288850d5319fe84148655d682eb94b626 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -322,7 +322,7 @@ async fn apply_server_operation( server: &mut TestServer, clients: &mut Vec<(Rc, TestAppContext)>, client_tasks: &mut Vec>, - operation_channels: &mut Vec>, + operation_channels: &mut Vec>, next_entity_id: &mut usize, plan: Arc>, operation: Operation, @@ -462,7 +462,11 @@ async fn apply_server_operation( assert_eq!(stale_room_ids, vec![]); } - Operation::MutateClients { user_ids, quiesce } => { + Operation::MutateClients { + user_ids, + batch_id, + quiesce, + } => { let mut applied = false; for user_id in user_ids { let client_ix = clients @@ -470,7 +474,7 @@ async fn apply_server_operation( .position(|(client, cx)| client.current_user_id(cx) == user_id); let Some(client_ix) = client_ix else { continue }; applied = true; - if let Err(err) = operation_channels[client_ix].unbounded_send(()) { + if let Err(err) = operation_channels[client_ix].unbounded_send(batch_id) { // panic!("error signaling user {}, client {}", user_id, client_ix); } } @@ -970,6 +974,7 @@ struct TestPlan { max_operations: usize, operation_ix: usize, users: Vec, + next_batch_id: usize, allow_server_restarts: bool, allow_client_reconnection: bool, allow_client_disconnection: bool, @@ -989,6 +994,7 @@ enum StoredOperation { Server(Operation), Client { user_id: UserId, + batch_id: usize, operation: ClientOperation, }, } @@ -1006,6 +1012,9 @@ enum Operation { }, RestartServer, MutateClients { + batch_id: usize, + #[serde(skip_serializing)] + #[serde(skip_deserializing)] user_ids: Vec, quiesce: bool, }, @@ -1103,6 +1112,7 @@ impl TestPlan { allow_client_disconnection: rng.gen_bool(0.1), stored_operations: Vec::new(), operation_ix: 0, + next_batch_id: 0, max_operations, users, rng, @@ -1114,8 +1124,32 @@ impl TestPlan { self.replay = true; let stored_operations: Vec = serde_json::from_str(&json).unwrap(); self.stored_operations = stored_operations - .into_iter() - .map(|operation| (operation, Arc::new(AtomicBool::new(false)))) + .iter() + .cloned() + .enumerate() + .map(|(i, mut operation)| { + if let StoredOperation::Server(Operation::MutateClients { + batch_id: current_batch_id, + user_ids, + .. + }) = &mut operation + { + assert!(user_ids.is_empty()); + user_ids.extend(stored_operations[i + 1..].iter().filter_map(|operation| { + if let StoredOperation::Client { + user_id, batch_id, .. + } = operation + { + if batch_id == current_batch_id { + return Some(user_id); + } + } + None + })); + user_ids.sort_unstable(); + } + (operation, Arc::new(AtomicBool::new(false))) + }) .collect() } @@ -1161,6 +1195,7 @@ impl TestPlan { fn next_client_operation( &mut self, client: &TestClient, + current_batch_id: usize, cx: &TestAppContext, ) -> Option<(ClientOperation, Arc)> { let current_user_id = client.current_user_id(cx); @@ -1174,7 +1209,12 @@ impl TestPlan { if self.replay { while let Some(stored_operation) = self.stored_operations.get(user_plan.operation_ix) { user_plan.operation_ix += 1; - if let (StoredOperation::Client { user_id, operation }, skipped) = stored_operation + if let ( + StoredOperation::Client { + user_id, operation, .. 
+ }, + skipped, + ) = stored_operation { if user_id == ¤t_user_id { return Some((operation.clone(), skipped.clone())); @@ -1188,6 +1228,7 @@ impl TestPlan { self.stored_operations.push(( StoredOperation::Client { user_id: current_user_id, + batch_id: current_batch_id, operation: operation.clone(), }, skipped.clone(), @@ -1239,15 +1280,18 @@ impl TestPlan { .rng .gen_range(1..10) .min(self.max_operations - self.operation_ix); - let user_ids = (0..count) + let batch_id = util::post_inc(&mut self.next_batch_id); + let mut user_ids = (0..count) .map(|_| { let ix = self.rng.gen_range(0..clients.len()); let (client, cx) = &clients[ix]; client.current_user_id(cx) }) - .collect(); + .collect::>(); + user_ids.sort_unstable(); Operation::MutateClients { user_ids, + batch_id, quiesce: self.rng.gen_bool(0.7), } } @@ -1625,7 +1669,7 @@ impl TestPlan { async fn simulate_client( client: Rc, - mut operation_rx: futures::channel::mpsc::UnboundedReceiver<()>, + mut operation_rx: futures::channel::mpsc::UnboundedReceiver, plan: Arc>, mut cx: TestAppContext, ) { @@ -1740,8 +1784,8 @@ async fn simulate_client( .await; client.language_registry.add(Arc::new(language)); - while operation_rx.next().await.is_some() { - let Some((operation, skipped)) = plan.lock().next_client_operation(&client, &cx) else { break }; + while let Some(batch_id) = operation_rx.next().await { + let Some((operation, skipped)) = plan.lock().next_client_operation(&client, batch_id, &cx) else { break }; match apply_client_operation(&client, operation, &mut cx).await { Err(error) => { log::error!("{} error: {}", client.username, error); From 00e8625037f9fb6a17577a66f46c5b53f3d85d8e Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 12 Jan 2023 14:30:10 -0800 Subject: [PATCH 12/80] Simplify management of entity ids for different app contexts in randomized test --- crates/collab/src/tests/randomized_integration_tests.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 0c2f7ce288850d5319fe84148655d682eb94b626..0b6ec2367a8102b02a79659015ec349be1611d39 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -105,7 +105,6 @@ async fn test_random_collaboration( let mut clients = Vec::new(); let mut client_tasks = Vec::new(); let mut operation_channels = Vec::new(); - let mut next_entity_id = 100000; loop { let Some((next_operation, skipped)) = plan.lock().next_server_operation(&clients) else { break }; @@ -115,7 +114,6 @@ async fn test_random_collaboration( &mut clients, &mut client_tasks, &mut operation_channels, - &mut next_entity_id, plan.clone(), next_operation, cx, @@ -323,7 +321,6 @@ async fn apply_server_operation( clients: &mut Vec<(Rc, TestAppContext)>, client_tasks: &mut Vec>, operation_channels: &mut Vec>, - next_entity_id: &mut usize, plan: Arc>, operation: Operation, cx: &mut TestAppContext, @@ -341,15 +338,15 @@ async fn apply_server_operation( username = user.username.clone(); }; log::info!("Adding new connection for {}", username); - *next_entity_id += 100000; + let next_entity_id = (user_id.0 * 10_000) as usize; let mut client_cx = TestAppContext::new( cx.foreground_platform(), cx.platform(), - deterministic.build_foreground(*next_entity_id), + deterministic.build_foreground(user_id.0 as usize), deterministic.build_background(), cx.font_cache(), cx.leak_detector(), - *next_entity_id, + next_entity_id, 
cx.function_name.clone(), ); From e04d0be8531ae4ab78e72811d841f5c3045e71e7 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 12 Jan 2023 14:30:53 -0800 Subject: [PATCH 13/80] Remove unneeded log messages in randomized test --- crates/collab/src/tests/randomized_integration_tests.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 0b6ec2367a8102b02a79659015ec349be1611d39..fd6252e78c8bf32e0a9fb414e5f9c1604a87b348 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -799,7 +799,6 @@ async fn apply_client_operation( anyhow::Ok(()) }); if detach { - log::info!("{}: detaching save request", client.username); cx.update(|cx| save.detach_and_log_err(cx)); } else { save.await?; @@ -900,7 +899,6 @@ async fn apply_client_operation( .map_err(|err| anyhow!("search request failed: {:?}", err)) }); if detach { - log::info!("{}: detaching save request", client.username); cx.update(|cx| search.detach_and_log_err(cx)); } else { search.await?; From 1a9ff2420e5441d1a86496ac3daa6833d69b855b Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 12 Jan 2023 22:09:36 -0800 Subject: [PATCH 14/80] Clean up how applications are marked as inapplicable --- .../src/tests/randomized_integration_tests.rs | 172 +++++++++--------- 1 file changed, 82 insertions(+), 90 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index fd6252e78c8bf32e0a9fb414e5f9c1604a87b348..d7f946cc0a3386b796d0664f8d512ba71e11a479 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -6,7 +6,7 @@ use crate::{ use anyhow::{anyhow, Result}; use call::ActiveCall; use client::RECEIVE_TIMEOUT; -use collections::{BTreeMap, HashSet}; +use collections::BTreeMap; use editor::Bias; use fs::{FakeFs, Fs as _}; use futures::StreamExt as _; @@ -490,12 +490,12 @@ async fn apply_client_operation( client: &TestClient, operation: ClientOperation, cx: &mut TestAppContext, -) -> Result { +) -> Result<(), TestError> { match operation { ClientOperation::AcceptIncomingCall => { let active_call = cx.read(ActiveCall::global); if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) { - return Ok(false); + Err(TestError::Inapplicable)?; } log::info!("{}: accepting incoming call", client.username); @@ -507,7 +507,7 @@ async fn apply_client_operation( ClientOperation::RejectIncomingCall => { let active_call = cx.read(ActiveCall::global); if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) { - return Ok(false); + Err(TestError::Inapplicable)?; } log::info!("{}: declining incoming call", client.username); @@ -517,7 +517,7 @@ async fn apply_client_operation( ClientOperation::LeaveCall => { let active_call = cx.read(ActiveCall::global); if active_call.read_with(cx, |call, _| call.room().is_none()) { - return Ok(false); + Err(TestError::Inapplicable)?; } log::info!("{}: hanging up", client.username); @@ -557,9 +557,8 @@ async fn apply_client_operation( project_root_name, new_root_path, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false) - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: finding/creating local worktree at {:?} to project 
with root path {}", @@ -581,9 +580,8 @@ async fn apply_client_operation( } ClientOperation::CloseRemoteProject { project_root_name } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false) - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: closing remote project with root path {}", @@ -608,29 +606,28 @@ async fn apply_client_operation( first_root_name, } => { let active_call = cx.read(ActiveCall::global); - let project = active_call.update(cx, |call, cx| { - let room = call.room().cloned()?; - let participant = room - .read(cx) - .remote_participants() - .get(&host_id.to_proto())?; - let project_id = participant - .projects - .iter() - .find(|project| project.worktree_root_names[0] == first_root_name)? - .id; - Some(room.update(cx, |room, cx| { - room.join_project( - project_id, - client.language_registry.clone(), - FakeFs::new(cx.background().clone()), - cx, - ) - })) - }); - let Some(project) = project else { - return Ok(false) - }; + let project = active_call + .update(cx, |call, cx| { + let room = call.room().cloned()?; + let participant = room + .read(cx) + .remote_participants() + .get(&host_id.to_proto())?; + let project_id = participant + .projects + .iter() + .find(|project| project.worktree_root_names[0] == first_root_name)? + .id; + Some(room.update(cx, |room, cx| { + room.join_project( + project_id, + client.language_registry.clone(), + FakeFs::new(cx.background().clone()), + cx, + ) + })) + }) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: joining remote project of user {}, root name {}", @@ -649,12 +646,10 @@ async fn apply_client_operation( full_path, is_dir, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; - let Some(project_path) = project_path_for_full_path(&project, &full_path, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; + let project_path = project_path_for_full_path(&project, &full_path, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: creating {} at path {:?} in {} project {}", @@ -677,12 +672,10 @@ async fn apply_client_operation( is_local, full_path, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; - let Some(project_path) = project_path_for_full_path(&project, &full_path, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; + let project_path = project_path_for_full_path(&project, &full_path, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: opening buffer {:?} in {} project {}", @@ -705,13 +698,10 @@ async fn apply_client_operation( full_path, edits, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; - let Some(buffer) = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; + let buffer = buffer_for_full_path(client, &project, &full_path, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: editing buffer {:?} in {} project {} with {:?}", @@ -742,13 +732,10 @@ async fn apply_client_operation( is_local, full_path, } => { - let Some(project) = project_for_root_name(client, 
&project_root_name, cx) else { - return Ok(false); - }; - let Some(buffer) = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; + let buffer = buffer_for_full_path(client, &project, &full_path, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: closing buffer {:?} in {} project {}", @@ -771,13 +758,10 @@ async fn apply_client_operation( full_path, detach, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; - let Some(buffer) = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; + let buffer = buffer_for_full_path(client, &project, &full_path, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: saving buffer {:?} in {} project {}{}", @@ -813,13 +797,10 @@ async fn apply_client_operation( kind, detach, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; - let Some(buffer) = - buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; + let buffer = buffer_for_full_path(client, &project, &full_path, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: request LSP {:?} for buffer {:?} in {} project {}{}", @@ -877,9 +858,8 @@ async fn apply_client_operation( query, detach, } => { - let Some(project) = project_for_root_name(client, &project_root_name, cx) else { - return Ok(false); - }; + let project = project_for_root_name(client, &project_root_name, cx) + .ok_or(TestError::Inapplicable)?; log::info!( "{}: search {} project {} for {:?}{}", @@ -906,9 +886,11 @@ async fn apply_client_operation( } ClientOperation::CreateFsEntry { path, is_dir } => { - if client.fs.metadata(&path.parent().unwrap()).await?.is_none() { - return Ok(false); - } + client + .fs + .metadata(&path.parent().unwrap()) + .await? + .ok_or(TestError::Inapplicable)?; log::info!( "{}: creating {} at {:?}", @@ -938,7 +920,7 @@ async fn apply_client_operation( .await? 
.map_or(false, |m| m.is_dir) { - return Ok(false); + Err(TestError::Inapplicable)?; } log::info!( @@ -959,7 +941,7 @@ async fn apply_client_operation( client.fs.set_index_for_repo(&dot_git_dir, &contents).await; } } - Ok(true) + Ok(()) } struct TestPlan { @@ -1098,6 +1080,17 @@ enum LspRequestKind { Highlights, } +enum TestError { + Inapplicable, + Other(anyhow::Error), +} + +impl From for TestError { + fn from(value: anyhow::Error) -> Self { + Self::Other(value) + } +} + impl TestPlan { fn new(mut rng: StdRng, users: Vec, max_operations: usize) -> Self { Self { @@ -1782,14 +1775,11 @@ async fn simulate_client( while let Some(batch_id) = operation_rx.next().await { let Some((operation, skipped)) = plan.lock().next_client_operation(&client, batch_id, &cx) else { break }; match apply_client_operation(&client, operation, &mut cx).await { - Err(error) => { + Ok(()) => {} + Err(TestError::Inapplicable) => skipped.store(true, SeqCst), + Err(TestError::Other(error)) => { log::error!("{} error: {}", client.username, error); } - Ok(applied) => { - if !applied { - skipped.store(true, SeqCst); - } - } } cx.background().simulate_random_delay().await; } @@ -1797,11 +1787,13 @@ async fn simulate_client( } fn buffer_for_full_path( - buffers: &HashSet>, + client: &TestClient, + project: &ModelHandle, full_path: &PathBuf, cx: &TestAppContext, ) -> Option> { - buffers + client + .buffers_for_project(project) .iter() .find(|buffer| { buffer.read_with(cx, |buffer, cx| { From 2c84b741263f958ba51e257e5763aa0bf40253a5 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 16 Jan 2023 09:48:54 -0800 Subject: [PATCH 15/80] Avoid retaining project in randomized test while LSP request is outstanding --- .../src/tests/randomized_integration_tests.rs | 68 +++++++++---------- 1 file changed, 33 insertions(+), 35 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index d7f946cc0a3386b796d0664f8d512ba71e11a479..01a744427ec825d79f0b7c69644d8fb78a8e26cc 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -197,13 +197,17 @@ async fn test_random_collaboration( assert_eq!( guest_snapshot.entries(false).collect::>(), host_snapshot.entries(false).collect::>(), - "{} has different snapshot than the host for worktree {} ({:?}) and project {:?}", + "{} has different snapshot than the host for worktree {:?} and project {:?}", + client.username, + host_snapshot.abs_path(), + host_project.read_with(host_cx, |project, _| project.remote_id()) + ); + assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id(), + "{} has different scan id than the host for worktree {:?} and project {:?}", client.username, - id, host_snapshot.abs_path(), host_project.read_with(host_cx, |project, _| project.remote_id()) ); - assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id()); } } } @@ -812,39 +816,32 @@ async fn apply_client_operation( if detach { ", detaching" } else { ", awaiting" } ); + use futures::{FutureExt as _, TryFutureExt as _}; let offset = buffer.read_with(cx, |b, _| b.clip_offset(offset, Bias::Left)); - let request = match kind { - LspRequestKind::Rename => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.prepare_rename(buffer, offset, cx)) - .await?; - anyhow::Ok(()) - }), - LspRequestKind::Completion => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.completions(&buffer, offset, cx)) - .await?; - Ok(()) - }), - 
LspRequestKind::CodeAction => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.code_actions(&buffer, offset..offset, cx)) - .await?; - Ok(()) - }), - LspRequestKind::Definition => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.definition(&buffer, offset, cx)) - .await?; - Ok(()) - }), - LspRequestKind::Highlights => cx.spawn(|mut cx| async move { - project - .update(&mut cx, |p, cx| p.document_highlights(&buffer, offset, cx)) - .await?; - Ok(()) - }), - }; + let request = cx.foreground().spawn(project.update(cx, |project, cx| { + match kind { + LspRequestKind::Rename => project + .prepare_rename(buffer, offset, cx) + .map_ok(|_| ()) + .boxed(), + LspRequestKind::Completion => project + .completions(&buffer, offset, cx) + .map_ok(|_| ()) + .boxed(), + LspRequestKind::CodeAction => project + .code_actions(&buffer, offset..offset, cx) + .map_ok(|_| ()) + .boxed(), + LspRequestKind::Definition => project + .definition(&buffer, offset, cx) + .map_ok(|_| ()) + .boxed(), + LspRequestKind::Highlights => project + .document_highlights(&buffer, offset, cx) + .map_ok(|_| ()) + .boxed(), + } + })); if detach { request.detach(); } else { @@ -873,6 +870,7 @@ async fn apply_client_operation( let search = project.update(cx, |project, cx| { project.search(SearchQuery::text(query, false, false), cx) }); + drop(project); let search = cx.background().spawn(async move { search .await From 543301f94930ec6dc71caed81bc56b9ec53b49d6 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 3 Apr 2023 15:58:11 -0700 Subject: [PATCH 16/80] Avoid repeatedly loading/saving the test plan for each iteration --- .../src/tests/randomized_integration_tests.rs | 64 +++++++++++++------ 1 file changed, 43 insertions(+), 21 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 19961c3ba536689c8f28d30b1ce7a75778995c95..a05870dc1ee49da4744d007f60cd30f5388ac0ce 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -33,6 +33,11 @@ use std::{ }; use util::ResultExt; +lazy_static::lazy_static! 
{
+    static ref LOADED_PLAN_JSON: Mutex<Option<Vec<u8>>> = Default::default();
+    static ref DID_SAVE_PLAN_JSON: AtomicBool = Default::default();
+}
+
 #[gpui::test(iterations = 100)]
 async fn test_random_collaboration(
     cx: &mut TestAppContext,
@@ -99,8 +104,14 @@ async fn test_random_collaboration(
     let plan = Arc::new(Mutex::new(TestPlan::new(rng, users, max_operations)));
 
     if let Some(path) = &plan_load_path {
-        eprintln!("loaded plan from path {:?}", path);
-        plan.lock().load(path);
+        let json = LOADED_PLAN_JSON
+            .lock()
+            .get_or_insert_with(|| {
+                eprintln!("loaded test plan from path {:?}", path);
+                std::fs::read(path).unwrap()
+            })
+            .clone();
+        plan.lock().deserialize(json);
     }
 
     let mut clients = Vec::new();
@@ -132,8 +143,10 @@ async fn test_random_collaboration(
     deterministic.run_until_parked();
 
     if let Some(path) = &plan_save_path {
-        eprintln!("saved test plan to path {:?}", path);
-        plan.lock().save(path);
+        if !DID_SAVE_PLAN_JSON.swap(true, SeqCst) {
+            eprintln!("saved test plan to path {:?}", path);
+            std::fs::write(path, plan.lock().serialize()).unwrap();
+        }
     }
 
     for (client, client_cx) in &clients {
@@ -313,28 +326,38 @@ async fn test_random_collaboration(
                 host_buffer.read_with(host_cx, |b, _| b.saved_version().clone());
             let guest_saved_version =
                 guest_buffer.read_with(client_cx, |b, _| b.saved_version().clone());
-            assert_eq!(guest_saved_version, host_saved_version);
+            assert_eq!(
+                guest_saved_version, host_saved_version,
+                "guest saved version does not match host's for path {path:?} in project {project_id}",
+            );
 
             let host_saved_version_fingerprint =
                 host_buffer.read_with(host_cx, |b, _| b.saved_version_fingerprint());
             let guest_saved_version_fingerprint =
                 guest_buffer.read_with(client_cx, |b, _| b.saved_version_fingerprint());
             assert_eq!(
-                guest_saved_version_fingerprint,
-                host_saved_version_fingerprint
+                guest_saved_version_fingerprint, host_saved_version_fingerprint,
+                "guest's saved fingerprint does not match host's for path {path:?} in project {project_id}",
             );
 
             let host_saved_mtime = host_buffer.read_with(host_cx, |b, _| b.saved_mtime());
             let guest_saved_mtime = guest_buffer.read_with(client_cx, |b, _| b.saved_mtime());
-            assert_eq!(guest_saved_mtime, host_saved_mtime);
+            assert_eq!(
+                guest_saved_mtime, host_saved_mtime,
+                "guest's saved mtime does not match host's for path {path:?} in project {project_id}",
+            );
 
             let host_is_dirty = host_buffer.read_with(host_cx, |b, _| b.is_dirty());
             let guest_is_dirty = guest_buffer.read_with(client_cx, |b, _| b.is_dirty());
-            assert_eq!(guest_is_dirty, host_is_dirty);
+            assert_eq!(guest_is_dirty, host_is_dirty,
+                "guest's dirty status does not match host's for path {path:?} in project {project_id}",
+            );
 
             let host_has_conflict = host_buffer.read_with(host_cx, |b, _| b.has_conflict());
             let guest_has_conflict = guest_buffer.read_with(client_cx, |b, _| b.has_conflict());
-            assert_eq!(guest_has_conflict, host_has_conflict);
+            assert_eq!(guest_has_conflict, host_has_conflict,
+                "guest's conflict status does not match host's for path {path:?} in project {project_id}",
+            );
         }
     }
 }
@@ -797,12 +820,12 @@ async fn apply_client_operation(
             .ok_or(TestError::Inapplicable)?;
 
         log::info!(
-            "{}: saving buffer {:?} in {} project {}{}",
+            "{}: saving buffer {:?} in {} project {}, {}",
            client.username,
            full_path,
            if is_local { "local" } else { "remote" },
            project_root_name,
-            if detach { ", detaching" } else { ", awaiting" }
+            if detach { "detaching" } else { "awaiting" }
        );
 
         ensure_project_shared(&project, client, cx).await;
@@ -836,13 +859,13 @@ async fn
apply_client_operation( .ok_or(TestError::Inapplicable)?; log::info!( - "{}: request LSP {:?} for buffer {:?} in {} project {}{}", + "{}: request LSP {:?} for buffer {:?} in {} project {}, {}", client.username, kind, full_path, if is_local { "local" } else { "remote" }, project_root_name, - if detach { ", detaching" } else { ", awaiting" } + if detach { "detaching" } else { "awaiting" } ); use futures::{FutureExt as _, TryFutureExt as _}; @@ -888,12 +911,12 @@ async fn apply_client_operation( .ok_or(TestError::Inapplicable)?; log::info!( - "{}: search {} project {} for {:?}{}", + "{}: search {} project {} for {:?}, {}", client.username, if is_local { "local" } else { "remote" }, project_root_name, query, - if detach { ", detaching" } else { ", awaiting" } + if detach { "detaching" } else { "awaiting" } ); let search = project.update(cx, |project, cx| { @@ -1137,10 +1160,9 @@ impl TestPlan { } } - fn load(&mut self, path: &Path) { - let json = std::fs::read_to_string(path).unwrap(); + fn deserialize(&mut self, json: Vec) { + let stored_operations: Vec = serde_json::from_slice(&json).unwrap(); self.replay = true; - let stored_operations: Vec = serde_json::from_str(&json).unwrap(); self.stored_operations = stored_operations .iter() .cloned() @@ -1171,7 +1193,7 @@ impl TestPlan { .collect() } - fn save(&mut self, path: &Path) { + fn serialize(&mut self) -> Vec { // Format each operation as one line let mut json = Vec::new(); json.push(b'['); @@ -1186,7 +1208,7 @@ impl TestPlan { serde_json::to_writer(&mut json, operation).unwrap(); } json.extend_from_slice(b"\n]\n"); - std::fs::write(path, &json).unwrap(); + json } fn next_server_operation( From f95732e981adda5331512caac4b008a5efd91ee9 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 3 Apr 2023 16:23:44 -0700 Subject: [PATCH 17/80] Fix bug where guest would drop BufferSaved messages while opening the buffer --- .../src/tests/randomized_integration_tests.rs | 2 +- crates/project/src/lsp_command.rs | 20 ++++----- crates/project/src/project.rs | 44 ++++++++++--------- 3 files changed, 35 insertions(+), 31 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index a05870dc1ee49da4744d007f60cd30f5388ac0ce..44e1891363f566688d8403cd8bf4550c3d3b95d9 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -132,7 +132,7 @@ async fn test_random_collaboration( ) .await; if !applied { - skipped.store(false, SeqCst); + skipped.store(true, SeqCst); } } diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index feec1ee0e4c56c9af39f30570a8b0717e0a0e353..dcc462546f2ff4f02a5b4aeaa299922a82616004 100644 --- a/crates/project/src/lsp_command.rs +++ b/crates/project/src/lsp_command.rs @@ -4,7 +4,7 @@ use crate::{ use anyhow::{anyhow, Result}; use async_trait::async_trait; use client::proto::{self, PeerId}; -use gpui::{AppContext, AsyncAppContext, ModelHandle}; +use gpui::{AppContext, AsyncAppContext, ModelHandle, MutableAppContext}; use language::{ point_from_lsp, point_to_lsp, proto::{deserialize_anchor, deserialize_version, serialize_anchor, serialize_version}, @@ -49,7 +49,7 @@ pub(crate) trait LspCommand: 'static + Sized { project: &mut Project, peer_id: PeerId, buffer_version: &clock::Global, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> ::Response; async fn response_from_proto( self, @@ -175,7 +175,7 @@ impl LspCommand for PrepareRename { _: &mut 
Project, _: PeerId, buffer_version: &clock::Global, - _: &AppContext, + _: &mut MutableAppContext, ) -> proto::PrepareRenameResponse { proto::PrepareRenameResponse { can_rename: range.is_some(), @@ -296,7 +296,7 @@ impl LspCommand for PerformRename { project: &mut Project, peer_id: PeerId, _: &clock::Global, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> proto::PerformRenameResponse { let transaction = project.serialize_project_transaction_for_peer(response, peer_id, cx); proto::PerformRenameResponse { @@ -391,7 +391,7 @@ impl LspCommand for GetDefinition { project: &mut Project, peer_id: PeerId, _: &clock::Global, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> proto::GetDefinitionResponse { let links = location_links_to_proto(response, project, peer_id, cx); proto::GetDefinitionResponse { links } @@ -477,7 +477,7 @@ impl LspCommand for GetTypeDefinition { project: &mut Project, peer_id: PeerId, _: &clock::Global, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> proto::GetTypeDefinitionResponse { let links = location_links_to_proto(response, project, peer_id, cx); proto::GetTypeDefinitionResponse { links } @@ -658,7 +658,7 @@ fn location_links_to_proto( links: Vec, project: &mut Project, peer_id: PeerId, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> Vec { links .into_iter() @@ -787,7 +787,7 @@ impl LspCommand for GetReferences { project: &mut Project, peer_id: PeerId, _: &clock::Global, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> proto::GetReferencesResponse { let locations = response .into_iter() @@ -928,7 +928,7 @@ impl LspCommand for GetDocumentHighlights { _: &mut Project, _: PeerId, _: &clock::Global, - _: &AppContext, + _: &mut MutableAppContext, ) -> proto::GetDocumentHighlightsResponse { let highlights = response .into_iter() @@ -1130,7 +1130,7 @@ impl LspCommand for GetHover { _: &mut Project, _: PeerId, _: &clock::Global, - _: &AppContext, + _: &mut MutableAppContext, ) -> proto::GetHoverResponse { if let Some(response) = response { let (start, end) = if let Some(range) = response.range { diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index b0a9784ba94f830a6e32841b3b88bb95d9b8808e..2755f281f3cb6a6aa45f62791f67ab68ac3ed5ff 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -5858,7 +5858,7 @@ impl Project { &mut self, project_transaction: ProjectTransaction, peer_id: proto::PeerId, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> proto::ProjectTransaction { let mut serialized_transaction = proto::ProjectTransaction { buffer_ids: Default::default(), @@ -5916,27 +5916,27 @@ impl Project { &mut self, buffer: &ModelHandle, peer_id: proto::PeerId, - cx: &AppContext, + cx: &mut MutableAppContext, ) -> u64 { let buffer_id = buffer.read(cx).remote_id(); if let Some(project_id) = self.remote_id() { let shared_buffers = self.shared_buffers.entry(peer_id).or_default(); if shared_buffers.insert(buffer_id) { - let buffer = buffer.read(cx); - let state = buffer.to_proto(); - let operations = buffer.serialize_ops(None, cx); + let buffer = buffer.clone(); + let operations = buffer.read(cx).serialize_ops(None, cx); let client = self.client.clone(); - cx.background() - .spawn( - async move { - let operations = operations.await; + cx.spawn(move |cx| async move { + let operations = operations.await; + let state = buffer.read_with(&cx, |buffer, _| buffer.to_proto()); - client.send(proto::CreateBufferForPeer { - project_id, - peer_id: Some(peer_id), - variant: 
Some(proto::create_buffer_for_peer::Variant::State(state)), - })?; + client.send(proto::CreateBufferForPeer { + project_id, + peer_id: Some(peer_id), + variant: Some(proto::create_buffer_for_peer::Variant::State(state)), + })?; + cx.background() + .spawn(async move { let mut chunks = split_operations(operations).peekable(); while let Some(chunk) = chunks.next() { let is_last = chunks.peek().is_none(); @@ -5952,12 +5952,11 @@ impl Project { )), })?; } - anyhow::Ok(()) - } - .log_err(), - ) - .detach(); + }) + .await + }) + .detach() } } @@ -6231,7 +6230,12 @@ impl Project { let buffer = this .opened_buffers .get(&envelope.payload.buffer_id) - .and_then(|buffer| buffer.upgrade(cx)); + .and_then(|buffer| buffer.upgrade(cx)) + .or_else(|| { + this.incomplete_remote_buffers + .get(&envelope.payload.buffer_id) + .and_then(|b| b.clone()) + }); if let Some(buffer) = buffer { buffer.update(cx, |buffer, cx| { buffer.did_save(version, fingerprint, mtime, cx); From 5ecc9606af10d7ad400c344e7944e97dd3d99886 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 3 Apr 2023 18:15:07 -0700 Subject: [PATCH 18/80] Use synchronous locks in FakeFs This way, the state can be accessed without running the deterministic executor. --- .../src/tests/randomized_integration_tests.rs | 21 +- crates/fs/src/fs.rs | 297 ++++++++---------- crates/project/src/worktree.rs | 2 +- 3 files changed, 149 insertions(+), 171 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 44e1891363f566688d8403cd8bf4550c3d3b95d9..583271b342342361ff86efedcb88b9c0e59e6a2e 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -624,7 +624,7 @@ async fn apply_client_operation( ); ensure_project_shared(&project, client, cx).await; - if !client.fs.paths().await.contains(&new_root_path) { + if !client.fs.paths().contains(&new_root_path) { client.fs.create_dir(&new_root_path).await.unwrap(); } project @@ -1350,7 +1350,6 @@ impl TestPlan { return None; } - let executor = cx.background(); self.operation_ix += 1; let call = cx.read(ActiveCall::global); Some(loop { @@ -1467,7 +1466,7 @@ impl TestPlan { .choose(&mut self.rng) .cloned() else { continue }; let project_root_name = root_name_for_project(&project, cx); - let mut paths = executor.block(client.fs.paths()); + let mut paths = client.fs.paths(); paths.remove(0); let new_root_path = if paths.is_empty() || self.rng.gen() { Path::new("/").join(&self.next_root_dir_name(user_id)) @@ -1637,14 +1636,16 @@ impl TestPlan { // Update a git index 91..=95 => { - let repo_path = executor - .block(client.fs.directories()) + let repo_path = client + .fs + .directories() .choose(&mut self.rng) .unwrap() .clone(); - let mut file_paths = executor - .block(client.fs.files()) + let mut file_paths = client + .fs + .files() .into_iter() .filter(|path| path.starts_with(&repo_path)) .collect::>(); @@ -1673,7 +1674,7 @@ impl TestPlan { let is_dir = self.rng.gen::(); let content; let mut path; - let dir_paths = cx.background().block(client.fs.directories()); + let dir_paths = client.fs.directories(); if is_dir { content = String::new(); @@ -1683,7 +1684,7 @@ impl TestPlan { content = Alphanumeric.sample_string(&mut self.rng, 16); // Create a new file or overwrite an existing file - let file_paths = cx.background().block(client.fs.files()); + let file_paths = client.fs.files(); if file_paths.is_empty() || self.rng.gen_bool(0.5) { path = 
dir_paths.choose(&mut self.rng).unwrap().clone(); path.push(gen_file_name(&mut self.rng)); @@ -1789,7 +1790,7 @@ async fn simulate_client( let fs = fs.clone(); let plan = plan.clone(); async move { - let files = fs.files().await; + let files = fs.files(); let count = plan.lock().rng.gen_range::(1..3); let files = (0..count) .map(|_| files.choose(&mut plan.lock().rng).unwrap()) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index fd713ef3b552be734d5ea3813e647a235f7cdd9b..4d0b0c4f447d541eb3fbaecd67d2ea49dc9ad42b 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -5,7 +5,7 @@ use fsevent::EventStream; use futures::{future::BoxFuture, Stream, StreamExt}; use git2::Repository as LibGitRepository; use lazy_static::lazy_static; -use parking_lot::Mutex as SyncMutex; +use parking_lot::Mutex; use regex::Regex; use repository::GitRepository; use rope::Rope; @@ -27,8 +27,6 @@ use util::ResultExt; #[cfg(any(test, feature = "test-support"))] use collections::{btree_map, BTreeMap}; #[cfg(any(test, feature = "test-support"))] -use futures::lock::Mutex; -#[cfg(any(test, feature = "test-support"))] use repository::FakeGitRepositoryState; #[cfg(any(test, feature = "test-support"))] use std::sync::Weak; @@ -117,7 +115,7 @@ pub trait Fs: Send + Sync { path: &Path, latency: Duration, ) -> Pin>>>; - fn open_repo(&self, abs_dot_git: &Path) -> Option>>; + fn open_repo(&self, abs_dot_git: &Path) -> Option>>; fn is_fake(&self) -> bool; #[cfg(any(test, feature = "test-support"))] fn as_fake(&self) -> &FakeFs; @@ -350,11 +348,11 @@ impl Fs for RealFs { }))) } - fn open_repo(&self, dotgit_path: &Path) -> Option>> { + fn open_repo(&self, dotgit_path: &Path) -> Option>> { LibGitRepository::open(&dotgit_path) .log_err() - .and_then::>, _>(|libgit_repository| { - Some(Arc::new(SyncMutex::new(libgit_repository))) + .and_then::>, _>(|libgit_repository| { + Some(Arc::new(Mutex::new(libgit_repository))) }) } @@ -396,7 +394,7 @@ enum FakeFsEntry { inode: u64, mtime: SystemTime, entries: BTreeMap>>, - git_repo_state: Option>>, + git_repo_state: Option>>, }, Symlink { target: PathBuf, @@ -405,18 +403,14 @@ enum FakeFsEntry { #[cfg(any(test, feature = "test-support"))] impl FakeFsState { - async fn read_path<'a>(&'a self, target: &Path) -> Result>> { + fn read_path<'a>(&'a self, target: &Path) -> Result>> { Ok(self .try_read_path(target) - .await .ok_or_else(|| anyhow!("path does not exist: {}", target.display()))? .0) } - async fn try_read_path<'a>( - &'a self, - target: &Path, - ) -> Option<(Arc>, PathBuf)> { + fn try_read_path<'a>(&'a self, target: &Path) -> Option<(Arc>, PathBuf)> { let mut path = target.to_path_buf(); let mut real_path = PathBuf::new(); let mut entry_stack = Vec::new(); @@ -438,10 +432,10 @@ impl FakeFsState { } Component::Normal(name) => { let current_entry = entry_stack.last().cloned()?; - let current_entry = current_entry.lock().await; + let current_entry = current_entry.lock(); if let FakeFsEntry::Dir { entries, .. } = &*current_entry { let entry = entries.get(name.to_str().unwrap()).cloned()?; - let _entry = entry.lock().await; + let _entry = entry.lock(); if let FakeFsEntry::Symlink { target, .. 
} = &*_entry { let mut target = target.clone(); target.extend(path_components); @@ -462,7 +456,7 @@ impl FakeFsState { entry_stack.pop().map(|entry| (entry, real_path)) } - async fn write_path(&self, path: &Path, callback: Fn) -> Result + fn write_path(&self, path: &Path, callback: Fn) -> Result where Fn: FnOnce(btree_map::Entry>>) -> Result, { @@ -472,8 +466,8 @@ impl FakeFsState { .ok_or_else(|| anyhow!("cannot overwrite the root"))?; let parent_path = path.parent().unwrap(); - let parent = self.read_path(parent_path).await?; - let mut parent = parent.lock().await; + let parent = self.read_path(parent_path)?; + let mut parent = parent.lock(); let new_entry = parent .dir_entries(parent_path)? .entry(filename.to_str().unwrap().into()); @@ -529,7 +523,7 @@ impl FakeFs { } pub async fn insert_file(&self, path: impl AsRef, content: String) { - let mut state = self.state.lock().await; + let mut state = self.state.lock(); let path = path.as_ref(); let inode = state.next_inode; let mtime = state.next_mtime; @@ -552,13 +546,12 @@ impl FakeFs { } Ok(()) }) - .await .unwrap(); state.emit_event(&[path]); } pub async fn insert_symlink(&self, path: impl AsRef, target: PathBuf) { - let mut state = self.state.lock().await; + let mut state = self.state.lock(); let path = path.as_ref(); let file = Arc::new(Mutex::new(FakeFsEntry::Symlink { target })); state @@ -572,21 +565,20 @@ impl FakeFs { Ok(()) } }) - .await .unwrap(); state.emit_event(&[path]); } pub async fn pause_events(&self) { - self.state.lock().await.events_paused = true; + self.state.lock().events_paused = true; } pub async fn buffered_event_count(&self) -> usize { - self.state.lock().await.buffered_events.len() + self.state.lock().buffered_events.len() } pub async fn flush_events(&self, count: usize) { - self.state.lock().await.flush_events(count); + self.state.lock().flush_events(count); } #[must_use] @@ -625,9 +617,9 @@ impl FakeFs { } pub async fn set_index_for_repo(&self, dot_git: &Path, head_state: &[(&Path, String)]) { - let mut state = self.state.lock().await; - let entry = state.read_path(dot_git).await.unwrap(); - let mut entry = entry.lock().await; + let mut state = self.state.lock(); + let entry = state.read_path(dot_git).unwrap(); + let mut entry = entry.lock(); if let FakeFsEntry::Dir { git_repo_state, .. } = &mut *entry { let repo_state = git_repo_state.get_or_insert_with(Default::default); @@ -646,12 +638,12 @@ impl FakeFs { } } - pub async fn paths(&self) -> Vec { + pub fn paths(&self) -> Vec { let mut result = Vec::new(); let mut queue = collections::VecDeque::new(); - queue.push_back((PathBuf::from("/"), self.state.lock().await.root.clone())); + queue.push_back((PathBuf::from("/"), self.state.lock().root.clone())); while let Some((path, entry)) = queue.pop_front() { - if let FakeFsEntry::Dir { entries, .. } = &*entry.lock().await { + if let FakeFsEntry::Dir { entries, .. } = &*entry.lock() { for (name, entry) in entries { queue.push_back((path.join(name), entry.clone())); } @@ -661,12 +653,12 @@ impl FakeFs { result } - pub async fn directories(&self) -> Vec { + pub fn directories(&self) -> Vec { let mut result = Vec::new(); let mut queue = collections::VecDeque::new(); - queue.push_back((PathBuf::from("/"), self.state.lock().await.root.clone())); + queue.push_back((PathBuf::from("/"), self.state.lock().root.clone())); while let Some((path, entry)) = queue.pop_front() { - if let FakeFsEntry::Dir { entries, .. } = &*entry.lock().await { + if let FakeFsEntry::Dir { entries, .. 
} = &*entry.lock() { for (name, entry) in entries { queue.push_back((path.join(name), entry.clone())); } @@ -676,12 +668,12 @@ impl FakeFs { result } - pub async fn files(&self) -> Vec { + pub fn files(&self) -> Vec { let mut result = Vec::new(); let mut queue = collections::VecDeque::new(); - queue.push_back((PathBuf::from("/"), self.state.lock().await.root.clone())); + queue.push_back((PathBuf::from("/"), self.state.lock().root.clone())); while let Some((path, entry)) = queue.pop_front() { - let e = entry.lock().await; + let e = entry.lock(); match &*e { FakeFsEntry::File { .. } => result.push(path), FakeFsEntry::Dir { entries, .. } => { @@ -745,11 +737,11 @@ impl FakeFsEntry { impl Fs for FakeFs { async fn create_dir(&self, path: &Path) -> Result<()> { self.simulate_random_delay().await; - let mut state = self.state.lock().await; let mut created_dirs = Vec::new(); let mut cur_path = PathBuf::new(); for component in path.components() { + let mut state = self.state.lock(); cur_path.push(component); if cur_path == Path::new("/") { continue; @@ -759,29 +751,27 @@ impl Fs for FakeFs { let mtime = state.next_mtime; state.next_mtime += Duration::from_nanos(1); state.next_inode += 1; - state - .write_path(&cur_path, |entry| { - entry.or_insert_with(|| { - created_dirs.push(cur_path.clone()); - Arc::new(Mutex::new(FakeFsEntry::Dir { - inode, - mtime, - entries: Default::default(), - git_repo_state: None, - })) - }); - Ok(()) - }) - .await?; + state.write_path(&cur_path, |entry| { + entry.or_insert_with(|| { + created_dirs.push(cur_path.clone()); + Arc::new(Mutex::new(FakeFsEntry::Dir { + inode, + mtime, + entries: Default::default(), + git_repo_state: None, + })) + }); + Ok(()) + })? } - state.emit_event(&created_dirs); + self.state.lock().emit_event(&created_dirs); Ok(()) } async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> { self.simulate_random_delay().await; - let mut state = self.state.lock().await; + let mut state = self.state.lock(); let inode = state.next_inode; let mtime = state.next_mtime; state.next_mtime += Duration::from_nanos(1); @@ -791,23 +781,21 @@ impl Fs for FakeFs { mtime, content: String::new(), })); - state - .write_path(path, |entry| { - match entry { - btree_map::Entry::Occupied(mut e) => { - if options.overwrite { - *e.get_mut() = file; - } else if !options.ignore_if_exists { - return Err(anyhow!("path already exists: {}", path.display())); - } - } - btree_map::Entry::Vacant(e) => { - e.insert(file); + state.write_path(path, |entry| { + match entry { + btree_map::Entry::Occupied(mut e) => { + if options.overwrite { + *e.get_mut() = file; + } else if !options.ignore_if_exists { + return Err(anyhow!("path already exists: {}", path.display())); } } - Ok(()) - }) - .await?; + btree_map::Entry::Vacant(e) => { + e.insert(file); + } + } + Ok(()) + })?; state.emit_event(&[path]); Ok(()) } @@ -815,33 +803,29 @@ impl Fs for FakeFs { async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> { let old_path = normalize_path(old_path); let new_path = normalize_path(new_path); - let mut state = self.state.lock().await; - let moved_entry = state - .write_path(&old_path, |e| { - if let btree_map::Entry::Occupied(e) = e { - Ok(e.remove()) - } else { - Err(anyhow!("path does not exist: {}", &old_path.display())) - } - }) - .await?; - state - .write_path(&new_path, |e| { - match e { - btree_map::Entry::Occupied(mut e) => { - if options.overwrite { - *e.get_mut() = moved_entry; - } else if !options.ignore_if_exists { - return 
Err(anyhow!("path already exists: {}", new_path.display())); - } - } - btree_map::Entry::Vacant(e) => { - e.insert(moved_entry); + let mut state = self.state.lock(); + let moved_entry = state.write_path(&old_path, |e| { + if let btree_map::Entry::Occupied(e) = e { + Ok(e.remove()) + } else { + Err(anyhow!("path does not exist: {}", &old_path.display())) + } + })?; + state.write_path(&new_path, |e| { + match e { + btree_map::Entry::Occupied(mut e) => { + if options.overwrite { + *e.get_mut() = moved_entry; + } else if !options.ignore_if_exists { + return Err(anyhow!("path already exists: {}", new_path.display())); } } - Ok(()) - }) - .await?; + btree_map::Entry::Vacant(e) => { + e.insert(moved_entry); + } + } + Ok(()) + })?; state.emit_event(&[old_path, new_path]); Ok(()) } @@ -849,35 +833,33 @@ impl Fs for FakeFs { async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> { let source = normalize_path(source); let target = normalize_path(target); - let mut state = self.state.lock().await; + let mut state = self.state.lock(); let mtime = state.next_mtime; let inode = util::post_inc(&mut state.next_inode); state.next_mtime += Duration::from_nanos(1); - let source_entry = state.read_path(&source).await?; - let content = source_entry.lock().await.file_content(&source)?.clone(); - let entry = state - .write_path(&target, |e| match e { - btree_map::Entry::Occupied(e) => { - if options.overwrite { - Ok(Some(e.get().clone())) - } else if !options.ignore_if_exists { - return Err(anyhow!("{target:?} already exists")); - } else { - Ok(None) - } + let source_entry = state.read_path(&source)?; + let content = source_entry.lock().file_content(&source)?.clone(); + let entry = state.write_path(&target, |e| match e { + btree_map::Entry::Occupied(e) => { + if options.overwrite { + Ok(Some(e.get().clone())) + } else if !options.ignore_if_exists { + return Err(anyhow!("{target:?} already exists")); + } else { + Ok(None) } - btree_map::Entry::Vacant(e) => Ok(Some( - e.insert(Arc::new(Mutex::new(FakeFsEntry::File { - inode, - mtime, - content: String::new(), - }))) - .clone(), - )), - }) - .await?; + } + btree_map::Entry::Vacant(e) => Ok(Some( + e.insert(Arc::new(Mutex::new(FakeFsEntry::File { + inode, + mtime, + content: String::new(), + }))) + .clone(), + )), + })?; if let Some(entry) = entry { - entry.lock().await.set_file_content(&target, content)?; + entry.lock().set_file_content(&target, content)?; } state.emit_event(&[target]); Ok(()) @@ -890,9 +872,9 @@ impl Fs for FakeFs { .ok_or_else(|| anyhow!("cannot remove the root"))?; let base_name = path.file_name().unwrap(); - let mut state = self.state.lock().await; - let parent_entry = state.read_path(parent_path).await?; - let mut parent_entry = parent_entry.lock().await; + let mut state = self.state.lock(); + let parent_entry = state.read_path(parent_path)?; + let mut parent_entry = parent_entry.lock(); let entry = parent_entry .dir_entries(parent_path)? 
.entry(base_name.to_str().unwrap().into()); @@ -905,7 +887,7 @@ impl Fs for FakeFs { } btree_map::Entry::Occupied(e) => { { - let mut entry = e.get().lock().await; + let mut entry = e.get().lock(); let children = entry.dir_entries(&path)?; if !options.recursive && !children.is_empty() { return Err(anyhow!("{path:?} is not empty")); @@ -924,9 +906,9 @@ impl Fs for FakeFs { .parent() .ok_or_else(|| anyhow!("cannot remove the root"))?; let base_name = path.file_name().unwrap(); - let mut state = self.state.lock().await; - let parent_entry = state.read_path(parent_path).await?; - let mut parent_entry = parent_entry.lock().await; + let mut state = self.state.lock(); + let parent_entry = state.read_path(parent_path)?; + let mut parent_entry = parent_entry.lock(); let entry = parent_entry .dir_entries(parent_path)? .entry(base_name.to_str().unwrap().into()); @@ -937,7 +919,7 @@ impl Fs for FakeFs { } } btree_map::Entry::Occupied(e) => { - e.get().lock().await.file_content(&path)?; + e.get().lock().file_content(&path)?; e.remove(); } } @@ -953,9 +935,9 @@ impl Fs for FakeFs { async fn load(&self, path: &Path) -> Result { let path = normalize_path(path); self.simulate_random_delay().await; - let state = self.state.lock().await; - let entry = state.read_path(&path).await?; - let entry = entry.lock().await; + let state = self.state.lock(); + let entry = state.read_path(&path)?; + let entry = entry.lock(); entry.file_content(&path).cloned() } @@ -978,8 +960,8 @@ impl Fs for FakeFs { async fn canonicalize(&self, path: &Path) -> Result { let path = normalize_path(path); self.simulate_random_delay().await; - let state = self.state.lock().await; - if let Some((_, real_path)) = state.try_read_path(&path).await { + let state = self.state.lock(); + if let Some((_, real_path)) = state.try_read_path(&path) { Ok(real_path) } else { Err(anyhow!("path does not exist: {}", path.display())) @@ -989,9 +971,9 @@ impl Fs for FakeFs { async fn is_file(&self, path: &Path) -> bool { let path = normalize_path(path); self.simulate_random_delay().await; - let state = self.state.lock().await; - if let Some((entry, _)) = state.try_read_path(&path).await { - entry.lock().await.is_file() + let state = self.state.lock(); + if let Some((entry, _)) = state.try_read_path(&path) { + entry.lock().is_file() } else { false } @@ -1000,9 +982,9 @@ impl Fs for FakeFs { async fn metadata(&self, path: &Path) -> Result> { self.simulate_random_delay().await; let path = normalize_path(path); - let state = self.state.lock().await; - if let Some((entry, real_path)) = state.try_read_path(&path).await { - let entry = entry.lock().await; + let state = self.state.lock(); + if let Some((entry, real_path)) = state.try_read_path(&path) { + let entry = entry.lock(); let is_symlink = real_path != path; Ok(Some(match &*entry { @@ -1031,9 +1013,9 @@ impl Fs for FakeFs { ) -> Result>>>> { self.simulate_random_delay().await; let path = normalize_path(path); - let state = self.state.lock().await; - let entry = state.read_path(&path).await?; - let mut entry = entry.lock().await; + let state = self.state.lock(); + let entry = state.read_path(&path)?; + let mut entry = entry.lock(); let children = entry.dir_entries(&path)?; let paths = children .keys() @@ -1047,10 +1029,9 @@ impl Fs for FakeFs { path: &Path, _: Duration, ) -> Pin>>> { - let mut state = self.state.lock().await; self.simulate_random_delay().await; let (tx, rx) = smol::channel::unbounded(); - state.event_txs.push(tx); + self.state.lock().event_txs.push(tx); let path = path.to_path_buf(); let 
executor = self.executor.clone(); Box::pin(futures::StreamExt::filter(rx, move |events| { @@ -1065,22 +1046,18 @@ impl Fs for FakeFs { })) } - fn open_repo(&self, abs_dot_git: &Path) -> Option>> { - smol::block_on(async move { - let state = self.state.lock().await; - let entry = state.read_path(abs_dot_git).await.unwrap(); - let mut entry = entry.lock().await; - if let FakeFsEntry::Dir { git_repo_state, .. } = &mut *entry { - let state = git_repo_state - .get_or_insert_with(|| { - Arc::new(SyncMutex::new(FakeGitRepositoryState::default())) - }) - .clone(); - Some(repository::FakeGitRepository::open(state)) - } else { - None - } - }) + fn open_repo(&self, abs_dot_git: &Path) -> Option>> { + let state = self.state.lock(); + let entry = state.read_path(abs_dot_git).unwrap(); + let mut entry = entry.lock(); + if let FakeFsEntry::Dir { git_repo_state, .. } = &mut *entry { + let state = git_repo_state + .get_or_insert_with(|| Arc::new(Mutex::new(FakeGitRepositoryState::default()))) + .clone(); + Some(repository::FakeGitRepository::open(state)) + } else { + None + } } fn is_fake(&self) -> bool { @@ -1213,7 +1190,7 @@ mod tests { .await; assert_eq!( - fs.files().await, + fs.files(), vec![ PathBuf::from("/root/dir1/a"), PathBuf::from("/root/dir1/b"), diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 2357052d2cb13ad819caa5eecce4f2f210675ea3..b1aebf29f1048746c6f0a78ce347c5b507acfd68 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -3729,7 +3729,7 @@ mod tests { ) { let mut files = Vec::new(); let mut dirs = Vec::new(); - for path in fs.as_fake().paths().await { + for path in fs.as_fake().paths() { if path.starts_with(root_path) { if fs.is_file(&path).await { files.push(path); From 7b0a6c0dfaf73bd92f0d3cff8e818e63e2645f50 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 3 Apr 2023 18:16:08 -0700 Subject: [PATCH 19/80] Add an 'on_failure' attribute to gpui tests This lets us perform a finalization step when a randomized test fails. 
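In sketch form, a test opts in by naming a zero-argument finalizer function (mirroring the `on_failure = "on_failure"` usage in the randomized collaboration test later in this series; `save_failure_artifacts` is a hypothetical name):

    // Hypothetical finalizer; gpui calls it only when the test panics,
    // right after the failing seed is printed.
    fn save_failure_artifacts() {
        eprintln!("persisting repro state for the failing seed");
    }

    #[gpui::test(iterations = 100, on_failure = "save_failure_artifacts")]
    async fn test_something_randomized(cx: &mut gpui::TestAppContext) {
        // ...
    }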
--- crates/gpui/src/test.rs | 2 ++ crates/gpui_macros/src/gpui_macros.rs | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/crates/gpui/src/test.rs b/crates/gpui/src/test.rs index d784d43ece50602b53f1d019c2b6dd57d3463537..0bf3c333b34258d007d60f52a55866f4dba3dc53 100644 --- a/crates/gpui/src/test.rs +++ b/crates/gpui/src/test.rs @@ -45,6 +45,7 @@ pub fn run_test( Arc, u64, )), + on_fail_fn: Option, fn_name: String, ) { // let _profiler = dhat::Profiler::new_heap(); @@ -177,6 +178,7 @@ pub fn run_test( if is_randomized { eprintln!("failing seed: {}", atomic_seed.load(SeqCst)); } + on_fail_fn.map(|f| f()); panic::resume_unwind(error); } } diff --git a/crates/gpui_macros/src/gpui_macros.rs b/crates/gpui_macros/src/gpui_macros.rs index cabae1ac0a14c290da73af306dc80db824273bc6..42cdb66ee3a0cf6a94dd237e9d81e1431fcd3900 100644 --- a/crates/gpui_macros/src/gpui_macros.rs +++ b/crates/gpui_macros/src/gpui_macros.rs @@ -1,4 +1,5 @@ use proc_macro::TokenStream; +use proc_macro2::Ident; use quote::{format_ident, quote}; use std::mem; use syn::{ @@ -15,6 +16,7 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { let mut num_iterations = 1; let mut starting_seed = 0; let mut detect_nondeterminism = false; + let mut on_failure_fn_name = quote!(None); for arg in args { match arg { @@ -33,6 +35,20 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { Some("retries") => max_retries = parse_int(&meta.lit)?, Some("iterations") => num_iterations = parse_int(&meta.lit)?, Some("seed") => starting_seed = parse_int(&meta.lit)?, + Some("on_failure") => { + if let Lit::Str(name) = meta.lit { + let ident = Ident::new(&name.value(), name.span()); + on_failure_fn_name = quote!(Some(#ident)); + } else { + return Err(TokenStream::from( + syn::Error::new( + meta.lit.span(), + "on_failure argument must be a string", + ) + .into_compile_error(), + )); + } + } _ => { return Err(TokenStream::from( syn::Error::new(meta.path.span(), "invalid argument") @@ -152,6 +168,7 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { cx.foreground().run(#inner_fn_name(#inner_fn_args)); #cx_teardowns }, + #on_failure_fn_name, stringify!(#outer_fn_name).to_string(), ); } @@ -187,6 +204,7 @@ pub fn test(args: TokenStream, function: TokenStream) -> TokenStream { #max_retries, #detect_nondeterminism, &mut |cx, _, _, seed| #inner_fn_name(#inner_fn_args), + #on_failure_fn_name, stringify!(#outer_fn_name).to_string(), ); } From b251e249a7c4f812a3cb49dabd15a89dcc9a81c7 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 3 Apr 2023 18:17:11 -0700 Subject: [PATCH 20/80] Check for consistency between clients every time the system quiesces --- .../src/tests/randomized_integration_tests.rs | 462 +++++++++--------- 1 file changed, 236 insertions(+), 226 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 583271b342342361ff86efedcb88b9c0e59e6a2e..ffe09b74bdf4139b06fb0ae40e9ab2216b5a0d13 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -34,11 +34,13 @@ use std::{ use util::ResultExt; lazy_static::lazy_static! 
{ + static ref PLAN_LOAD_PATH: Option = path_env_var("LOAD_PLAN"); + static ref PLAN_SAVE_PATH: Option = path_env_var("SAVE_PLAN"); static ref LOADED_PLAN_JSON: Mutex>> = Default::default(); - static ref DID_SAVE_PLAN_JSON: AtomicBool = Default::default(); + static ref PLAN: Mutex>>> = Default::default(); } -#[gpui::test(iterations = 100)] +#[gpui::test(iterations = 100, on_failure = "on_failure")] async fn test_random_collaboration( cx: &mut TestAppContext, deterministic: Arc, @@ -53,9 +55,6 @@ async fn test_random_collaboration( .map(|i| i.parse().expect("invalid `OPERATIONS` variable")) .unwrap_or(10); - let plan_load_path = path_env_var("LOAD_PLAN"); - let plan_save_path = path_env_var("SAVE_PLAN"); - let mut server = TestServer::start(&deterministic).await; let db = server.app_state.db.clone(); @@ -103,7 +102,7 @@ async fn test_random_collaboration( let plan = Arc::new(Mutex::new(TestPlan::new(rng, users, max_operations))); - if let Some(path) = &plan_load_path { + if let Some(path) = &*PLAN_LOAD_PATH { let json = LOADED_PLAN_JSON .lock() .get_or_insert_with(|| { @@ -114,6 +113,8 @@ async fn test_random_collaboration( plan.lock().deserialize(json); } + PLAN.lock().replace(plan.clone()); + let mut clients = Vec::new(); let mut client_tasks = Vec::new(); let mut operation_channels = Vec::new(); @@ -142,225 +143,7 @@ async fn test_random_collaboration( deterministic.finish_waiting(); deterministic.run_until_parked(); - if let Some(path) = &plan_save_path { - if !DID_SAVE_PLAN_JSON.swap(true, SeqCst) { - eprintln!("saved test plan to path {:?}", path); - std::fs::write(path, plan.lock().serialize()).unwrap(); - } - } - - for (client, client_cx) in &clients { - for guest_project in client.remote_projects().iter() { - guest_project.read_with(client_cx, |guest_project, cx| { - let host_project = clients.iter().find_map(|(client, cx)| { - let project = client - .local_projects() - .iter() - .find(|host_project| { - host_project.read_with(cx, |host_project, _| { - host_project.remote_id() == guest_project.remote_id() - }) - })? 
- .clone(); - Some((project, cx)) - }); - - if !guest_project.is_read_only() { - if let Some((host_project, host_cx)) = host_project { - let host_worktree_snapshots = - host_project.read_with(host_cx, |host_project, cx| { - host_project - .worktrees(cx) - .map(|worktree| { - let worktree = worktree.read(cx); - (worktree.id(), worktree.snapshot()) - }) - .collect::>() - }); - let guest_worktree_snapshots = guest_project - .worktrees(cx) - .map(|worktree| { - let worktree = worktree.read(cx); - (worktree.id(), worktree.snapshot()) - }) - .collect::>(); - - assert_eq!( - guest_worktree_snapshots.keys().collect::>(), - host_worktree_snapshots.keys().collect::>(), - "{} has different worktrees than the host", - client.username - ); - - for (id, host_snapshot) in &host_worktree_snapshots { - let guest_snapshot = &guest_worktree_snapshots[id]; - assert_eq!( - guest_snapshot.root_name(), - host_snapshot.root_name(), - "{} has different root name than the host for worktree {}", - client.username, - id - ); - assert_eq!( - guest_snapshot.abs_path(), - host_snapshot.abs_path(), - "{} has different abs path than the host for worktree {}", - client.username, - id - ); - assert_eq!( - guest_snapshot.entries(false).collect::>(), - host_snapshot.entries(false).collect::>(), - "{} has different snapshot than the host for worktree {:?} and project {:?}", - client.username, - host_snapshot.abs_path(), - host_project.read_with(host_cx, |project, _| project.remote_id()) - ); - assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id(), - "{} has different scan id than the host for worktree {:?} and project {:?}", - client.username, - host_snapshot.abs_path(), - host_project.read_with(host_cx, |project, _| project.remote_id()) - ); - } - } - } - - guest_project.check_invariants(cx); - }); - } - - let buffers = client.buffers().clone(); - for (guest_project, guest_buffers) in &buffers { - let project_id = if guest_project.read_with(client_cx, |project, _| { - project.is_local() || project.is_read_only() - }) { - continue; - } else { - guest_project - .read_with(client_cx, |project, _| project.remote_id()) - .unwrap() - }; - let guest_user_id = client.user_id().unwrap(); - - let host_project = clients.iter().find_map(|(client, cx)| { - let project = client - .local_projects() - .iter() - .find(|host_project| { - host_project.read_with(cx, |host_project, _| { - host_project.remote_id() == Some(project_id) - }) - })? 
- .clone(); - Some((client.user_id().unwrap(), project, cx)) - }); - - let (host_user_id, host_project, host_cx) = - if let Some((host_user_id, host_project, host_cx)) = host_project { - (host_user_id, host_project, host_cx) - } else { - continue; - }; - - for guest_buffer in guest_buffers { - let buffer_id = guest_buffer.read_with(client_cx, |buffer, _| buffer.remote_id()); - let host_buffer = host_project.read_with(host_cx, |project, cx| { - project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| { - panic!( - "host does not have buffer for guest:{}, peer:{:?}, id:{}", - client.username, - client.peer_id(), - buffer_id - ) - }) - }); - let path = host_buffer - .read_with(host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); - - assert_eq!( - guest_buffer.read_with(client_cx, |buffer, _| buffer.deferred_ops_len()), - 0, - "{}, buffer {}, path {:?} has deferred operations", - client.username, - buffer_id, - path, - ); - assert_eq!( - guest_buffer.read_with(client_cx, |buffer, _| buffer.text()), - host_buffer.read_with(host_cx, |buffer, _| buffer.text()), - "{}, buffer {}, path {:?}, differs from the host's buffer", - client.username, - buffer_id, - path - ); - - let host_file = host_buffer.read_with(host_cx, |b, _| b.file().cloned()); - let guest_file = guest_buffer.read_with(client_cx, |b, _| b.file().cloned()); - match (host_file, guest_file) { - (Some(host_file), Some(guest_file)) => { - assert_eq!(guest_file.path(), host_file.path()); - assert_eq!(guest_file.is_deleted(), host_file.is_deleted()); - assert_eq!( - guest_file.mtime(), - host_file.mtime(), - "guest {} mtime does not match host {} for path {:?} in project {}", - guest_user_id, - host_user_id, - guest_file.path(), - project_id, - ); - } - (None, None) => {} - (None, _) => panic!("host's file is None, guest's isn't"), - (_, None) => panic!("guest's file is None, hosts's isn't"), - } - - let host_diff_base = - host_buffer.read_with(host_cx, |b, _| b.diff_base().map(ToString::to_string)); - let guest_diff_base = guest_buffer - .read_with(client_cx, |b, _| b.diff_base().map(ToString::to_string)); - assert_eq!(guest_diff_base, host_diff_base); - - let host_saved_version = - host_buffer.read_with(host_cx, |b, _| b.saved_version().clone()); - let guest_saved_version = - guest_buffer.read_with(client_cx, |b, _| b.saved_version().clone()); - assert_eq!( - guest_saved_version, host_saved_version, - "guest saved version does not match host's for path {path:?} in project {project_id}", - ); - - let host_saved_version_fingerprint = - host_buffer.read_with(host_cx, |b, _| b.saved_version_fingerprint()); - let guest_saved_version_fingerprint = - guest_buffer.read_with(client_cx, |b, _| b.saved_version_fingerprint()); - assert_eq!( - guest_saved_version_fingerprint, host_saved_version_fingerprint, - "guest's saved fingerprint does not match host's for path {path:?} in project {project_id}", - ); - - let host_saved_mtime = host_buffer.read_with(host_cx, |b, _| b.saved_mtime()); - let guest_saved_mtime = guest_buffer.read_with(client_cx, |b, _| b.saved_mtime()); - assert_eq!( - guest_saved_mtime, host_saved_mtime, - "guest's saved mtime does not match host's for path {path:?} in project {project_id}", - ); - - let host_is_dirty = host_buffer.read_with(host_cx, |b, _| b.is_dirty()); - let guest_is_dirty = guest_buffer.read_with(client_cx, |b, _| b.is_dirty()); - assert_eq!(guest_is_dirty, host_is_dirty, - "guest's dirty status does not match host's for path {path:?} in project {project_id}", - ); - - let host_has_conflict = 
host_buffer.read_with(host_cx, |b, _| b.has_conflict()); - let guest_has_conflict = guest_buffer.read_with(client_cx, |b, _| b.has_conflict()); - assert_eq!(guest_has_conflict, host_has_conflict, - "guest's conflict status does not match host's for path {path:?} in project {project_id}", - ); - } - } - } + check_consistency_between_clients(&clients); for (client, mut cx) in clients { cx.update(|cx| { @@ -371,6 +154,15 @@ async fn test_random_collaboration( } } +fn on_failure() { + if let Some(plan) = PLAN.lock().clone() { + if let Some(path) = &*PLAN_SAVE_PATH { + eprintln!("saved test plan to path {:?}", path); + std::fs::write(path, plan.lock().serialize()).unwrap(); + } + } +} + async fn apply_server_operation( deterministic: Arc, server: &mut TestServer, @@ -528,12 +320,13 @@ async fn apply_server_operation( let Some(client_ix) = client_ix else { continue }; applied = true; if let Err(err) = operation_channels[client_ix].unbounded_send(batch_id) { - // panic!("error signaling user {}, client {}", user_id, client_ix); + log::error!("error signaling user {user_id}: {err}"); } } if quiesce && applied { deterministic.run_until_parked(); + check_consistency_between_clients(&clients); } return applied; @@ -996,6 +789,223 @@ async fn apply_client_operation( Ok(()) } +fn check_consistency_between_clients(clients: &[(Rc, TestAppContext)]) { + for (client, client_cx) in clients { + for guest_project in client.remote_projects().iter() { + guest_project.read_with(client_cx, |guest_project, cx| { + let host_project = clients.iter().find_map(|(client, cx)| { + let project = client + .local_projects() + .iter() + .find(|host_project| { + host_project.read_with(cx, |host_project, _| { + host_project.remote_id() == guest_project.remote_id() + }) + })? + .clone(); + Some((project, cx)) + }); + + if !guest_project.is_read_only() { + if let Some((host_project, host_cx)) = host_project { + let host_worktree_snapshots = + host_project.read_with(host_cx, |host_project, cx| { + host_project + .worktrees(cx) + .map(|worktree| { + let worktree = worktree.read(cx); + (worktree.id(), worktree.snapshot()) + }) + .collect::>() + }); + let guest_worktree_snapshots = guest_project + .worktrees(cx) + .map(|worktree| { + let worktree = worktree.read(cx); + (worktree.id(), worktree.snapshot()) + }) + .collect::>(); + + assert_eq!( + guest_worktree_snapshots.values().map(|w| w.abs_path()).collect::>(), + host_worktree_snapshots.values().map(|w| w.abs_path()).collect::>(), + "{} has different worktrees than the host for project {:?}", + client.username, guest_project.remote_id(), + ); + + for (id, host_snapshot) in &host_worktree_snapshots { + let guest_snapshot = &guest_worktree_snapshots[id]; + assert_eq!( + guest_snapshot.root_name(), + host_snapshot.root_name(), + "{} has different root name than the host for worktree {}, project {:?}", + client.username, + id, + guest_project.remote_id(), + ); + assert_eq!( + guest_snapshot.abs_path(), + host_snapshot.abs_path(), + "{} has different abs path than the host for worktree {}, project: {:?}", + client.username, + id, + guest_project.remote_id(), + ); + assert_eq!( + guest_snapshot.entries(false).collect::>(), + host_snapshot.entries(false).collect::>(), + "{} has different snapshot than the host for worktree {:?} and project {:?}", + client.username, + host_snapshot.abs_path(), + guest_project.remote_id(), + ); + assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id(), + "{} has different scan id than the host for worktree {:?} and project {:?}", + client.username, + 
host_snapshot.abs_path(), + guest_project.remote_id(), + ); + } + } + } + + guest_project.check_invariants(cx); + }); + } + + let buffers = client.buffers().clone(); + for (guest_project, guest_buffers) in &buffers { + let project_id = if guest_project.read_with(client_cx, |project, _| { + project.is_local() || project.is_read_only() + }) { + continue; + } else { + guest_project + .read_with(client_cx, |project, _| project.remote_id()) + .unwrap() + }; + let guest_user_id = client.user_id().unwrap(); + + let host_project = clients.iter().find_map(|(client, cx)| { + let project = client + .local_projects() + .iter() + .find(|host_project| { + host_project.read_with(cx, |host_project, _| { + host_project.remote_id() == Some(project_id) + }) + })? + .clone(); + Some((client.user_id().unwrap(), project, cx)) + }); + + let (host_user_id, host_project, host_cx) = + if let Some((host_user_id, host_project, host_cx)) = host_project { + (host_user_id, host_project, host_cx) + } else { + continue; + }; + + for guest_buffer in guest_buffers { + let buffer_id = guest_buffer.read_with(client_cx, |buffer, _| buffer.remote_id()); + let host_buffer = host_project.read_with(host_cx, |project, cx| { + project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| { + panic!( + "host does not have buffer for guest:{}, peer:{:?}, id:{}", + client.username, + client.peer_id(), + buffer_id + ) + }) + }); + let path = host_buffer + .read_with(host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx)); + + assert_eq!( + guest_buffer.read_with(client_cx, |buffer, _| buffer.deferred_ops_len()), + 0, + "{}, buffer {}, path {:?} has deferred operations", + client.username, + buffer_id, + path, + ); + assert_eq!( + guest_buffer.read_with(client_cx, |buffer, _| buffer.text()), + host_buffer.read_with(host_cx, |buffer, _| buffer.text()), + "{}, buffer {}, path {:?}, differs from the host's buffer", + client.username, + buffer_id, + path + ); + + let host_file = host_buffer.read_with(host_cx, |b, _| b.file().cloned()); + let guest_file = guest_buffer.read_with(client_cx, |b, _| b.file().cloned()); + match (host_file, guest_file) { + (Some(host_file), Some(guest_file)) => { + assert_eq!(guest_file.path(), host_file.path()); + assert_eq!(guest_file.is_deleted(), host_file.is_deleted()); + assert_eq!( + guest_file.mtime(), + host_file.mtime(), + "guest {} mtime does not match host {} for path {:?} in project {}", + guest_user_id, + host_user_id, + guest_file.path(), + project_id, + ); + } + (None, None) => {} + (None, _) => panic!("host's file is None, guest's isn't"), + (_, None) => panic!("guest's file is None, hosts's isn't"), + } + + let host_diff_base = + host_buffer.read_with(host_cx, |b, _| b.diff_base().map(ToString::to_string)); + let guest_diff_base = guest_buffer + .read_with(client_cx, |b, _| b.diff_base().map(ToString::to_string)); + assert_eq!(guest_diff_base, host_diff_base); + + let host_saved_version = + host_buffer.read_with(host_cx, |b, _| b.saved_version().clone()); + let guest_saved_version = + guest_buffer.read_with(client_cx, |b, _| b.saved_version().clone()); + assert_eq!( + guest_saved_version, host_saved_version, + "guest saved version does not match host's for path {path:?} in project {project_id}", + ); + + let host_saved_version_fingerprint = + host_buffer.read_with(host_cx, |b, _| b.saved_version_fingerprint()); + let guest_saved_version_fingerprint = + guest_buffer.read_with(client_cx, |b, _| b.saved_version_fingerprint()); + assert_eq!( + guest_saved_version_fingerprint, 
host_saved_version_fingerprint, + "guest's saved fingerprint does not match host's for path {path:?} in project {project_id}", + ); + + let host_saved_mtime = host_buffer.read_with(host_cx, |b, _| b.saved_mtime()); + let guest_saved_mtime = guest_buffer.read_with(client_cx, |b, _| b.saved_mtime()); + assert_eq!( + guest_saved_mtime, host_saved_mtime, + "guest's saved mtime does not match host's for path {path:?} in project {project_id}", + ); + + let host_is_dirty = host_buffer.read_with(host_cx, |b, _| b.is_dirty()); + let guest_is_dirty = guest_buffer.read_with(client_cx, |b, _| b.is_dirty()); + assert_eq!(guest_is_dirty, host_is_dirty, + "guest's dirty status does not match host's for path {path:?} in project {project_id}", + ); + + let host_has_conflict = host_buffer.read_with(host_cx, |b, _| b.has_conflict()); + let guest_has_conflict = guest_buffer.read_with(client_cx, |b, _| b.has_conflict()); + assert_eq!(guest_has_conflict, host_has_conflict, + "guest's conflict status does not match host's for path {path:?} in project {project_id}", + ); + } + } + } +} + struct TestPlan { rng: StdRng, replay: bool, From bcf9b2f10dfec38fbff289327725da014c5a4d11 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 3 Apr 2023 22:42:34 -0700 Subject: [PATCH 21/80] Add missing random delays in FakeFs --- crates/fs/src/fs.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 4d0b0c4f447d541eb3fbaecd67d2ea49dc9ad42b..c53c20c774bfd087dc914ecfeb9e596754298e54 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -801,6 +801,8 @@ impl Fs for FakeFs { } async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> { + self.simulate_random_delay().await; + let old_path = normalize_path(old_path); let new_path = normalize_path(new_path); let mut state = self.state.lock(); @@ -831,6 +833,8 @@ impl Fs for FakeFs { } async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> { + self.simulate_random_delay().await; + let source = normalize_path(source); let target = normalize_path(target); let mut state = self.state.lock(); @@ -866,6 +870,8 @@ impl Fs for FakeFs { } async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> { + self.simulate_random_delay().await; + let path = normalize_path(path); let parent_path = path .parent() @@ -901,6 +907,8 @@ impl Fs for FakeFs { } async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> { + self.simulate_random_delay().await; + let path = normalize_path(path); let parent_path = path .parent() From 1ccf174388151c2997fdec620fae7d10c08c4b12 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 4 Apr 2023 18:34:39 -0700 Subject: [PATCH 22/80] Avoid applying outdated UpdateProject messages Co-authored-by: Nathan Sobo --- crates/client/src/client.rs | 15 +++++++++++++-- crates/project/src/project.rs | 18 +++++++++++++----- crates/rpc/src/peer.rs | 25 +++++++++++++++++++++---- 3 files changed, 47 insertions(+), 11 deletions(-) diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 76004f14a4dc404392f17d0cf84b288362690804..ae8cf8bf56d87f3b710caf62a6e068883aed7586 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -10,7 +10,10 @@ use async_tungstenite::tungstenite::{ error::Error as WebsocketError, http::{Request, StatusCode}, }; -use futures::{future::LocalBoxFuture, AsyncReadExt, FutureExt, SinkExt, StreamExt, TryStreamExt}; +use futures::{ + 
future::LocalBoxFuture, AsyncReadExt, FutureExt, SinkExt, StreamExt, TryFutureExt as _, + TryStreamExt, +}; use gpui::{ actions, serde_json::{self, Value}, @@ -1187,6 +1190,14 @@ impl Client { &self, request: T, ) -> impl Future> { + self.request_envelope(request) + .map_ok(|envelope| envelope.payload) + } + + pub fn request_envelope( + &self, + request: T, + ) -> impl Future>> { let client_id = self.id; log::debug!( "rpc request start. client_id:{}. name:{}", @@ -1195,7 +1206,7 @@ impl Client { ); let response = self .connection_id() - .map(|conn_id| self.peer.request(conn_id, request)); + .map(|conn_id| self.peer.request_envelope(conn_id, request)); async move { let response = response?.await; log::debug!( diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 2755f281f3cb6a6aa45f62791f67ab68ac3ed5ff..1e9721339f010d3803cb3de03a6bae70df52306b 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -100,6 +100,7 @@ pub struct Project { next_language_server_id: usize, client: Arc, next_entry_id: Arc, + join_project_response_message_id: u32, next_diagnostic_group_id: usize, user_store: ModelHandle, fs: Arc, @@ -425,6 +426,7 @@ impl Project { loading_buffers_by_path: Default::default(), loading_local_worktrees: Default::default(), buffer_snapshots: Default::default(), + join_project_response_message_id: 0, client_state: None, opened_buffer: watch::channel(), client_subscriptions: Vec::new(), @@ -463,15 +465,15 @@ impl Project { let subscription = client.subscribe_to_entity(remote_id); let response = client - .request(proto::JoinProject { + .request_envelope(proto::JoinProject { project_id: remote_id, }) .await?; let this = cx.add_model(|cx| { - let replica_id = response.replica_id as ReplicaId; + let replica_id = response.payload.replica_id as ReplicaId; let mut worktrees = Vec::new(); - for worktree in response.worktrees { + for worktree in response.payload.worktrees { let worktree = cx.update(|cx| { Worktree::remote(remote_id, replica_id, worktree, client.clone(), cx) }); @@ -487,6 +489,7 @@ impl Project { loading_local_worktrees: Default::default(), active_entry: None, collaborators: Default::default(), + join_project_response_message_id: response.message_id, _maintain_buffer_languages: Self::maintain_buffer_languages(&languages, cx), _maintain_workspace_config: Self::maintain_workspace_config(languages.clone(), cx), languages, @@ -505,6 +508,7 @@ impl Project { language_servers: Default::default(), language_server_ids: Default::default(), language_server_statuses: response + .payload .language_servers .into_iter() .map(|server| { @@ -537,6 +541,7 @@ impl Project { let subscription = subscription.set_model(&this, &mut cx); let user_ids = response + .payload .collaborators .iter() .map(|peer| peer.user_id) @@ -546,7 +551,7 @@ impl Project { .await?; this.update(&mut cx, |this, cx| { - this.set_collaborators_from_proto(response.collaborators, cx)?; + this.set_collaborators_from_proto(response.payload.collaborators, cx)?; this.client_subscriptions.push(subscription); anyhow::Ok(()) })?; @@ -4930,7 +4935,10 @@ impl Project { mut cx: AsyncAppContext, ) -> Result<()> { this.update(&mut cx, |this, cx| { - this.set_worktrees_from_proto(envelope.payload.worktrees, cx)?; + // Don't handle messages that were sent before the response to us joining the project + if envelope.message_id > this.join_project_response_message_id { + this.set_worktrees_from_proto(envelope.payload.worktrees, cx)?; + } Ok(()) }) } diff --git a/crates/rpc/src/peer.rs 
b/crates/rpc/src/peer.rs index 0df87fd92d9ab92237ccefe98afe9b457cec9a49..72ddfa567b5822540632bce4bf70d26fe192281c 100644 --- a/crates/rpc/src/peer.rs +++ b/crates/rpc/src/peer.rs @@ -7,7 +7,7 @@ use collections::HashMap; use futures::{ channel::{mpsc, oneshot}, stream::BoxStream, - FutureExt, SinkExt, StreamExt, + FutureExt, SinkExt, StreamExt, TryFutureExt, }; use parking_lot::{Mutex, RwLock}; use serde::{ser::SerializeStruct, Serialize}; @@ -71,6 +71,7 @@ impl Clone for Receipt { impl Copy for Receipt {} +#[derive(Clone, Debug)] pub struct TypedEnvelope { pub sender_id: ConnectionId, pub original_sender_id: Option, @@ -370,6 +371,15 @@ impl Peer { receiver_id: ConnectionId, request: T, ) -> impl Future> { + self.request_internal(None, receiver_id, request) + .map_ok(|envelope| envelope.payload) + } + + pub fn request_envelope( + &self, + receiver_id: ConnectionId, + request: T, + ) -> impl Future>> { self.request_internal(None, receiver_id, request) } @@ -380,6 +390,7 @@ impl Peer { request: T, ) -> impl Future> { self.request_internal(Some(sender_id), receiver_id, request) + .map_ok(|envelope| envelope.payload) } pub fn request_internal( @@ -387,7 +398,7 @@ impl Peer { original_sender_id: Option, receiver_id: ConnectionId, request: T, - ) -> impl Future> { + ) -> impl Future>> { let (tx, rx) = oneshot::channel(); let send = self.connection_state(receiver_id).and_then(|connection| { let message_id = connection.next_message_id.fetch_add(1, SeqCst); @@ -410,6 +421,7 @@ impl Peer { async move { send?; let (response, _barrier) = rx.await.map_err(|_| anyhow!("connection was closed"))?; + if let Some(proto::envelope::Payload::Error(error)) = &response.payload { Err(anyhow!( "RPC request {} failed - {}", @@ -417,8 +429,13 @@ impl Peer { error.message )) } else { - T::Response::from_envelope(response) - .ok_or_else(|| anyhow!("received response of the wrong type")) + Ok(TypedEnvelope { + message_id: response.id, + sender_id: receiver_id, + original_sender_id: response.original_sender_id, + payload: T::Response::from_envelope(response) + .ok_or_else(|| anyhow!("received response of the wrong type"))?, + }) } } } From 1159f5517b6a6aed577ce62358a379ce5ddfec88 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 4 Apr 2023 21:49:34 -0700 Subject: [PATCH 23/80] Avoid applying outdated UpdateProject messages after rejoining a room --- crates/call/src/room.rs | 6 ++++-- crates/project/src/project.rs | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/call/src/room.rs b/crates/call/src/room.rs index eeb8a6a5d88fef4bd1c679a9d11ed43c656a99fc..70d70218f322012ebd92ee8d85aa3a699a40ee44 100644 --- a/crates/call/src/room.rs +++ b/crates/call/src/room.rs @@ -424,7 +424,7 @@ impl Room { false }); - let response = self.client.request(proto::RejoinRoom { + let response = self.client.request_envelope(proto::RejoinRoom { id: self.id, reshared_projects, rejoined_projects, }); cx.spawn(|this, mut cx| async move { let response = response.await?; + let message_id = response.message_id; + let response = response.payload; let room_proto = response.room.ok_or_else(|| anyhow!("invalid room"))?; this.update(&mut cx, |this, cx| { this.status = RoomStatus::Online; @@ -448,7 +450,7 @@ impl Room { for rejoined_project in response.rejoined_projects { if let Some(project) = projects.get(&rejoined_project.id) { project.update(cx, |project, cx| { - project.rejoined(rejoined_project, cx).log_err(); + project.rejoined(rejoined_project, message_id, cx).log_err(); }); } } diff --git
a/crates/project/src/project.rs b/crates/project/src/project.rs index 1e9721339f010d3803cb3de03a6bae70df52306b..3e5a45007560a4f5bdb1a89bc4c8e99783c7e72b 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1089,8 +1089,10 @@ impl Project { pub fn rejoined( &mut self, message: proto::RejoinedProject, + message_id: u32, cx: &mut ModelContext, ) -> Result<()> { + self.join_project_response_message_id = message_id; self.set_worktrees_from_proto(message.worktrees, cx)?; self.set_collaborators_from_proto(message.collaborators, cx)?; self.language_server_statuses = message From 781d66f628dfb302f07c11bdce3f3bd676a91470 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 15:04:27 -0700 Subject: [PATCH 24/80] Omit operations for non-existent users from serialized test plan --- .../src/tests/randomized_integration_tests.rs | 41 ++++++++++--------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index ffe09b74bdf4139b06fb0ae40e9ab2216b5a0d13..e53f33b16dc770afbb96e86ad21e323510f2ff8d 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -120,8 +120,8 @@ async fn test_random_collaboration( let mut operation_channels = Vec::new(); loop { - let Some((next_operation, skipped)) = plan.lock().next_server_operation(&clients) else { break }; - let applied = apply_server_operation( + let Some((next_operation, applied)) = plan.lock().next_server_operation(&clients) else { break }; + let did_apply = apply_server_operation( deterministic.clone(), &mut server, &mut clients, @@ -132,8 +132,8 @@ async fn test_random_collaboration( cx, ) .await; - if !applied { - skipped.store(true, SeqCst); + if did_apply { + applied.store(true, SeqCst); } } @@ -1207,8 +1207,8 @@ impl TestPlan { // Format each operation as one line let mut json = Vec::new(); json.push(b'['); - for (operation, skipped) in &self.stored_operations { - if skipped.load(SeqCst) { + for (operation, applied) in &self.stored_operations { + if !applied.load(SeqCst) { continue; } if json.len() > 1 { @@ -1228,17 +1228,17 @@ impl TestPlan { if self.replay { while let Some(stored_operation) = self.stored_operations.get(self.operation_ix) { self.operation_ix += 1; - if let (StoredOperation::Server(operation), skipped) = stored_operation { - return Some((operation.clone(), skipped.clone())); + if let (StoredOperation::Server(operation), applied) = stored_operation { + return Some((operation.clone(), applied.clone())); } } None } else { let operation = self.generate_server_operation(clients)?; - let skipped = Arc::new(AtomicBool::new(false)); + let applied = Arc::new(AtomicBool::new(false)); self.stored_operations - .push((StoredOperation::Server(operation.clone()), skipped.clone())); - Some((operation, skipped)) + .push((StoredOperation::Server(operation.clone()), applied.clone())); + Some((operation, applied)) } } @@ -1263,27 +1263,27 @@ impl TestPlan { StoredOperation::Client { user_id, operation, .. 
}, - skipped, + applied, ) = stored_operation { if user_id == &current_user_id { - return Some((operation.clone(), skipped.clone())); + return Some((operation.clone(), applied.clone())); } } } None } else { let operation = self.generate_client_operation(current_user_id, client, cx)?; - let skipped = Arc::new(AtomicBool::new(false)); + let applied = Arc::new(AtomicBool::new(false)); self.stored_operations.push(( StoredOperation::Client { user_id: current_user_id, batch_id: current_batch_id, operation: operation.clone(), }, - skipped.clone(), + applied.clone(), )); - Some((operation, skipped)) + Some((operation, applied)) } } @@ -1851,11 +1851,14 @@ async fn simulate_client( client.language_registry.add(Arc::new(language)); while let Some(batch_id) = operation_rx.next().await { - let Some((operation, skipped)) = plan.lock().next_client_operation(&client, batch_id, &cx) else { break }; + let Some((operation, applied)) = plan.lock().next_client_operation(&client, batch_id, &cx) else { break }; match apply_client_operation(&client, operation, &mut cx).await { - Ok(()) => {} - Err(TestError::Inapplicable) => skipped.store(true, SeqCst), + Ok(()) => applied.store(true, SeqCst), + Err(TestError::Inapplicable) => { + log::info!("skipped operation"); + } Err(TestError::Other(error)) => { + applied.store(true, SeqCst); log::error!("{} error: {}", client.username, error); } } From 661fba864025f3d2e3b1efe53b9beb684e821031 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 15:05:32 -0700 Subject: [PATCH 25/80] Run executor until parked at end of each iteration of random collaboration test Without this, the server doesn't get dropped at the end of the test, and we eventually run out of file handles due to sqlite connections being retained. --- crates/collab/src/tests/randomized_integration_tests.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index e53f33b16dc770afbb96e86ad21e323510f2ff8d..b85633ba158870355ff53871baf2d9d47e83acf1 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -152,6 +152,8 @@ async fn test_random_collaboration( drop(client); }); } + + deterministic.run_until_parked(); } fn on_failure() { From 43a94cda5fa4e3dc9255656cc66416c23a75fc5d Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 15:36:01 -0700 Subject: [PATCH 26/80] Don't skip worktree updates if unknown entries are removed When rejoining a project, if entries were both created and deleted since joining the project, the guest will receive those entry ids as removed.
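In sketch form, the tolerant handling looks like this (simplified from the worktree snapshot code in the diff below):

    // Previously an unknown id failed the whole update with "unknown
    // entry"; now such ids are skipped, since the guest may never have
    // observed the entry at all.
    for entry_id in update.removed_entries {
        if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
    }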
--- crates/project/src/worktree.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index b1aebf29f1048746c6f0a78ce347c5b507acfd68..cbd80def6ca1b2aabb163885346b56693d8dba71 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -1227,11 +1227,10 @@ impl Snapshot { let mut entries_by_path_edits = Vec::new(); let mut entries_by_id_edits = Vec::new(); for entry_id in update.removed_entries { - let entry = self - .entry_for_id(ProjectEntryId::from_proto(entry_id)) - .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?; - entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone()))); - entries_by_id_edits.push(Edit::Remove(entry.id)); + if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) { + entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone()))); + entries_by_id_edits.push(Edit::Remove(entry.id)); + } } for entry in update.updated_entries { From 8e68c7f808bc3e07f17f0ce998f54388721daeba Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 16:52:39 -0700 Subject: [PATCH 27/80] Do include operations in serialized test plan if they cause a client to hang --- crates/collab/src/tests/randomized_integration_tests.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index b85633ba158870355ff53871baf2d9d47e83acf1..c70c76a76adaee9a71d800cf5c319be55d497bea 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -1854,13 +1854,14 @@ async fn simulate_client( while let Some(batch_id) = operation_rx.next().await { let Some((operation, applied)) = plan.lock().next_client_operation(&client, batch_id, &cx) else { break }; + applied.store(true, SeqCst); match apply_client_operation(&client, operation, &mut cx).await { - Ok(()) => applied.store(true, SeqCst), + Ok(()) => {} Err(TestError::Inapplicable) => { + applied.store(false, SeqCst); log::info!("skipped operation"); } Err(TestError::Other(error)) => { - applied.store(true, SeqCst); log::error!("{} error: {}", client.username, error); } } From bda708622093660eef6ec6289093346d0ed13e85 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 16:53:36 -0700 Subject: [PATCH 28/80] Clear guest's shared buffers if they rejoin project after leaving while host was disconnected --- crates/project/src/project.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 3e5a45007560a4f5bdb1a89bc4c8e99783c7e72b..88a187982cc45c807ad3b3b02bcdec6633a81003 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4847,6 +4847,7 @@ impl Project { let collaborator = Collaborator::from_proto(collaborator)?; this.update(&mut cx, |this, cx| { + this.shared_buffers.remove(&collaborator.peer_id); this.collaborators .insert(collaborator.peer_id, collaborator); cx.notify(); From 1064b147794e06a49835fb164a51ccbebf0ebad0 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 17:50:04 -0700 Subject: [PATCH 29/80] Don't use TestPlan's rng in fake LSP handlers These should use the test context's rng, so that they behave the same whether a pre-recorded plan was used, or the plan is being generated. 
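As a sketch, the handlers now draw randomness from the deterministic executor (assuming, as in the diff below, that the handler closure receives an async context `cx`):

    // Inside a fake LSP request handler. Taking the rng from the
    // executor rather than the TestPlan means a replayed plan observes
    // the same random sequence as the run that generated it.
    move |_request, cx| {
        let background = cx.background();
        let mut rng = background.rng();
        let count = rng.gen_range(1..=5);
        async move {
            // ...fabricate `count` fake results...
        }
    }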
--- .../src/tests/randomized_integration_tests.rs | 40 +++++++++---------- crates/gpui/src/executor.rs | 10 +++++ 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index c70c76a76adaee9a71d800cf5c319be55d497bea..a592881929cd720ab03f758fe06616f4400a70e6 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -1755,7 +1755,6 @@ async fn simulate_client( name: "the-fake-language-server", capabilities: lsp::LanguageServer::full_capabilities(), initializer: Some(Box::new({ - let plan = plan.clone(); let fs = client.fs.clone(); move |fake_server: &mut FakeLanguageServer| { fake_server.handle_request::( @@ -1797,16 +1796,15 @@ async fn simulate_client( fake_server.handle_request::({ let fs = fs.clone(); - let plan = plan.clone(); - move |_, _| { - let fs = fs.clone(); - let plan = plan.clone(); + move |_, cx| { + let background = cx.background(); + let mut rng = background.rng(); + let count = rng.gen_range::(1..3); + let files = fs.files(); + let files = (0..count) + .map(|_| files.choose(&mut *rng).unwrap().clone()) + .collect::>(); async move { - let files = fs.files(); - let count = plan.lock().rng.gen_range::(1..3); - let files = (0..count) - .map(|_| files.choose(&mut plan.lock().rng).unwrap()) - .collect::>(); log::info!("LSP: Returning definitions in files {:?}", &files); Ok(Some(lsp::GotoDefinitionResponse::Array( files @@ -1821,17 +1819,19 @@ async fn simulate_client( } }); - fake_server.handle_request::({ - let plan = plan.clone(); - move |_, _| { + fake_server.handle_request::( + move |_, cx| { let mut highlights = Vec::new(); - let highlight_count = plan.lock().rng.gen_range(1..=5); + let background = cx.background(); + let mut rng = background.rng(); + + let highlight_count = rng.gen_range(1..=5); for _ in 0..highlight_count { - let start_row = plan.lock().rng.gen_range(0..100); - let start_column = plan.lock().rng.gen_range(0..100); + let start_row = rng.gen_range(0..100); + let start_column = rng.gen_range(0..100); + let end_row = rng.gen_range(0..100); + let end_column = rng.gen_range(0..100); let start = PointUtf16::new(start_row, start_column); - let end_row = plan.lock().rng.gen_range(0..100); - let end_column = plan.lock().rng.gen_range(0..100); let end = PointUtf16::new(end_row, end_column); let range = if start > end { end..start } else { start..end }; highlights.push(lsp::DocumentHighlight { @@ -1843,8 +1843,8 @@ async fn simulate_client( (highlight.range.start, highlight.range.end) }); async move { Ok(Some(highlights)) } - } - }); + }, + ); } })), ..Default::default() diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs index 16afa987e9d732cc6bbfceb77b2a1cf564860aa2..3ed6abc8e025c28ef85ee50d01fbbe605ed6642d 100644 --- a/crates/gpui/src/executor.rs +++ b/crates/gpui/src/executor.rs @@ -829,6 +829,16 @@ impl Background { } } + #[cfg(any(test, feature = "test-support"))] + pub fn rng<'a>(&'a self) -> impl 'a + std::ops::DerefMut { + match self { + Self::Deterministic { executor, .. 
} => { + parking_lot::lock_api::MutexGuard::map(executor.state.lock(), |s| &mut s.rng) + } + _ => panic!("this method can only be called on a deterministic executor"), + } + } + #[cfg(any(test, feature = "test-support"))] pub async fn simulate_random_delay(&self) { match self { From bf3b8adf359a4bf45222d99c6108bfd597c1a2b9 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 5 Apr 2023 18:11:33 -0700 Subject: [PATCH 30/80] Avoid async fs call before checking if operation is applicable This way, the executor isn't influenced by operations that aren't applicable. --- .../src/tests/randomized_integration_tests.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index a592881929cd720ab03f758fe06616f4400a70e6..144484816b289a16f3acfbb4cc0db4984c2266bf 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -735,11 +735,13 @@ async fn apply_client_operation( is_dir, content, } => { - client + if !client .fs - .metadata(&path.parent().unwrap()) - .await? - .ok_or(TestError::Inapplicable)?; + .directories() + .contains(&path.parent().unwrap().to_owned()) + { + return Err(TestError::Inapplicable); + } if is_dir { log::info!("{}: creating dir at {:?}", client.username, path); @@ -761,13 +763,8 @@ async fn apply_client_operation( repo_path, contents, } => { - if !client - .fs - .metadata(&repo_path) - .await? - .map_or(false, |m| m.is_dir) - { - Err(TestError::Inapplicable)?; + if !client.fs.directories().contains(&repo_path) { + return Err(TestError::Inapplicable); } log::info!( From d7f56d6126fd93cfa59429894446b348a9dec6f0 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 6 Apr 2023 13:49:09 +0200 Subject: [PATCH 31/80] Forget which buffers were shared when host reconnects This fixes a bug where the host would momentarily disconnect and the guest would close and reopen the project. This would cause the host to not observe the guest closing the project. When the guest tried to open one of the buffers opened prior to closing the project, the host would not send them the buffer state because it would still remember that the buffer was shared. The `shared_buffers` map is now cleared when the host reconnects and will slowly get re-filled as guests issue `SynchronizeBuffers` requests. 
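The bookkeeping at fault, reduced to a compilable sketch (`shared_buffers` mirrors the real field; `PeerId` and the surrounding struct are simplified stand-ins):

    use std::collections::{HashMap, HashSet};

    type PeerId = u64; // stand-in for the rpc crate's peer identifier

    // Host-side record of which buffers each guest already has. If this map
    // survives a host reconnect, the host will skip sending buffer state to
    // a guest that closed and reopened the project while the host was away.
    struct SharedState {
        shared_buffers: HashMap<PeerId, HashSet<u64>>, // peer -> sent buffer ids
    }

    impl SharedState {
        fn reshared(&mut self) {
            // Forget everything on reconnect; guests repopulate their
            // entries lazily through SynchronizeBuffers requests.
            self.shared_buffers.clear();
        }

        fn mark_sent(&mut self, peer: PeerId, buffer_id: u64) -> bool {
            // True the first time a buffer is sent to this guest; false
            // signals "already shared", which is the stale state the fix
            // clears away.
            self.shared_buffers.entry(peer).or_default().insert(buffer_id)
        }
    }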
--- crates/project/src/project.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 88a187982cc45c807ad3b3b02bcdec6633a81003..19078f31d77aa21d5b68b09cb50fffca3966f81e 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1081,6 +1081,7 @@ impl Project { message: proto::ResharedProject, cx: &mut ModelContext, ) -> Result<()> { + self.shared_buffers.clear(); self.set_collaborators_from_proto(message.collaborators, cx)?; let _ = self.metadata_changed(cx); Ok(()) From f995d07542fd96630c593ff34fa0d432d1f73e12 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 6 Apr 2023 14:42:19 +0200 Subject: [PATCH 32/80] Return error if subscribing to an entity that was already subscribed to --- crates/client/src/client.rs | 27 +++++++++++++++++---------- crates/project/src/project.rs | 13 ++++++------- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index ae8cf8bf56d87f3b710caf62a6e068883aed7586..f405c14a18cfa937f2ab094c3d10f88d8aadbc27 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -473,18 +473,22 @@ impl Client { pub fn subscribe_to_entity( self: &Arc, remote_id: u64, - ) -> PendingEntitySubscription { + ) -> Result> { let id = (TypeId::of::(), remote_id); - self.state - .write() - .entities_by_type_and_remote_id - .insert(id, WeakSubscriber::Pending(Default::default())); - PendingEntitySubscription { - client: self.clone(), - remote_id, - consumed: false, - _entity_type: PhantomData, + let mut state = self.state.write(); + if state.entities_by_type_and_remote_id.contains_key(&id) { + return Err(anyhow!("already subscribed to entity")); + } else { + state + .entities_by_type_and_remote_id + .insert(id, WeakSubscriber::Pending(Default::default())); + Ok(PendingEntitySubscription { + client: self.clone(), + remote_id, + consumed: false, + _entity_type: PhantomData, + }) } } @@ -1605,14 +1609,17 @@ mod tests { let _subscription1 = client .subscribe_to_entity(1) + .unwrap() .set_model(&model1, &mut cx.to_async()); let _subscription2 = client .subscribe_to_entity(2) + .unwrap() .set_model(&model2, &mut cx.to_async()); // Ensure dropping a subscription for the same entity type still allows receiving of // messages for other entity IDs of the same type. let subscription3 = client .subscribe_to_entity(3) + .unwrap() .set_model(&model3, &mut cx.to_async()); drop(subscription3); diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 19078f31d77aa21d5b68b09cb50fffca3966f81e..72f3d05cae5170f08ecb9ad7722e047f7d536686 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -463,7 +463,7 @@ impl Project { ) -> Result> { client.authenticate_and_connect(true, &cx).await?; - let subscription = client.subscribe_to_entity(remote_id); + let subscription = client.subscribe_to_entity(remote_id)?; let response = client .request_envelope(proto::JoinProject { project_id: remote_id, @@ -989,6 +989,11 @@ impl Project { if self.client_state.is_some() { return Err(anyhow!("project was already shared")); } + self.client_subscriptions.push( + self.client + .subscribe_to_entity(project_id)? 
+ .set_model(&cx.handle(), &mut cx.to_async()), + ); for open_buffer in self.opened_buffers.values_mut() { match open_buffer { @@ -1025,12 +1030,6 @@ impl Project { .log_err(); } - self.client_subscriptions.push( - self.client - .subscribe_to_entity(project_id) - .set_model(&cx.handle(), &mut cx.to_async()), - ); - let (metadata_changed_tx, mut metadata_changed_rx) = mpsc::unbounded(); self.client_state = Some(ProjectClientState::Local { remote_id: project_id, From 4a61e2dfa45f982585019b3859a41be727ec9aa2 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 6 Apr 2023 16:02:16 +0200 Subject: [PATCH 33/80] Save server operations that were in the middle of being applied Previously, if the test panicked before it had a chance to fully apply an operation, it would end up not being saved in the plan. With this commit we will mark the operation as applied before we start processing it, and mark it as not applied if, once we're done, we've found out that it couldn't be applied. This is consistent with what we do for client operations. --- crates/collab/src/tests/randomized_integration_tests.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 144484816b289a16f3acfbb4cc0db4984c2266bf..6a13c4ef2eaa9993b50e4dfd825daa4ee9a06d12 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -121,6 +121,7 @@ async fn test_random_collaboration( loop { let Some((next_operation, applied)) = plan.lock().next_server_operation(&clients) else { break }; + applied.store(true, SeqCst); let did_apply = apply_server_operation( deterministic.clone(), &mut server, @@ -132,8 +133,8 @@ async fn test_random_collaboration( cx, ) .await; - if did_apply { - applied.store(true, SeqCst); + if !did_apply { + applied.store(false, SeqCst); } } From 8020ea783fc3eaa90d0aa38ee328f5c32e734005 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 6 Apr 2023 16:23:10 +0200 Subject: [PATCH 34/80] Wait to see guest's buffer version before converting completion anchor --- crates/project/src/project.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 72f3d05cae5170f08ecb9ad7722e047f7d536686..a9cab023cd86b17a3e9f371dbc9ae25c1fe2c53d 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -5550,6 +5550,12 @@ impl Project { .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id)) })?; + let version = deserialize_version(envelope.payload.version); + buffer + .update(&mut cx, |buffer, _| buffer.wait_for_version(version)) + .await; + let version = buffer.read_with(&cx, |buffer, _| buffer.version()); + let position = envelope .payload .position @@ -5561,12 +5567,6 @@ impl Project { }) .ok_or_else(|| anyhow!("invalid position"))?; - let version = deserialize_version(envelope.payload.version); - buffer - .update(&mut cx, |buffer, _| buffer.wait_for_version(version)) - .await; - let version = buffer.read_with(&cx, |buffer, _| buffer.version()); - let completions = this .update(&mut cx, |this, cx| this.completions(&buffer, position, cx)) .await?; From ef04dc14ccf54b716cb331690fce25ef83939d72 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 6 Apr 2023 17:48:44 +0200 Subject: [PATCH 35/80] Update file on incomplete buffer instead of waiting for it to be opened This ensures 
that two successive file updates coming from the host are not applied in reverse order. --- crates/project/src/project.rs | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index a9cab023cd86b17a3e9f371dbc9ae25c1fe2c53d..4e39f5e155c772f531c8773445fcd8684af4f8a9 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -5313,28 +5313,20 @@ impl Project { mut cx: AsyncAppContext, ) -> Result<()> { let buffer_id = envelope.payload.buffer_id; - let is_incomplete = this.read_with(&cx, |this, _| { - this.incomplete_remote_buffers.contains_key(&buffer_id) - }); - - let buffer = if is_incomplete { - Some( - this.update(&mut cx, |this, cx| { - this.wait_for_remote_buffer(buffer_id, cx) - }) - .await?, - ) - } else { - None - }; this.update(&mut cx, |this, cx| { let payload = envelope.payload.clone(); - if let Some(buffer) = buffer.or_else(|| { - this.opened_buffers - .get(&buffer_id) - .and_then(|b| b.upgrade(cx)) - }) { + if let Some(buffer) = this + .opened_buffers + .get(&buffer_id) + .and_then(|b| b.upgrade(cx)) + .or_else(|| { + this.incomplete_remote_buffers + .get(&buffer_id) + .cloned() + .flatten() + }) + { let file = payload.file.ok_or_else(|| anyhow!("invalid file"))?; let worktree = this .worktree_for_id(WorktreeId::from_proto(file.worktree_id), cx) From 22a6a243bc5711a8b313d286166685932c1ff3a1 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 6 Apr 2023 11:38:21 -0700 Subject: [PATCH 36/80] Move project assertions into main assertion function Co-authored-by: Antonio Scandurra --- .../src/tests/randomized_integration_tests.rs | 12 ++++++- crates/project/src/project.rs | 36 +++---------------- 2 files changed, 16 insertions(+), 32 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 6a13c4ef2eaa9993b50e4dfd825daa4ee9a06d12..20309d1a63c8d0e61ad0a1c0774100c0cbe6731f 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -869,7 +869,17 @@ fn check_consistency_between_clients(clients: &[(Rc, TestAppContext) } } - guest_project.check_invariants(cx); + for buffer in guest_project.opened_buffers(cx) { + let buffer = buffer.read(cx); + assert_eq!( + buffer.deferred_ops_len(), + 0, + "{} has deferred operations for buffer {:?} in project {:?}", + client.username, + buffer.file().unwrap().full_path(cx), + guest_project.remote_id(), + ); + } }); } diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 4e39f5e155c772f531c8773445fcd8684af4f8a9..7589a52fe3ec724a0bff230cf49b3aa20d10142f 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -659,37 +659,11 @@ impl Project { } #[cfg(any(test, feature = "test-support"))] - pub fn check_invariants(&self, cx: &AppContext) { - if self.is_local() { - let mut worktree_root_paths = HashMap::default(); - for worktree in self.worktrees(cx) { - let worktree = worktree.read(cx); - let abs_path = worktree.as_local().unwrap().abs_path().clone(); - let prev_worktree_id = worktree_root_paths.insert(abs_path.clone(), worktree.id()); - assert_eq!( - prev_worktree_id, - None, - "abs path {:?} for worktree {:?} is not unique ({:?} was already registered with the same path)", - abs_path, - worktree.id(), - prev_worktree_id - ) - } - } else { - let replica_id = self.replica_id(); - for buffer in 
self.opened_buffers.values() { - if let Some(buffer) = buffer.upgrade(cx) { - let buffer = buffer.read(cx); - assert_eq!( - buffer.deferred_ops_len(), - 0, - "replica {}, buffer {} has deferred operations", - replica_id, - buffer.remote_id() - ); - } - } - } + pub fn opened_buffers(&self, cx: &AppContext) -> Vec> { + self.opened_buffers + .values() + .filter_map(|b| b.upgrade(cx)) + .collect() } #[cfg(any(test, feature = "test-support"))] From aa7918c4b5442a4d85320830555d4b5dfd17659d Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 6 Apr 2023 12:17:25 -0700 Subject: [PATCH 37/80] Fix handling of redundant buffer creation messages on guests Check if the buffer already exists *before* overwriting it. Ignore redundant registrations on remote projects. Co-authored-by: Antonio Scandurra --- crates/project/src/project.rs | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 7589a52fe3ec724a0bff230cf49b3aa20d10142f..bbaa76ea694e0d5f6dfdf8cc5fe7601de9e94936 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1488,32 +1488,29 @@ impl Project { }); let remote_id = buffer.read(cx).remote_id(); - let open_buffer = if self.is_remote() || self.is_shared() { + let is_remote = self.is_remote(); + let open_buffer = if is_remote || self.is_shared() { OpenBuffer::Strong(buffer.clone()) } else { OpenBuffer::Weak(buffer.downgrade()) }; - match self.opened_buffers.insert(remote_id, open_buffer) { - None => {} - Some(OpenBuffer::Operations(operations)) => { - buffer.update(cx, |buffer, cx| buffer.apply_ops(operations, cx))? + match self.opened_buffers.entry(remote_id) { + hash_map::Entry::Vacant(entry) => { + entry.insert(open_buffer); } - Some(OpenBuffer::Weak(existing_handle)) => { - if existing_handle.upgrade(cx).is_some() { - debug_panic!("already registered buffer with remote id {}", remote_id); - Err(anyhow!( - "already registered buffer with remote id {}", - remote_id - ))? + hash_map::Entry::Occupied(mut entry) => { + if let OpenBuffer::Operations(operations) = entry.get_mut() { + buffer.update(cx, |b, cx| b.apply_ops(operations.drain(..), cx))?; + } else if entry.get().upgrade(cx).is_some() { + if is_remote { + return Ok(()); + } else { + debug_panic!("buffer {} was already registered", remote_id); + Err(anyhow!("buffer {} was already registered", remote_id))?; + } } - } - Some(OpenBuffer::Strong(_)) => { - debug_panic!("already registered buffer with remote id {}", remote_id); - Err(anyhow!( - "already registered buffer with remote id {}", - remote_id - ))? 
+ entry.insert(open_buffer); } } cx.subscribe(buffer, |this, buffer, event, cx| { From 035189a2a138ba5192e8a45a4cabd9b1cb230d6b Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Fri, 7 Apr 2023 14:32:14 -0400 Subject: [PATCH 38/80] Put file location details of panic on separate line --- crates/zed/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 0e4f15261f9dd22c28968ae91f3837d8f02a0731..aec012db0bf0c522d3d3710663ced8aaba7792ba 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -336,7 +336,7 @@ fn init_panic_hook(app_version: String) { let message = match info.location() { Some(location) => { format!( - "thread '{}' panicked at '{}': {}:{}\n{:?}", + "thread '{}' panicked at '{}'\n{}:{}\n{:?}", thread, payload, location.file(), From f519f32ec2e3b1f7088157eccdb43b94f878e94c Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 7 Apr 2023 12:24:59 -0700 Subject: [PATCH 39/80] Fixed removal of closed projects in randomized test Co-authored-by: Antonio Scandurra --- crates/collab/src/tests/randomized_integration_tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index 20309d1a63c8d0e61ad0a1c0774100c0cbe6731f..fc491fd7f312352be639c6e09d498d35562dac25 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -448,7 +448,7 @@ async fn apply_client_operation( .unwrap(); cx.update(|_| { client.remote_projects_mut().remove(ix); - client.buffers().retain(|project, _| project != project); + client.buffers().retain(|p, _| *p != project); drop(project); }); } From e50c48852ae4d7569c6ab28f5019ec5addf314b4 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 7 Apr 2023 16:27:48 -0700 Subject: [PATCH 40/80] Wait for host to acknowledge buffer updates before sending them to other guests --- crates/collab/src/rpc.rs | 33 ++++++++++++++++++++++++++++----- crates/project/src/project.rs | 6 +++--- crates/rpc/src/rpc.rs | 2 +- 3 files changed, 32 insertions(+), 9 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 42a88d7d4ce9dc533f79fb809121ec978cce58d5..c9b9efdc4c3ae416cfeff3c9c2592edc4d989b48 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1655,17 +1655,40 @@ async fn update_buffer( ) -> Result<()> { session.executor.record_backtrace(); let project_id = ProjectId::from_proto(request.project_id); - let project_connection_ids = session + let host_connection_id = { + let collaborators = session + .db() + .await + .project_collaborators(project_id, session.connection_id) + .await?; + + let host = collaborators + .iter() + .find(|collaborator| collaborator.is_host) + .ok_or_else(|| anyhow!("host not found"))?; + host.connection_id + }; + + if host_connection_id != session.connection_id { + session + .peer + .forward_request(session.connection_id, host_connection_id, request.clone()) + .await?; + } + + session.executor.record_backtrace(); + let collaborators = session .db() .await - .project_connection_ids(project_id, session.connection_id) + .project_collaborators(project_id, session.connection_id) .await?; - session.executor.record_backtrace(); - broadcast( Some(session.connection_id), - project_connection_ids.iter().copied(), + collaborators + .iter() + .filter(|collaborator| !collaborator.is_host) + .map(|collaborator| 
collaborator.connection_id), |connection_id| { session .peer diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index bbaa76ea694e0d5f6dfdf8cc5fe7601de9e94936..376c84a9d0fc9f19a36efdfbdd0c56f98dabd28f 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -380,7 +380,7 @@ impl Project { client.add_model_message_handler(Self::handle_unshare_project); client.add_model_message_handler(Self::handle_create_buffer_for_peer); client.add_model_message_handler(Self::handle_update_buffer_file); - client.add_model_message_handler(Self::handle_update_buffer); + client.add_model_request_handler(Self::handle_update_buffer); client.add_model_message_handler(Self::handle_update_diagnostic_summary); client.add_model_message_handler(Self::handle_update_worktree); client.add_model_request_handler(Self::handle_create_project_entry); @@ -5160,7 +5160,7 @@ impl Project { envelope: TypedEnvelope, _: Arc, mut cx: AsyncAppContext, - ) -> Result<()> { + ) -> Result { this.update(&mut cx, |this, cx| { let payload = envelope.payload.clone(); let buffer_id = payload.buffer_id; @@ -5187,7 +5187,7 @@ impl Project { e.insert(OpenBuffer::Operations(ops)); } } - Ok(()) + Ok(proto::Ack {}) }) } diff --git a/crates/rpc/src/rpc.rs b/crates/rpc/src/rpc.rs index bec518b707be38d1083e9ffcf66982e7435b41d1..898c8c5e98bbcc1f864d3906e41bfe6296ec3398 100644 --- a/crates/rpc/src/rpc.rs +++ b/crates/rpc/src/rpc.rs @@ -6,4 +6,4 @@ pub use conn::Connection; pub use peer::*; mod macros; -pub const PROTOCOL_VERSION: u32 = 50; +pub const PROTOCOL_VERSION: u32 = 51; From acbf9b55d71247488882a906416dd6da7f3a6fd6 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Fri, 7 Apr 2023 17:31:47 -0700 Subject: [PATCH 41/80] Halt UpdateBuffer messages until sync if one errors Co-authored-by: Antonio Scandurra --- crates/project/src/project.rs | 158 ++++++++++++++++++++++++---------- 1 file changed, 114 insertions(+), 44 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 376c84a9d0fc9f19a36efdfbdd0c56f98dabd28f..f915d53c01e3c9f765dc894f0ac70cc68fb364e7 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -13,7 +13,10 @@ use client::{proto, Client, TypedEnvelope, UserStore}; use clock::ReplicaId; use collections::{hash_map, BTreeMap, HashMap, HashSet}; use futures::{ - channel::{mpsc, oneshot}, + channel::{ + mpsc::{self, UnboundedReceiver}, + oneshot, + }, future::{try_join_all, Shared}, AsyncWriteExt, Future, FutureExt, StreamExt, TryFutureExt, }; @@ -92,6 +95,7 @@ pub trait Item { pub struct Project { worktrees: Vec, active_entry: Option, + buffer_changes_tx: mpsc::UnboundedSender, languages: Arc, language_servers: HashMap, language_server_ids: HashMap<(WorktreeId, LanguageServerName), usize>, @@ -130,6 +134,14 @@ pub struct Project { terminals: Terminals, } +enum BufferMessage { + Operation { + buffer_id: u64, + operation: proto::Operation, + }, + Resync, +} + enum OpenBuffer { Strong(ModelHandle), Weak(WeakModelHandle), @@ -417,39 +429,45 @@ impl Project { fs: Arc, cx: &mut MutableAppContext, ) -> ModelHandle { - cx.add_model(|cx: &mut ModelContext| Self { - worktrees: Default::default(), - collaborators: Default::default(), - opened_buffers: Default::default(), - shared_buffers: Default::default(), - incomplete_remote_buffers: Default::default(), - loading_buffers_by_path: Default::default(), - loading_local_worktrees: Default::default(), - buffer_snapshots: Default::default(), - join_project_response_message_id: 0, - 
client_state: None, - opened_buffer: watch::channel(), - client_subscriptions: Vec::new(), - _subscriptions: vec![cx.observe_global::(Self::on_settings_changed)], - _maintain_buffer_languages: Self::maintain_buffer_languages(&languages, cx), - _maintain_workspace_config: Self::maintain_workspace_config(languages.clone(), cx), - active_entry: None, - languages, - client, - user_store, - fs, - next_entry_id: Default::default(), - next_diagnostic_group_id: Default::default(), - language_servers: Default::default(), - language_server_ids: Default::default(), - language_server_statuses: Default::default(), - last_workspace_edits_by_language_server: Default::default(), - buffers_being_formatted: Default::default(), - next_language_server_id: 0, - nonce: StdRng::from_entropy().gen(), - terminals: Terminals { - local_handles: Vec::new(), - }, + cx.add_model(|cx: &mut ModelContext| { + let (tx, rx) = mpsc::unbounded(); + cx.spawn_weak(|this, cx| Self::send_buffer_messages(this, rx, cx)) + .detach(); + Self { + worktrees: Default::default(), + buffer_changes_tx: tx, + collaborators: Default::default(), + opened_buffers: Default::default(), + shared_buffers: Default::default(), + incomplete_remote_buffers: Default::default(), + loading_buffers_by_path: Default::default(), + loading_local_worktrees: Default::default(), + buffer_snapshots: Default::default(), + join_project_response_message_id: 0, + client_state: None, + opened_buffer: watch::channel(), + client_subscriptions: Vec::new(), + _subscriptions: vec![cx.observe_global::(Self::on_settings_changed)], + _maintain_buffer_languages: Self::maintain_buffer_languages(&languages, cx), + _maintain_workspace_config: Self::maintain_workspace_config(languages.clone(), cx), + active_entry: None, + languages, + client, + user_store, + fs, + next_entry_id: Default::default(), + next_diagnostic_group_id: Default::default(), + language_servers: Default::default(), + language_server_ids: Default::default(), + language_server_statuses: Default::default(), + last_workspace_edits_by_language_server: Default::default(), + buffers_being_formatted: Default::default(), + next_language_server_id: 0, + nonce: StdRng::from_entropy().gen(), + terminals: Terminals { + local_handles: Vec::new(), + }, + } }) } @@ -480,8 +498,12 @@ impl Project { worktrees.push(worktree); } + let (tx, rx) = mpsc::unbounded(); + cx.spawn_weak(|this, cx| Self::send_buffer_messages(this, rx, cx)) + .detach(); let mut this = Self { worktrees: Vec::new(), + buffer_changes_tx: tx, loading_buffers_by_path: Default::default(), opened_buffer: watch::channel(), shared_buffers: Default::default(), @@ -1084,8 +1106,9 @@ impl Project { ) }) .collect(); - self.synchronize_remote_buffers(cx).detach_and_log_err(cx); - + self.buffer_changes_tx + .unbounded_send(BufferMessage::Resync) + .unwrap(); cx.notify(); Ok(()) } @@ -1635,6 +1658,53 @@ impl Project { }); } + async fn send_buffer_messages( + this: WeakModelHandle, + mut rx: UnboundedReceiver, + mut cx: AsyncAppContext, + ) { + let mut needs_resync_with_host = false; + while let Some(change) = rx.next().await { + if let Some(this) = this.upgrade(&mut cx) { + let is_local = this.read_with(&cx, |this, _| this.is_local()); + match change { + BufferMessage::Operation { + buffer_id, + operation, + } => { + if needs_resync_with_host { + continue; + } + let request = this.read_with(&cx, |this, _| { + let project_id = this.remote_id()?; + Some(this.client.request(proto::UpdateBuffer { + buffer_id, + project_id, + operations: vec![operation], + })) + }); + if let 
Some(request) = request { + if request.await.is_err() && !is_local { + needs_resync_with_host = true; + } + } + } + BufferMessage::Resync => { + if this + .update(&mut cx, |this, cx| this.synchronize_remote_buffers(cx)) + .await + .is_ok() + { + needs_resync_with_host = false; + } + } + } + } else { + break; + } + } + } + fn on_buffer_event( &mut self, buffer: ModelHandle, @@ -1643,14 +1713,12 @@ impl Project { ) -> Option<()> { match event { BufferEvent::Operation(operation) => { - if let Some(project_id) = self.remote_id() { - let request = self.client.request(proto::UpdateBuffer { - project_id, + self.buffer_changes_tx + .unbounded_send(BufferMessage::Operation { buffer_id: buffer.read(cx).remote_id(), - operations: vec![language::proto::serialize_operation(operation)], - }); - cx.background().spawn(request).detach_and_log_err(cx); - } + operation: language::proto::serialize_operation(operation), + }) + .ok(); } BufferEvent::Edited { .. } => { let language_server = self @@ -4861,7 +4929,9 @@ impl Project { } if is_host { - this.synchronize_remote_buffers(cx).detach_and_log_err(cx); + this.buffer_changes_tx + .unbounded_send(BufferMessage::Resync) + .unwrap(); } cx.emit(Event::CollaboratorUpdated { From 372e31d54f5e940d741fba4750f14444dcc636f6 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 10 Apr 2023 08:16:08 +0200 Subject: [PATCH 42/80] Don't panic if worktree was dropped before sending path changes In `refresh_entry`, we send a message to the `self.path_changes_tx` channel to notify the background thread that a path has changed. However, given that `refresh_entry` uses `spawn_weak`, the worktree could get dropped before sending the message, which could cause a panic. This commit changes the code to return an error instead of panicking. --- crates/project/src/worktree.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 3a9838c62d5ae0c8b35467f66e7537b72406ae43..72e05dd1f9b0a90bf98c7db991fd268895a5943b 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -838,8 +838,7 @@ impl LocalWorktree { .unwrap() .path_changes_tx .try_send((vec![abs_path], tx)) - .unwrap(); - }); + })?; rx.recv().await; Ok(()) })) @@ -930,7 +929,7 @@ impl LocalWorktree { } let (tx, mut rx) = barrier::channel(); - path_changes_tx.try_send((paths, tx)).unwrap(); + path_changes_tx.try_send((paths, tx))?; rx.recv().await; this.upgrade(&cx) .ok_or_else(|| anyhow!("worktree was dropped"))? 
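The shape of that fix in isolation (a sketch with a stand-in `futures` channel; the real task is spawned with `spawn_weak` and surfaces the failure through its returned `Result` rather than a free function like this):

    use anyhow::anyhow;
    use futures::channel::mpsc;
    use std::path::PathBuf;

    // A send to a possibly-dropped receiver becomes a recoverable error
    // instead of a panic: weakly-spawned tasks can outlive the worktree,
    // so a closed channel is an expected outcome, not a bug.
    fn notify_scanner(tx: &mpsc::Sender<PathBuf>, path: PathBuf) -> anyhow::Result<()> {
        tx.clone()
            .try_send(path)
            .map_err(|_| anyhow!("worktree was dropped"))?;
        Ok(())
    }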
From 7f73ebdab5686da524d57b7b45d157b44514ef60 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 10 Apr 2023 08:41:31 +0200 Subject: [PATCH 43/80] Apply `BufferReloaded` message to incomplete remote buffers --- crates/project/src/project.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index f915d53c01e3c9f765dc894f0ac70cc68fb364e7..fd10e17bbf57e26211e916ae29896c1a4f0d7614 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -6310,7 +6310,13 @@ impl Project { let buffer = this .opened_buffers .get(&payload.buffer_id) - .and_then(|buffer| buffer.upgrade(cx)); + .and_then(|buffer| buffer.upgrade(cx)) + .or_else(|| { + this.incomplete_remote_buffers + .get(&payload.buffer_id) + .cloned() + .flatten() + }); if let Some(buffer) = buffer { buffer.update(cx, |buffer, cx| { buffer.did_reload(version, fingerprint, line_ending, mtime, cx); From 3a82c04248377e58bf6e1347535c60526bfdb55e Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 10 Apr 2023 10:01:44 +0200 Subject: [PATCH 44/80] Improve assertion message when buffer state diverges --- .../src/tests/randomized_integration_tests.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index fc491fd7f312352be639c6e09d498d35562dac25..eb78ffd47a555f1f2e54f1907a4d899f85902ee9 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -981,7 +981,8 @@ fn check_consistency_between_clients(clients: &[(Rc, TestAppContext) guest_buffer.read_with(client_cx, |b, _| b.saved_version().clone()); assert_eq!( guest_saved_version, host_saved_version, - "guest saved version does not match host's for path {path:?} in project {project_id}", + "guest {} saved version does not match host's for path {path:?} in project {project_id}", + client.username ); let host_saved_version_fingerprint = @@ -990,26 +991,30 @@ fn check_consistency_between_clients(clients: &[(Rc, TestAppContext) guest_buffer.read_with(client_cx, |b, _| b.saved_version_fingerprint()); assert_eq!( guest_saved_version_fingerprint, host_saved_version_fingerprint, - "guest's saved fingerprint does not match host's for path {path:?} in project {project_id}", + "guest {} saved fingerprint does not match host's for path {path:?} in project {project_id}", + client.username ); let host_saved_mtime = host_buffer.read_with(host_cx, |b, _| b.saved_mtime()); let guest_saved_mtime = guest_buffer.read_with(client_cx, |b, _| b.saved_mtime()); assert_eq!( guest_saved_mtime, host_saved_mtime, - "guest's saved mtime does not match host's for path {path:?} in project {project_id}", + "guest {} saved mtime does not match host's for path {path:?} in project {project_id}", + client.username ); let host_is_dirty = host_buffer.read_with(host_cx, |b, _| b.is_dirty()); let guest_is_dirty = guest_buffer.read_with(client_cx, |b, _| b.is_dirty()); assert_eq!(guest_is_dirty, host_is_dirty, - "guest's dirty status does not match host's for path {path:?} in project {project_id}", + "guest {} dirty status does not match host's for path {path:?} in project {project_id}", + client.username ); let host_has_conflict = host_buffer.read_with(host_cx, |b, _| b.has_conflict()); let guest_has_conflict = guest_buffer.read_with(client_cx, |b, _| b.has_conflict()); assert_eq!(guest_has_conflict, 
host_has_conflict, - "guest's conflict status does not match host's for path {path:?} in project {project_id}", + "guest {} conflict status does not match host's for path {path:?} in project {project_id}", + client.username ); } } From 9761febf82d74c79fcfcef96a0fcd6b5c7ba43ba Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Mon, 10 Apr 2023 10:02:13 +0200 Subject: [PATCH 45/80] Avoid broadcasting `SaveBuffer` in response to a client's save request The host will send a `SaveBuffer` message anyway and this prevents re-querying the database, which could cause two `BufferSaved` messages to race and, as a result, cause guest to apply them in the wrong order. --- crates/collab/src/rpc.rs | 47 +--------------------------------------- 1 file changed, 1 insertion(+), 46 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index c9b9efdc4c3ae416cfeff3c9c2592edc4d989b48..ce5a6a0a1f767a4729f49f88fbd8864eaddb33ac 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -228,7 +228,7 @@ impl Server { .add_message_handler(update_buffer_file) .add_message_handler(buffer_reloaded) .add_message_handler(buffer_saved) - .add_request_handler(save_buffer) + .add_request_handler(forward_project_request::) .add_request_handler(get_users) .add_request_handler(fuzzy_search_users) .add_request_handler(request_contact) @@ -1591,51 +1591,6 @@ where Ok(()) } -async fn save_buffer( - request: proto::SaveBuffer, - response: Response, - session: Session, -) -> Result<()> { - let project_id = ProjectId::from_proto(request.project_id); - let host_connection_id = { - let collaborators = session - .db() - .await - .project_collaborators(project_id, session.connection_id) - .await?; - collaborators - .iter() - .find(|collaborator| collaborator.is_host) - .ok_or_else(|| anyhow!("host not found"))? - .connection_id - }; - let response_payload = session - .peer - .forward_request(session.connection_id, host_connection_id, request.clone()) - .await?; - - let mut collaborators = session - .db() - .await - .project_collaborators(project_id, session.connection_id) - .await?; - collaborators.retain(|collaborator| collaborator.connection_id != session.connection_id); - let project_connection_ids = collaborators - .iter() - .map(|collaborator| collaborator.connection_id); - broadcast( - Some(host_connection_id), - project_connection_ids, - |conn_id| { - session - .peer - .forward_send(host_connection_id, conn_id, response_payload.clone()) - }, - ); - response.send(response_payload)?; - Ok(()) -} - async fn create_buffer_for_peer( request: proto::CreateBufferForPeer, session: Session, From e79815622c0dcff6414a268e88b1d9e8c9246ab7 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 10 Apr 2023 12:40:09 -0700 Subject: [PATCH 46/80] Preserve ordering between UpdateProject and CreateBufferForPeer messages Previously, because UpdateProject messages were sent in a separately- spawned task, they could be sent after CreateBufferForPeer messages that were intended to be sent after them. 
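The ordering principle, reduced to a sketch (`LocalProjectUpdate` matches the enum introduced in the diff below; the ids and loop body are simplified):

    use futures::{channel::mpsc, StreamExt};

    enum LocalProjectUpdate {
        WorktreesChanged,
        CreateBufferForPeer { peer_id: u64, buffer_id: u64 }, // simplified ids
    }

    // One task drains a single unbounded queue, handling each update to
    // completion before taking the next. Messages therefore leave the host
    // in enqueue order, which per-message spawned tasks cannot guarantee.
    async fn send_updates(mut rx: mpsc::UnboundedReceiver<LocalProjectUpdate>) {
        while let Some(update) = rx.next().await {
            match update {
                LocalProjectUpdate::WorktreesChanged => { /* send UpdateProject */ }
                LocalProjectUpdate::CreateBufferForPeer { .. } => { /* send buffer state */ }
            }
        }
    }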
Co-authored-by: Antonio Scandurra --- crates/collab/src/tests/integration_tests.rs | 24 +-- crates/project/src/project.rs | 212 +++++++++---------- crates/workspace/src/workspace.rs | 8 +- 3 files changed, 116 insertions(+), 128 deletions(-) diff --git a/crates/collab/src/tests/integration_tests.rs b/crates/collab/src/tests/integration_tests.rs index 82b542cb6b2d70a65a2151010fab85b056050336..dda80358742d7070b7c40864bb3fefa9dd2610b6 100644 --- a/crates/collab/src/tests/integration_tests.rs +++ b/crates/collab/src/tests/integration_tests.rs @@ -1633,9 +1633,7 @@ async fn test_project_reconnect( }) .await .unwrap(); - worktree_a2 - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; + deterministic.run_until_parked(); let worktree2_id = worktree_a2.read_with(cx_a, |tree, _| { assert!(tree.as_local().unwrap().is_shared()); tree.id() @@ -1696,11 +1694,9 @@ async fn test_project_reconnect( .unwrap(); // While client A is disconnected, add and remove worktrees from client A's project. - project_a1 - .update(cx_a, |project, cx| { - project.remove_worktree(worktree2_id, cx) - }) - .await; + project_a1.update(cx_a, |project, cx| { + project.remove_worktree(worktree2_id, cx) + }); let (worktree_a3, _) = project_a1 .update(cx_a, |p, cx| { p.find_or_create_local_worktree("/root-1/dir3", true, cx) @@ -1824,18 +1820,14 @@ async fn test_project_reconnect( }) .await .unwrap(); - worktree_a4 - .read_with(cx_a, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; + deterministic.run_until_parked(); let worktree4_id = worktree_a4.read_with(cx_a, |tree, _| { assert!(tree.as_local().unwrap().is_shared()); tree.id() }); - project_a1 - .update(cx_a, |project, cx| { - project.remove_worktree(worktree3_id, cx) - }) - .await; + project_a1.update(cx_a, |project, cx| { + project.remove_worktree(worktree3_id, cx) + }); deterministic.run_until_parked(); // While client B is disconnected, mutate a buffer on both the host and the guest. diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 6a8bbb98d29f7740e53a8405c4cf74f67884735e..655425a2a873029219e8cc778314a5a8aef3368a 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -13,10 +13,7 @@ use client::{proto, Client, TypedEnvelope, UserStore}; use clock::ReplicaId; use collections::{hash_map, BTreeMap, HashMap, HashSet}; use futures::{ - channel::{ - mpsc::{self, UnboundedReceiver}, - oneshot, - }, + channel::mpsc::{self, UnboundedReceiver}, future::{try_join_all, Shared}, AsyncWriteExt, Future, FutureExt, StreamExt, TryFutureExt, }; @@ -142,6 +139,14 @@ enum BufferMessage { Resync, } +enum LocalProjectUpdate { + WorktreesChanged, + CreateBufferForPeer { + peer_id: proto::PeerId, + buffer_id: u64, + }, +} + enum OpenBuffer { Strong(ModelHandle), Weak(WeakModelHandle), @@ -156,8 +161,8 @@ enum WorktreeHandle { enum ProjectClientState { Local { remote_id: u64, - metadata_changed: mpsc::UnboundedSender>, - _maintain_metadata: Task<()>, + updates_tx: mpsc::UnboundedSender, + _send_updates: Task<()>, }, Remote { sharing_has_stopped: bool, @@ -725,22 +730,13 @@ impl Project { } } - fn metadata_changed(&mut self, cx: &mut ModelContext) -> impl Future { - let (tx, rx) = oneshot::channel(); - if let Some(ProjectClientState::Local { - metadata_changed, .. - }) = &mut self.client_state - { - let _ = metadata_changed.unbounded_send(tx); + fn metadata_changed(&mut self, cx: &mut ModelContext) { + if let Some(ProjectClientState::Local { updates_tx, .. 
}) = &mut self.client_state { + updates_tx + .unbounded_send(LocalProjectUpdate::WorktreesChanged) + .ok(); } cx.notify(); - - async move { - // If the project is shared, this will resolve when the `_maintain_metadata` task has - // a chance to update the metadata. Otherwise, it will resolve right away because `tx` - // will get dropped. - let _ = rx.await; - } } pub fn collaborators(&self) -> &HashMap { @@ -1026,40 +1022,90 @@ impl Project { .log_err(); } - let (metadata_changed_tx, mut metadata_changed_rx) = mpsc::unbounded(); + let (updates_tx, mut updates_rx) = mpsc::unbounded(); + let client = self.client.clone(); self.client_state = Some(ProjectClientState::Local { remote_id: project_id, - metadata_changed: metadata_changed_tx, - _maintain_metadata: cx.spawn_weak(move |this, mut cx| async move { - let mut txs = Vec::new(); - while let Some(tx) = metadata_changed_rx.next().await { - txs.push(tx); - while let Ok(Some(next_tx)) = metadata_changed_rx.try_next() { - txs.push(next_tx); - } - + updates_tx, + _send_updates: cx.spawn_weak(move |this, mut cx| async move { + while let Some(update) = updates_rx.next().await { let Some(this) = this.upgrade(&cx) else { break }; - let worktrees = - this.read_with(&cx, |this, cx| this.worktrees(cx).collect::>()); - let update_project = this - .read_with(&cx, |this, cx| { - this.client.request(proto::UpdateProject { - project_id, - worktrees: this.worktree_metadata_protos(cx), - }) - }) - .await; - if update_project.is_ok() { - for worktree in worktrees { - worktree.update(&mut cx, |worktree, cx| { - let worktree = worktree.as_local_mut().unwrap(); - worktree.share(project_id, cx).detach_and_log_err(cx) - }); + + match update { + LocalProjectUpdate::WorktreesChanged => { + let worktrees = this + .read_with(&cx, |this, cx| this.worktrees(cx).collect::>()); + let update_project = this + .read_with(&cx, |this, cx| { + this.client.request(proto::UpdateProject { + project_id, + worktrees: this.worktree_metadata_protos(cx), + }) + }) + .await; + if update_project.is_ok() { + for worktree in worktrees { + worktree.update(&mut cx, |worktree, cx| { + let worktree = worktree.as_local_mut().unwrap(); + worktree.share(project_id, cx).detach_and_log_err(cx) + }); + } + } } - } + LocalProjectUpdate::CreateBufferForPeer { peer_id, buffer_id } => { + let buffer = this.update(&mut cx, |this, _| { + let buffer = this.opened_buffers.get(&buffer_id).unwrap(); + let shared_buffers = + this.shared_buffers.entry(peer_id).or_default(); + if shared_buffers.insert(buffer_id) { + if let OpenBuffer::Strong(buffer) = buffer { + Some(buffer.clone()) + } else { + None + } + } else { + None + } + }); + + let Some(buffer) = buffer else { continue }; + let operations = + buffer.read_with(&cx, |b, cx| b.serialize_ops(None, cx)); + let operations = operations.await; + let state = buffer.read_with(&cx, |buffer, _| buffer.to_proto()); - for tx in txs.drain(..) 
{ - let _ = tx.send(()); + let initial_state = proto::CreateBufferForPeer { + project_id, + peer_id: Some(peer_id), + variant: Some(proto::create_buffer_for_peer::Variant::State(state)), + }; + if client.send(initial_state).log_err().is_some() { + let client = client.clone(); + cx.background() + .spawn(async move { + let mut chunks = split_operations(operations).peekable(); + while let Some(chunk) = chunks.next() { + let is_last = chunks.peek().is_none(); + client.send(proto::CreateBufferForPeer { + project_id, + peer_id: Some(peer_id), + variant: Some( + proto::create_buffer_for_peer::Variant::Chunk( + proto::BufferChunk { + buffer_id, + operations: chunk, + is_last, + }, + ), + ), + })?; + } + anyhow::Ok(()) + }) + .await + .log_err(); + } + } } } }), @@ -4493,15 +4539,13 @@ impl Project { &mut cx, ) .await; + project.update(&mut cx, |project, _| { project.loading_local_worktrees.remove(&path); }); - let worktree = worktree?; - - project - .update(&mut cx, |project, cx| project.add_worktree(&worktree, cx)) - .await; + let worktree = worktree?; + project.update(&mut cx, |project, cx| project.add_worktree(&worktree, cx)); Ok(worktree) } .map_err(Arc::new) @@ -4517,11 +4561,7 @@ impl Project { }) } - pub fn remove_worktree( - &mut self, - id_to_remove: WorktreeId, - cx: &mut ModelContext, - ) -> impl Future { + pub fn remove_worktree(&mut self, id_to_remove: WorktreeId, cx: &mut ModelContext) { self.worktrees.retain(|worktree| { if let Some(worktree) = worktree.upgrade(cx) { let id = worktree.read(cx).id(); @@ -4535,14 +4575,10 @@ impl Project { false } }); - self.metadata_changed(cx) + self.metadata_changed(cx); } - fn add_worktree( - &mut self, - worktree: &ModelHandle, - cx: &mut ModelContext, - ) -> impl Future { + fn add_worktree(&mut self, worktree: &ModelHandle, cx: &mut ModelContext) { cx.observe(worktree, |_, _, cx| cx.notify()).detach(); if worktree.read(cx).is_local() { cx.subscribe(worktree, |this, worktree, event, cx| match event { @@ -4575,7 +4611,7 @@ impl Project { .detach(); cx.emit(Event::WorktreeAdded); - self.metadata_changed(cx) + self.metadata_changed(cx); } fn update_local_worktree_buffers( @@ -5963,47 +5999,11 @@ impl Project { cx: &mut AppContext, ) -> u64 { let buffer_id = buffer.read(cx).remote_id(); - if let Some(project_id) = self.remote_id() { - let shared_buffers = self.shared_buffers.entry(peer_id).or_default(); - if shared_buffers.insert(buffer_id) { - let buffer = buffer.clone(); - let operations = buffer.read(cx).serialize_ops(None, cx); - let client = self.client.clone(); - cx.spawn(move |cx| async move { - let operations = operations.await; - let state = buffer.read_with(&cx, |buffer, _| buffer.to_proto()); - - client.send(proto::CreateBufferForPeer { - project_id, - peer_id: Some(peer_id), - variant: Some(proto::create_buffer_for_peer::Variant::State(state)), - })?; - - cx.background() - .spawn(async move { - let mut chunks = split_operations(operations).peekable(); - while let Some(chunk) = chunks.next() { - let is_last = chunks.peek().is_none(); - client.send(proto::CreateBufferForPeer { - project_id, - peer_id: Some(peer_id), - variant: Some(proto::create_buffer_for_peer::Variant::Chunk( - proto::BufferChunk { - buffer_id, - operations: chunk, - is_last, - }, - )), - })?; - } - anyhow::Ok(()) - }) - .await - }) - .detach() - } + if let Some(ProjectClientState::Local { updates_tx, .. 
}) = &self.client_state { + updates_tx + .unbounded_send(LocalProjectUpdate::CreateBufferForPeer { peer_id, buffer_id }) + .ok(); } - buffer_id } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 334578d67dbf37ffbcdfd4e4f67b898df8c0e608..739153fc78882c33e4d09dce958d264363634a97 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -1305,10 +1305,8 @@ impl Workspace { RemoveWorktreeFromProject(worktree_id): &RemoveWorktreeFromProject, cx: &mut ViewContext, ) { - let future = self - .project + self.project .update(cx, |project, cx| project.remove_worktree(*worktree_id, cx)); - cx.foreground().spawn(future).detach(); } fn project_path_for_path( @@ -3266,9 +3264,7 @@ mod tests { ); // Remove a project folder - project - .update(cx, |project, cx| project.remove_worktree(worktree_id, cx)) - .await; + project.update(cx, |project, cx| project.remove_worktree(worktree_id, cx)); assert_eq!( cx.current_window_title(window_id).as_deref(), Some("one.txt — root2") From 189784f5fd0b58345cd397a69310d776dcee3e52 Mon Sep 17 00:00:00 2001 From: Julia Date: Mon, 10 Apr 2023 16:45:15 -0400 Subject: [PATCH 47/80] In the case of Github release deserialize error, log response text Co-Authored-By: Joseph Lyons --- crates/util/src/github.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/crates/util/src/github.rs b/crates/util/src/github.rs index 5170bd6f4f397c77c505c606401e7f3299637b7f..3bb4baa2937536f6c77d407ec22bea1c0320829f 100644 --- a/crates/util/src/github.rs +++ b/crates/util/src/github.rs @@ -9,13 +9,13 @@ pub struct GitHubLspBinaryVersion { pub url: String, } -#[derive(Deserialize)] +#[derive(Deserialize, Debug)] pub struct GithubRelease { pub name: String, pub assets: Vec, } -#[derive(Deserialize)] +#[derive(Deserialize, Debug)] pub struct GithubReleaseAsset { pub name: String, pub browser_download_url: String, @@ -40,7 +40,13 @@ pub async fn latest_github_release( .await .context("error reading latest release")?; - let release: GithubRelease = - serde_json::from_slice(body.as_slice()).context("error deserializing latest release")?; - Ok(release) + let release = serde_json::from_slice::(body.as_slice()); + if release.is_err() { + log::error!( + "Github API response text: {:?}", + String::from_utf8_lossy(body.as_slice()) + ); + } + + release.context("error deserializing latest release") } From e853e77d598e24f095b052d95eb9436d70e86109 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 10 Apr 2023 16:03:49 -0700 Subject: [PATCH 48/80] Upgrade postage for oneshot channel drop fix Previously, dropping a oneshot sender didn't wake the receiver. 
--- Cargo.lock | 5 +++-- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4eb8ac1f954a4dceb62e60c5ceb2890dfeafe21a..4cc7ebc094c731b3f40bdcf5b86c676db101090b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4589,14 +4589,15 @@ checksum = "5da3b0203fd7ee5720aa0b5e790b591aa5d3f41c3ed2c34a3a393382198af2f7" [[package]] name = "postage" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a63d25391d04a097954b76aba742b6b5b74f213dfe3dbaeeb36e8ddc1c657f0b" +checksum = "af3fb618632874fb76937c2361a7f22afd393c982a2165595407edc75b06d3c1" dependencies = [ "atomic", "crossbeam-queue", "futures 0.3.25", "log", + "parking_lot 0.12.1", "pin-project", "pollster", "static_assertions", diff --git a/Cargo.toml b/Cargo.toml index 75178371d2f8b0784bda77074b56f4459f76ccf7..8113c0cfcfd93c9518f8f6730e4c4dbae2cef7a5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,7 +75,7 @@ serde = { version = "1.0", features = ["derive", "rc"] } serde_derive = { version = "1.0", features = ["deserialize_in_place"] } serde_json = { version = "1.0", features = ["preserve_order", "raw_value"] } rand = { version = "0.8" } -postage = { version = "0.4.1", features = ["futures-traits"] } +postage = { version = "0.5", features = ["futures-traits"] } [patch.crates-io] tree-sitter = { git = "https://github.com/tree-sitter/tree-sitter", rev = "c51896d32dcc11a38e41f36e3deb1a6a9c4f4b14" } From 25e3c4e58638f349de098083d8f1f5b114b73c7a Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 10 Apr 2023 16:06:28 -0700 Subject: [PATCH 49/80] Fix leak when project is unshared while LSP handler waits for edits --- crates/language/src/buffer.rs | 12 ++++--- crates/language/src/proto.rs | 8 ++--- crates/project/src/lsp_command.rs | 40 +++++++++++------------ crates/project/src/project.rs | 42 +++++++++++++----------- crates/project/src/worktree.rs | 2 +- crates/text/src/text.rs | 54 ++++++++++++++++++++++--------- 6 files changed, 95 insertions(+), 63 deletions(-) diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index 8c9f34789fa85b898eeb40b3ef9b112d4078612d..fa8368f20bb897d8afea9165d6101af39e05aa9b 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -377,7 +377,7 @@ impl Buffer { rpc::proto::LineEnding::from_i32(message.line_ending) .ok_or_else(|| anyhow!("missing line_ending"))?, )); - this.saved_version = proto::deserialize_version(message.saved_version); + this.saved_version = proto::deserialize_version(&message.saved_version); this.saved_version_fingerprint = proto::deserialize_fingerprint(&message.saved_version_fingerprint)?; this.saved_mtime = message @@ -1309,21 +1309,25 @@ impl Buffer { pub fn wait_for_edits( &mut self, edit_ids: impl IntoIterator, - ) -> impl Future { + ) -> impl Future> { self.text.wait_for_edits(edit_ids) } pub fn wait_for_anchors<'a>( &mut self, anchors: impl IntoIterator, - ) -> impl Future { + ) -> impl Future> { self.text.wait_for_anchors(anchors) } - pub fn wait_for_version(&mut self, version: clock::Global) -> impl Future { + pub fn wait_for_version(&mut self, version: clock::Global) -> impl Future> { self.text.wait_for_version(version) } + pub fn give_up_waiting(&mut self) { + self.text.give_up_waiting(); + } + pub fn set_active_selections( &mut self, selections: Arc<[Selection]>, diff --git a/crates/language/src/proto.rs b/crates/language/src/proto.rs index 1b95e3ace9a21615bb803d3ad4f4015e5d17188b..1f6ecd0a90fbd39e9841b380d7dbd7ea0fdf91d6 
100644 --- a/crates/language/src/proto.rs +++ b/crates/language/src/proto.rs @@ -220,7 +220,7 @@ pub fn deserialize_operation(message: proto::Operation) -> Result EditOperation local: edit.local_timestamp, lamport: edit.lamport_timestamp, }, - version: deserialize_version(edit.version), + version: deserialize_version(&edit.version), ranges: edit.ranges.into_iter().map(deserialize_range).collect(), new_text: edit.new_text.into_iter().map(Arc::from).collect(), } @@ -509,7 +509,7 @@ pub fn deserialize_transaction(transaction: proto::Transaction) -> Result Range { FullOffset(range.start as usize)..FullOffset(range.end as usize) } -pub fn deserialize_version(message: Vec) -> clock::Global { +pub fn deserialize_version(message: &[proto::VectorClockEntry]) -> clock::Global { let mut version = clock::Global::new(); for entry in message { version.observe(clock::Local { diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index 27c87125061fe4462bbcaa87529613b897a03d91..b6c5c633f0568c8fa0697b8d2488474e42b93285 100644 --- a/crates/project/src/lsp_command.rs +++ b/crates/project/src/lsp_command.rs @@ -161,9 +161,9 @@ impl LspCommand for PrepareRename { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), @@ -199,9 +199,9 @@ impl LspCommand for PrepareRename { if message.can_rename { buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; let start = message.start.and_then(deserialize_anchor); let end = message.end.and_then(deserialize_anchor); Ok(start.zip(end).map(|(start, end)| start..end)) @@ -281,9 +281,9 @@ impl LspCommand for PerformRename { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), new_name: message.new_name, @@ -378,9 +378,9 @@ impl LspCommand for GetDefinition { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), }) @@ -464,9 +464,9 @@ impl LspCommand for GetTypeDefinition { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), }) @@ -537,7 +537,7 @@ async fn location_links_from_proto( .ok_or_else(|| anyhow!("missing origin end"))?; buffer .update(&mut cx, |buffer, _| buffer.wait_for_anchors([&start, &end])) - .await; + .await?; Some(Location { buffer, range: start..end, @@ -562,7 +562,7 @@ async fn location_links_from_proto( .ok_or_else(|| anyhow!("missing target end"))?; buffer .update(&mut cx, |buffer, _| buffer.wait_for_anchors([&start, &end])) - 
.await; + .await?; let target = Location { buffer, range: start..end, @@ -774,9 +774,9 @@ impl LspCommand for GetReferences { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), }) @@ -827,7 +827,7 @@ impl LspCommand for GetReferences { .ok_or_else(|| anyhow!("missing target end"))?; target_buffer .update(&mut cx, |buffer, _| buffer.wait_for_anchors([&start, &end])) - .await; + .await?; locations.push(Location { buffer: target_buffer, range: start..end, @@ -915,9 +915,9 @@ impl LspCommand for GetDocumentHighlights { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), }) @@ -965,7 +965,7 @@ impl LspCommand for GetDocumentHighlights { .ok_or_else(|| anyhow!("missing target end"))?; buffer .update(&mut cx, |buffer, _| buffer.wait_for_anchors([&start, &end])) - .await; + .await?; let kind = match proto::document_highlight::Kind::from_i32(highlight.kind) { Some(proto::document_highlight::Kind::Text) => DocumentHighlightKind::TEXT, Some(proto::document_highlight::Kind::Read) => DocumentHighlightKind::READ, @@ -1117,9 +1117,9 @@ impl LspCommand for GetHover { .ok_or_else(|| anyhow!("invalid position"))?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(message.version)) + buffer.wait_for_version(deserialize_version(&message.version)) }) - .await; + .await?; Ok(Self { position: buffer.read_with(&cx, |buffer, _| position.to_point_utf16(buffer)), }) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 655425a2a873029219e8cc778314a5a8aef3368a..803cac6a95e9bb4e7a64661e0865fade56bdcbf9 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1182,6 +1182,11 @@ impl Project { } for open_buffer in self.opened_buffers.values_mut() { + // Wake up any tasks waiting for peers' edits to this buffer. 
+ if let Some(buffer) = open_buffer.upgrade(cx) { + buffer.update(cx, |buffer, _| buffer.give_up_waiting()); + } + if let OpenBuffer::Strong(buffer) = open_buffer { *open_buffer = OpenBuffer::Weak(buffer.downgrade()); } @@ -3738,9 +3743,9 @@ impl Project { } else { source_buffer_handle .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(response.version)) + buffer.wait_for_version(deserialize_version(&response.version)) }) - .await; + .await?; let completions = response.completions.into_iter().map(|completion| { language::proto::deserialize_completion(completion, language.clone()) @@ -3831,7 +3836,7 @@ impl Project { .update(&mut cx, |buffer, _| { buffer.wait_for_edits(transaction.edit_ids.iter().copied()) }) - .await; + .await?; if push_to_history { buffer_handle.update(&mut cx, |buffer, _| { buffer.push_transaction(transaction.clone(), Instant::now()); @@ -3939,9 +3944,9 @@ impl Project { } else { buffer_handle .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(response.version)) + buffer.wait_for_version(deserialize_version(&response.version)) }) - .await; + .await?; response .actions @@ -5425,8 +5430,6 @@ impl Project { mut cx: AsyncAppContext, ) -> Result { let buffer_id = envelope.payload.buffer_id; - let requested_version = deserialize_version(envelope.payload.version); - let (project_id, buffer) = this.update(&mut cx, |this, cx| { let project_id = this.remote_id().ok_or_else(|| anyhow!("not connected"))?; let buffer = this @@ -5434,13 +5437,14 @@ impl Project { .get(&buffer_id) .and_then(|buffer| buffer.upgrade(cx)) .ok_or_else(|| anyhow!("unknown buffer id {}", buffer_id))?; - Ok::<_, anyhow::Error>((project_id, buffer)) + anyhow::Ok((project_id, buffer)) })?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(requested_version) + buffer.wait_for_version(deserialize_version(&envelope.payload.version)) }) - .await; + .await?; + let buffer_id = buffer.read_with(&cx, |buffer, _| buffer.remote_id()); let (saved_version, fingerprint, mtime) = this .update(&mut cx, |this, cx| this.save_buffer(buffer, cx)) @@ -5503,7 +5507,7 @@ impl Project { this.shared_buffers.entry(guest_id).or_default().clear(); for buffer in envelope.payload.buffers { let buffer_id = buffer.id; - let remote_version = language::proto::deserialize_version(buffer.version); + let remote_version = language::proto::deserialize_version(&buffer.version); if let Some(buffer) = this.buffer_for_id(buffer_id, cx) { this.shared_buffers .entry(guest_id) @@ -5619,10 +5623,10 @@ impl Project { .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id)) })?; - let version = deserialize_version(envelope.payload.version); + let version = deserialize_version(&envelope.payload.version); buffer .update(&mut cx, |buffer, _| buffer.wait_for_version(version)) - .await; + .await?; let version = buffer.read_with(&cx, |buffer, _| buffer.version()); let position = envelope @@ -5710,9 +5714,9 @@ impl Project { })?; buffer .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(envelope.payload.version)) + buffer.wait_for_version(deserialize_version(&envelope.payload.version)) }) - .await; + .await?; let version = buffer.read_with(&cx, |buffer, _| buffer.version()); let code_actions = this.update(&mut cx, |this, cx| { @@ -5979,7 +5983,7 @@ impl Project { .update(&mut cx, |buffer, _| { buffer.wait_for_edits(transaction.edit_ids.iter().copied()) }) - .await; + .await?; if push_to_history { buffer.update(&mut cx, |buffer, _| { @@ -6098,7 +6102,7 @@ 
impl Project { let send_updates_for_buffers = response.buffers.into_iter().map(|buffer| { let client = client.clone(); let buffer_id = buffer.id; - let remote_version = language::proto::deserialize_version(buffer.version); + let remote_version = language::proto::deserialize_version(&buffer.version); this.read_with(&cx, |this, cx| { if let Some(buffer) = this.buffer_for_id(buffer_id, cx) { let operations = buffer.read(cx).serialize_ops(Some(remote_version), cx); @@ -6263,7 +6267,7 @@ impl Project { mut cx: AsyncAppContext, ) -> Result<()> { let fingerprint = deserialize_fingerprint(&envelope.payload.fingerprint)?; - let version = deserialize_version(envelope.payload.version); + let version = deserialize_version(&envelope.payload.version); let mtime = envelope .payload .mtime @@ -6296,7 +6300,7 @@ impl Project { mut cx: AsyncAppContext, ) -> Result<()> { let payload = envelope.payload; - let version = deserialize_version(payload.version); + let version = deserialize_version(&payload.version); let fingerprint = deserialize_fingerprint(&payload.fingerprint)?; let line_ending = deserialize_line_ending( proto::LineEnding::from_i32(payload.line_ending) diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index f845a8f63712b36e10c4ce2ba90c49b654ddc6ca..792e09a00a29b637fee8dda3bf92c0673e189000 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -1064,7 +1064,7 @@ impl RemoteWorktree { version: serialize_version(&version), }) .await?; - let version = deserialize_version(response.version); + let version = deserialize_version(&response.version); let fingerprint = deserialize_fingerprint(&response.fingerprint)?; let mtime = response .mtime diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs index c9341df23de9cfdce7c2a6137e28cddbecef6f30..b857ec5d5e900dc9dae10f2695403c63c5327b6d 100644 --- a/crates/text/src/text.rs +++ b/crates/text/src/text.rs @@ -11,14 +11,14 @@ mod tests; mod undo_map; pub use anchor::*; -use anyhow::Result; +use anyhow::{anyhow, Result}; use clock::ReplicaId; use collections::{HashMap, HashSet}; use fs::LineEnding; use locator::Locator; use operation_queue::OperationQueue; pub use patch::Patch; -use postage::{barrier, oneshot, prelude::*}; +use postage::{oneshot, prelude::*}; pub use rope::*; pub use selection::*; @@ -52,7 +52,7 @@ pub struct Buffer { pub lamport_clock: clock::Lamport, subscriptions: Topic, edit_id_resolvers: HashMap>>, - version_barriers: Vec<(clock::Global, barrier::Sender)>, + wait_for_version_txs: Vec<(clock::Global, oneshot::Sender<()>)>, } #[derive(Clone)] @@ -522,7 +522,7 @@ impl Buffer { lamport_clock, subscriptions: Default::default(), edit_id_resolvers: Default::default(), - version_barriers: Default::default(), + wait_for_version_txs: Default::default(), } } @@ -793,8 +793,14 @@ impl Buffer { } } } - self.version_barriers - .retain(|(version, _)| !self.snapshot.version().observed_all(version)); + self.wait_for_version_txs.retain_mut(|(version, tx)| { + if self.snapshot.version().observed_all(version) { + tx.try_send(()).ok(); + false + } else { + true + } + }); Ok(()) } @@ -1305,7 +1311,7 @@ impl Buffer { pub fn wait_for_edits( &mut self, edit_ids: impl IntoIterator, - ) -> impl 'static + Future { + ) -> impl 'static + Future> { let mut futures = Vec::new(); for edit_id in edit_ids { if !self.version.observed(edit_id) { @@ -1317,15 +1323,18 @@ impl Buffer { async move { for mut future in futures { - future.recv().await; + if future.recv().await.is_none() { + Err(anyhow!("gave up waiting 
for edits"))?; + } } + Ok(()) } } pub fn wait_for_anchors<'a>( &mut self, anchors: impl IntoIterator, - ) -> impl 'static + Future { + ) -> impl 'static + Future> { let mut futures = Vec::new(); for anchor in anchors { if !self.version.observed(anchor.timestamp) @@ -1343,21 +1352,36 @@ impl Buffer { async move { for mut future in futures { - future.recv().await; + if future.recv().await.is_none() { + Err(anyhow!("gave up waiting for anchors"))?; + } } + Ok(()) } } - pub fn wait_for_version(&mut self, version: clock::Global) -> impl Future { - let (tx, mut rx) = barrier::channel(); + pub fn wait_for_version(&mut self, version: clock::Global) -> impl Future> { + let mut rx = None; if !self.snapshot.version.observed_all(&version) { - self.version_barriers.push((version, tx)); + let channel = oneshot::channel(); + self.wait_for_version_txs.push((version, channel.0)); + rx = Some(channel.1); } async move { - rx.recv().await; + if let Some(mut rx) = rx { + if rx.recv().await.is_none() { + Err(anyhow!("gave up waiting for version"))?; + } + } + Ok(()) } } + pub fn give_up_waiting(&mut self) { + self.edit_id_resolvers.clear(); + self.wait_for_version_txs.clear(); + } + fn resolve_edit(&mut self, edit_id: clock::Local) { for mut tx in self .edit_id_resolvers @@ -1365,7 +1389,7 @@ impl Buffer { .into_iter() .flatten() { - let _ = tx.try_send(()); + tx.try_send(()).ok(); } } } From abfbba68f055fd145c63c503b2246ecad883d3ae Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 10 Apr 2023 18:28:34 -0700 Subject: [PATCH 50/80] Improve randomized test assertion message when diff base is wrong --- crates/collab/src/tests/randomized_integration_tests.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/collab/src/tests/randomized_integration_tests.rs b/crates/collab/src/tests/randomized_integration_tests.rs index eb78ffd47a555f1f2e54f1907a4d899f85902ee9..00273722c4312217b533c9f3a25fc12b0387acc7 100644 --- a/crates/collab/src/tests/randomized_integration_tests.rs +++ b/crates/collab/src/tests/randomized_integration_tests.rs @@ -973,7 +973,11 @@ fn check_consistency_between_clients(clients: &[(Rc, TestAppContext) host_buffer.read_with(host_cx, |b, _| b.diff_base().map(ToString::to_string)); let guest_diff_base = guest_buffer .read_with(client_cx, |b, _| b.diff_base().map(ToString::to_string)); - assert_eq!(guest_diff_base, host_diff_base); + assert_eq!( + guest_diff_base, host_diff_base, + "guest {} diff base does not match host's for path {path:?} in project {project_id}", + client.username + ); let host_saved_version = host_buffer.read_with(host_cx, |b, _| b.saved_version().clone()); From 67cb0462988ae2fcb145d3587b5bd3b7a1a647b1 Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Fri, 7 Apr 2023 19:36:10 -0400 Subject: [PATCH 51/80] Add tab context menu --- crates/workspace/src/item.rs | 2 +- crates/workspace/src/pane.rs | 333 ++++++++++++++++++++++++++++++----- crates/zed/src/zed.rs | 12 +- 3 files changed, 300 insertions(+), 47 deletions(-) diff --git a/crates/workspace/src/item.rs b/crates/workspace/src/item.rs index f9b3550003473778435e143c5a785db2bbe83572..f7ffe64f972d99ccb9b172f3fc70a4a0203f91b4 100644 --- a/crates/workspace/src/item.rs +++ b/crates/workspace/src/item.rs @@ -402,7 +402,7 @@ impl ItemHandle for ViewHandle { for item_event in T::to_item_events(event).into_iter() { match item_event { ItemEvent::CloseItem => { - Pane::close_item(workspace, pane, item.id(), cx) + Pane::close_item_by_id(workspace, pane, item.id(), cx) .detach_and_log_err(cx); return; } diff 
--git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index 4d2ff32ef3e09bdc67562ea9389a3a3684286b49..d489f1b723987ee8c5823127ca2e1d9e9e6431a2 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -37,6 +37,24 @@ use util::ResultExt; #[derive(Clone, Deserialize, PartialEq)] pub struct ActivateItem(pub usize); +#[derive(Clone, PartialEq)] +pub struct CloseItemById { + pub item_id: usize, + pub pane: WeakViewHandle, +} + +#[derive(Clone, PartialEq)] +pub struct CloseItemsToTheLeftById { + pub item_id: usize, + pub pane: WeakViewHandle, +} + +#[derive(Clone, PartialEq)] +pub struct CloseItemsToTheRightById { + pub item_id: usize, + pub pane: WeakViewHandle, +} + actions!( pane, [ @@ -57,12 +75,6 @@ actions!( ] ); -#[derive(Clone, PartialEq)] -pub struct CloseItem { - pub item_id: usize, - pub pane: WeakViewHandle, -} - #[derive(Clone, PartialEq)] pub struct MoveItem { pub item_id: usize, @@ -92,11 +104,21 @@ pub struct DeployDockMenu; #[derive(Clone, PartialEq)] pub struct DeployNewMenu; +#[derive(Clone, PartialEq)] +pub struct DeployTabContextMenu { + pub position: Vector2F, + pub item_id: usize, + pub pane: WeakViewHandle, +} + impl_actions!(pane, [GoBack, GoForward, ActivateItem]); impl_internal_actions!( pane, [ - CloseItem, + CloseItemById, + CloseItemsToTheLeftById, + CloseItemsToTheRightById, + DeployTabContextMenu, DeploySplitMenu, DeployNewMenu, DeployDockMenu, @@ -127,14 +149,34 @@ pub fn init(cx: &mut AppContext) { cx.add_async_action(Pane::close_items_to_the_left); cx.add_async_action(Pane::close_items_to_the_right); cx.add_async_action(Pane::close_all_items); - cx.add_async_action(|workspace: &mut Workspace, action: &CloseItem, cx| { + cx.add_async_action(|workspace: &mut Workspace, action: &CloseItemById, cx| { let pane = action.pane.upgrade(cx)?; - let task = Pane::close_item(workspace, pane, action.item_id, cx); + let task = Pane::close_item_by_id(workspace, pane, action.item_id, cx); Some(cx.foreground().spawn(async move { task.await?; Ok(()) })) }); + cx.add_async_action( + |workspace: &mut Workspace, action: &CloseItemsToTheLeftById, cx| { + let pane = action.pane.upgrade(cx)?; + let task = Pane::close_items_to_the_left_by_id(workspace, pane, action.item_id, cx); + Some(cx.foreground().spawn(async move { + task.await?; + Ok(()) + })) + }, + ); + cx.add_async_action( + |workspace: &mut Workspace, action: &CloseItemsToTheRightById, cx| { + let pane = action.pane.upgrade(cx)?; + let task = Pane::close_items_to_the_right_by_id(workspace, pane, action.item_id, cx); + Some(cx.foreground().spawn(async move { + task.await?; + Ok(()) + })) + }, + ); cx.add_action( |workspace, MoveItem { @@ -168,6 +210,7 @@ pub fn init(cx: &mut AppContext) { cx.add_action(Pane::deploy_split_menu); cx.add_action(Pane::deploy_dock_menu); cx.add_action(Pane::deploy_new_menu); + cx.add_action(Pane::deploy_tab_context_menu); cx.add_action(|workspace: &mut Workspace, _: &ReopenClosedItem, cx| { Pane::reopen_closed_item(workspace, cx).detach(); }); @@ -214,6 +257,7 @@ pub struct Pane { nav_history: Rc>, toolbar: ViewHandle, tab_bar_context_menu: TabBarContextMenu, + tab_context_menu: ViewHandle, docked: Option, _background_actions: BackgroundActions, _workspace_id: usize, @@ -319,6 +363,7 @@ impl Pane { kind: TabBarContextMenuKind::New, handle: context_menu, }, + tab_context_menu: cx.add_view(ContextMenu::new), docked, _background_actions: background_actions, _workspace_id: workspace_id, @@ -742,9 +787,7 @@ impl Pane { let pane = pane_handle.read(cx); let 
active_item_id = pane.items[pane.active_item_index].id(); - let task = Self::close_items(workspace, pane_handle, cx, move |item_id| { - item_id == active_item_id - }); + let task = Self::close_item_by_id(workspace, pane_handle, active_item_id, cx); Some(cx.foreground().spawn(async move { task.await?; @@ -752,6 +795,17 @@ impl Pane { })) } + pub fn close_item_by_id( + workspace: &mut Workspace, + pane: ViewHandle, + item_id_to_close: usize, + cx: &mut ViewContext, + ) -> Task> { + Self::close_items(workspace, pane, cx, move |view_id| { + view_id == item_id_to_close + }) + } + pub fn close_inactive_items( workspace: &mut Workspace, _: &CloseInactiveItems, @@ -804,20 +858,35 @@ impl Pane { let pane = pane_handle.read(cx); let active_item_id = pane.items[pane.active_item_index].id(); + let task = Self::close_items_to_the_left_by_id(workspace, pane_handle, active_item_id, cx); + + Some(cx.foreground().spawn(async move { + task.await?; + Ok(()) + })) + } + + pub fn close_items_to_the_left_by_id( + workspace: &mut Workspace, + pane: ViewHandle, + item_id: usize, + cx: &mut ViewContext, + ) -> Task> { let item_ids: Vec<_> = pane + .read(cx) .items() - .take_while(|item| item.id() != active_item_id) + .take_while(|item| item.id() != item_id) .map(|item| item.id()) .collect(); - let task = Self::close_items(workspace, pane_handle, cx, move |item_id| { + let task = Self::close_items(workspace, pane, cx, move |item_id| { item_ids.contains(&item_id) }); - Some(cx.foreground().spawn(async move { + cx.foreground().spawn(async move { task.await?; Ok(()) - })) + }) } pub fn close_items_to_the_right( @@ -829,21 +898,36 @@ impl Pane { let pane = pane_handle.read(cx); let active_item_id = pane.items[pane.active_item_index].id(); + let task = Self::close_items_to_the_right_by_id(workspace, pane_handle, active_item_id, cx); + + Some(cx.foreground().spawn(async move { + task.await?; + Ok(()) + })) + } + + pub fn close_items_to_the_right_by_id( + workspace: &mut Workspace, + pane: ViewHandle, + item_id: usize, + cx: &mut ViewContext, + ) -> Task> { let item_ids: Vec<_> = pane + .read(cx) .items() .rev() - .take_while(|item| item.id() != active_item_id) + .take_while(|item| item.id() != item_id) .map(|item| item.id()) .collect(); - let task = Self::close_items(workspace, pane_handle, cx, move |item_id| { + let task = Self::close_items(workspace, pane, cx, move |item_id| { item_ids.contains(&item_id) }); - Some(cx.foreground().spawn(async move { + cx.foreground().spawn(async move { task.await?; Ok(()) - })) + }) } pub fn close_all_items( @@ -861,17 +945,6 @@ impl Pane { })) } - pub fn close_item( - workspace: &mut Workspace, - pane: ViewHandle, - item_id_to_close: usize, - cx: &mut ViewContext, - ) -> Task> { - Self::close_items(workspace, pane, cx, move |view_id| { - view_id == item_id_to_close - }) - } - pub fn close_items( workspace: &mut Workspace, pane: ViewHandle, @@ -1207,6 +1280,65 @@ impl Pane { self.tab_bar_context_menu.kind = TabBarContextMenuKind::New; } + fn deploy_tab_context_menu( + &mut self, + action: &DeployTabContextMenu, + cx: &mut ViewContext, + ) { + let target_item_id = action.item_id; + let target_pane = action.pane.clone(); + let active_item_id = self.items[self.active_item_index].id(); + let is_active_item = target_item_id == active_item_id; + + let mut options = Vec::new(); + + // TODO: Explain why we are doing this - for the key bindings + options.push(if is_active_item { + ContextMenuItem::item("Close Active Item", CloseActiveItem) + } else { + ContextMenuItem::item( + "Close Inactive 
Item", + CloseItemById { + item_id: target_item_id, + pane: target_pane.clone(), + }, + ) + }); + // This should really be called "close others" and the behaviour should be dynamically based on the tab the action is ran on. Currenlty, this is a weird action because you can run it on a non-active tab and it will close everything by the actual active tab + options.push(ContextMenuItem::item( + "Close Inactive Items", + CloseInactiveItems, + )); + options.push(ContextMenuItem::item("Close Clean Items", CloseCleanItems)); + options.push(if is_active_item { + ContextMenuItem::item("Close Items To The Left", CloseItemsToTheLeft) + } else { + ContextMenuItem::item( + "Close Items To The Left", + CloseItemsToTheLeftById { + item_id: target_item_id, + pane: target_pane.clone(), + }, + ) + }); + options.push(if is_active_item { + ContextMenuItem::item("Close Items To The Right", CloseItemsToTheRight) + } else { + ContextMenuItem::item( + "Close Items To The Right", + CloseItemsToTheRightById { + item_id: target_item_id, + pane: target_pane.clone(), + }, + ) + }); + options.push(ContextMenuItem::item("Close All Items", CloseAllItems)); + + self.tab_context_menu.update(cx, |menu, cx| { + menu.show(action.position, AnchorCorner::TopLeft, options, cx); + }); + } + pub fn toolbar(&self) -> &ViewHandle { &self.toolbar } @@ -1277,13 +1409,22 @@ impl Pane { }) .on_click(MouseButton::Middle, { let item = item.clone(); + let pane = pane.clone(); move |_, cx: &mut EventContext| { - cx.dispatch_action(CloseItem { + cx.dispatch_action(CloseItemById { item_id: item.id(), pane: pane.clone(), }) } }) + .on_down(MouseButton::Right, move |e, cx| { + let item = item.clone(); + cx.dispatch_action(DeployTabContextMenu { + position: e.position, + item_id: item.id(), + pane: pane.clone(), + }); + }) .boxed() } }); @@ -1454,7 +1595,7 @@ impl Pane { .on_click(MouseButton::Left, { let pane = pane.clone(); move |_, cx| { - cx.dispatch_action(CloseItem { + cx.dispatch_action(CloseItemById { item_id, pane: pane.clone(), }) @@ -1624,6 +1765,7 @@ impl View for Pane { .flex(1., true) .boxed() }) + .with_child(ChildView::new(&self.tab_context_menu, cx).boxed()) .boxed() } else { enum EmptyPane {} @@ -2219,14 +2361,14 @@ mod tests { let (_, workspace) = cx.add_window(|cx| Workspace::test_new(project.clone(), cx)); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); - add_labled_item(&workspace, &pane, "A", cx); - add_labled_item(&workspace, &pane, "B", cx); - add_labled_item(&workspace, &pane, "C", cx); - add_labled_item(&workspace, &pane, "D", cx); + add_labeled_item(&workspace, &pane, "A", false, cx); + add_labeled_item(&workspace, &pane, "B", false, cx); + add_labeled_item(&workspace, &pane, "C", false, cx); + add_labeled_item(&workspace, &pane, "D", false, cx); assert_item_labels(&pane, ["A", "B", "C", "D*"], cx); pane.update(cx, |pane, cx| pane.activate_item(1, false, false, cx)); - add_labled_item(&workspace, &pane, "1", cx); + add_labeled_item(&workspace, &pane, "1", false, cx); assert_item_labels(&pane, ["A", "B", "1*", "C", "D"], cx); workspace.update(cx, |workspace, cx| { @@ -2257,14 +2399,125 @@ mod tests { assert_item_labels(&pane, ["A*"], cx); } - fn add_labled_item( + #[gpui::test] + async fn test_close_inactive_items(deterministic: Arc, cx: &mut TestAppContext) { + Settings::test_async(cx); + let fs = FakeFs::new(cx.background()); + + let project = Project::test(fs, None, cx).await; + let (_, workspace) = cx.add_window(|cx| Workspace::test_new(project.clone(), cx)); + let pane = 
workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); + + set_labeled_items(&workspace, &pane, ["A", "B", "C*", "D", "E"], cx); + + workspace.update(cx, |workspace, cx| { + Pane::close_inactive_items(workspace, &CloseInactiveItems, cx); + }); + + deterministic.run_until_parked(); + assert_item_labels(&pane, ["C*"], cx); + } + + #[gpui::test] + async fn test_close_clean_items(deterministic: Arc, cx: &mut TestAppContext) { + Settings::test_async(cx); + let fs = FakeFs::new(cx.background()); + + let project = Project::test(fs, None, cx).await; + let (_, workspace) = cx.add_window(|cx| Workspace::test_new(project.clone(), cx)); + let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); + + add_labeled_item(&workspace, &pane, "A", true, cx); + add_labeled_item(&workspace, &pane, "B", false, cx); + add_labeled_item(&workspace, &pane, "C", false, cx); + add_labeled_item(&workspace, &pane, "D", false, cx); + add_labeled_item(&workspace, &pane, "E", false, cx); + assert_item_labels(&pane, ["A", "B", "C", "D", "E*"], cx); + + workspace.update(cx, |workspace, cx| { + Pane::close_clean_items(workspace, &CloseCleanItems, cx); + }); + + deterministic.run_until_parked(); + assert_item_labels(&pane, ["A*"], cx); + } + + #[gpui::test] + async fn test_close_items_to_the_left( + deterministic: Arc, + cx: &mut TestAppContext, + ) { + Settings::test_async(cx); + let fs = FakeFs::new(cx.background()); + + let project = Project::test(fs, None, cx).await; + let (_, workspace) = cx.add_window(|cx| Workspace::test_new(project.clone(), cx)); + let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); + + set_labeled_items(&workspace, &pane, ["A", "B", "C*", "D", "E"], cx); + + workspace.update(cx, |workspace, cx| { + Pane::close_items_to_the_left(workspace, &CloseItemsToTheLeft, cx); + }); + + deterministic.run_until_parked(); + assert_item_labels(&pane, ["C*", "D", "E"], cx); + } + + #[gpui::test] + async fn test_close_items_to_the_right( + deterministic: Arc, + cx: &mut TestAppContext, + ) { + Settings::test_async(cx); + let fs = FakeFs::new(cx.background()); + + let project = Project::test(fs, None, cx).await; + let (_, workspace) = cx.add_window(|cx| Workspace::test_new(project.clone(), cx)); + let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); + + set_labeled_items(&workspace, &pane, ["A", "B", "C*", "D", "E"], cx); + + workspace.update(cx, |workspace, cx| { + Pane::close_items_to_the_right(workspace, &CloseItemsToTheRight, cx); + }); + + deterministic.run_until_parked(); + assert_item_labels(&pane, ["A", "B", "C*"], cx); + } + + #[gpui::test] + async fn test_close_all_items(deterministic: Arc, cx: &mut TestAppContext) { + Settings::test_async(cx); + let fs = FakeFs::new(cx.background()); + + let project = Project::test(fs, None, cx).await; + let (_, workspace) = cx.add_window(|cx| Workspace::test_new(project.clone(), cx)); + let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); + + add_labeled_item(&workspace, &pane, "A", false, cx); + add_labeled_item(&workspace, &pane, "B", false, cx); + add_labeled_item(&workspace, &pane, "C", false, cx); + assert_item_labels(&pane, ["A", "B", "C*"], cx); + + workspace.update(cx, |workspace, cx| { + Pane::close_all_items(workspace, &CloseAllItems, cx); + }); + + deterministic.run_until_parked(); + assert_item_labels(&pane, [], cx); + } + + fn add_labeled_item( workspace: &ViewHandle, pane: &ViewHandle, label: &str, + is_dirty: bool, cx: &mut 
TestAppContext, ) -> Box> { workspace.update(cx, |workspace, cx| { - let labeled_item = Box::new(cx.add_view(|_| TestItem::new().with_label(label))); + let labeled_item = + Box::new(cx.add_view(|_| TestItem::new().with_label(label).with_dirty(is_dirty))); Pane::add_item( workspace, diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 8aa8bca07b3f38f9ecd0cbbbbb93d0fa5e17fc63..36703fae316361b41e7230c7d1a5389550350895 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -1548,7 +1548,7 @@ mod tests { .update(cx, |workspace, cx| { let editor3_id = editor3.id(); drop(editor3); - Pane::close_item(workspace, workspace.active_pane().clone(), editor3_id, cx) + Pane::close_item_by_id(workspace, workspace.active_pane().clone(), editor3_id, cx) }) .await .unwrap(); @@ -1581,7 +1581,7 @@ mod tests { .update(cx, |workspace, cx| { let editor2_id = editor2.id(); drop(editor2); - Pane::close_item(workspace, workspace.active_pane().clone(), editor2_id, cx) + Pane::close_item_by_id(workspace, workspace.active_pane().clone(), editor2_id, cx) }) .await .unwrap(); @@ -1731,7 +1731,7 @@ mod tests { // Close all the pane items in some arbitrary order. workspace .update(cx, |workspace, cx| { - Pane::close_item(workspace, pane.clone(), file1_item_id, cx) + Pane::close_item_by_id(workspace, pane.clone(), file1_item_id, cx) }) .await .unwrap(); @@ -1739,7 +1739,7 @@ mod tests { workspace .update(cx, |workspace, cx| { - Pane::close_item(workspace, pane.clone(), file4_item_id, cx) + Pane::close_item_by_id(workspace, pane.clone(), file4_item_id, cx) }) .await .unwrap(); @@ -1747,7 +1747,7 @@ mod tests { workspace .update(cx, |workspace, cx| { - Pane::close_item(workspace, pane.clone(), file2_item_id, cx) + Pane::close_item_by_id(workspace, pane.clone(), file2_item_id, cx) }) .await .unwrap(); @@ -1755,7 +1755,7 @@ mod tests { workspace .update(cx, |workspace, cx| { - Pane::close_item(workspace, pane.clone(), file3_item_id, cx) + Pane::close_item_by_id(workspace, pane.clone(), file3_item_id, cx) }) .await .unwrap(); From 643381ce0cb3f523ddf4a8fbc321e604180027e1 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 08:50:26 +0200 Subject: [PATCH 52/80] Make `UpdateDiffBase` a `Foreground` message to prevent reordering --- crates/rpc/src/proto.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 823ffa7a19e120346345b536893adcb869d2a0e5..a27c6ac1bbc50a362a76f49439b9b2b9f04207ff 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -233,7 +233,7 @@ messages!( (UpdateProject, Foreground), (UpdateProjectCollaborator, Foreground), (UpdateWorktree, Foreground), - (UpdateDiffBase, Background), + (UpdateDiffBase, Foreground), (GetPrivateUserInfo, Foreground), (GetPrivateUserInfoResponse, Foreground), ); From 6ba5e06247fda2442754dda67eb6b44ffea8f692 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 10:42:43 +0200 Subject: [PATCH 53/80] Stop waiting for buffers when releasing a remote project --- crates/project/src/project.rs | 45 +++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 803cac6a95e9bb4e7a64661e0865fade56bdcbf9..1c1c91243dcf07a5630fdcf7fd0fec0188ce2e87 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1111,7 +1111,7 @@ impl Project { }), }); - let _ = self.metadata_changed(cx); + self.metadata_changed(cx); 
cx.emit(Event::RemoteIdChanged(Some(project_id))); cx.notify(); Ok(()) @@ -1124,7 +1124,7 @@ impl Project { ) -> Result<()> { self.shared_buffers.clear(); self.set_collaborators_from_proto(message.collaborators, cx)?; - let _ = self.metadata_changed(cx); + self.metadata_changed(cx); Ok(()) } @@ -1160,6 +1160,13 @@ impl Project { } pub fn unshare(&mut self, cx: &mut ModelContext) -> Result<()> { + self.unshare_internal(cx)?; + self.metadata_changed(cx); + cx.notify(); + Ok(()) + } + + fn unshare_internal(&mut self, cx: &mut AppContext) -> Result<()> { if self.is_remote() { return Err(anyhow!("attempted to unshare a remote project")); } @@ -1192,8 +1199,6 @@ impl Project { } } - let _ = self.metadata_changed(cx); - cx.notify(); self.client.send(proto::UnshareProject { project_id: remote_id, })?; @@ -1205,13 +1210,21 @@ impl Project { } pub fn disconnected_from_host(&mut self, cx: &mut ModelContext) { + self.disconnected_from_host_internal(cx); + cx.emit(Event::DisconnectedFromHost); + cx.notify(); + } + + fn disconnected_from_host_internal(&mut self, cx: &mut AppContext) { if let Some(ProjectClientState::Remote { sharing_has_stopped, .. }) = &mut self.client_state { *sharing_has_stopped = true; + self.collaborators.clear(); + for worktree in &self.worktrees { if let Some(worktree) = worktree.upgrade(cx) { worktree.update(cx, |worktree, _| { @@ -1221,8 +1234,17 @@ impl Project { }); } } - cx.emit(Event::DisconnectedFromHost); - cx.notify(); + + for open_buffer in self.opened_buffers.values_mut() { + // Wake up any tasks waiting for peers' edits to this buffer. + if let Some(buffer) = open_buffer.upgrade(cx) { + buffer.update(cx, |buffer, _| buffer.give_up_waiting()); + } + + if let OpenBuffer::Strong(buffer) = open_buffer { + *open_buffer = OpenBuffer::Weak(buffer.downgrade()); + } + } // Wake up all futures currently waiting on a buffer to get opened, // to give them a chance to fail now that we've disconnected. @@ -6183,7 +6205,7 @@ impl Project { } } - let _ = self.metadata_changed(cx); + self.metadata_changed(cx); for (id, _) in old_worktrees_by_id { cx.emit(Event::WorktreeRemoved(id)); } @@ -6577,17 +6599,16 @@ impl<'a> Iterator for PathMatchCandidateSetIter<'a> { impl Entity for Project { type Event = Event; - fn release(&mut self, _: &mut gpui::AppContext) { + fn release(&mut self, cx: &mut gpui::AppContext) { match &self.client_state { - Some(ProjectClientState::Local { remote_id, .. }) => { - let _ = self.client.send(proto::UnshareProject { - project_id: *remote_id, - }); + Some(ProjectClientState::Local { .. }) => { + let _ = self.unshare_internal(cx); } Some(ProjectClientState::Remote { remote_id, .. 
}) => { let _ = self.client.send(proto::LeaveProject { project_id: *remote_id, }); + self.disconnected_from_host_internal(cx); } _ => {} } From 9e6d865882275bf3dbc7f9047d15ed5038a0184f Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 10:43:05 +0200 Subject: [PATCH 54/80] Prevent already dropped model from being upgraded during `release` --- crates/gpui/src/app.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/gpui/src/app.rs b/crates/gpui/src/app.rs index bc38b28bba374f4bfdc1618c3b3b7bf49e3205e2..534f77a34904abbdbd86d3e5811b8a7de2ec529e 100644 --- a/crates/gpui/src/app.rs +++ b/crates/gpui/src/app.rs @@ -2618,7 +2618,7 @@ impl UpgradeModelHandle for AppContext { &self, handle: &WeakModelHandle, ) -> Option> { - if self.models.contains_key(&handle.model_id) { + if self.ref_counts.lock().is_entity_alive(handle.model_id) { Some(ModelHandle::new(handle.model_id, &self.ref_counts)) } else { None @@ -2626,11 +2626,11 @@ impl UpgradeModelHandle for AppContext { } fn model_handle_is_upgradable(&self, handle: &WeakModelHandle) -> bool { - self.models.contains_key(&handle.model_id) + self.ref_counts.lock().is_entity_alive(handle.model_id) } fn upgrade_any_model_handle(&self, handle: &AnyWeakModelHandle) -> Option { - if self.models.contains_key(&handle.model_id) { + if self.ref_counts.lock().is_entity_alive(handle.model_id) { Some(AnyModelHandle::new( handle.model_id, handle.model_type, From ac532cb6fade9fe813ad5d21a6ca06ef17fbcd08 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 14:52:07 +0200 Subject: [PATCH 55/80] Use `LspCommand` to handle completions --- crates/project/src/lsp_command.rs | 209 ++++++++++++++++++++++++++- crates/project/src/project.rs | 227 +----------------------------- 2 files changed, 212 insertions(+), 224 deletions(-) diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index b6c5c633f0568c8fa0697b8d2488474e42b93285..334fe37c3ccc529250f3faaecb3b2ff19020a470 100644 --- a/crates/project/src/lsp_command.rs +++ b/crates/project/src/lsp_command.rs @@ -4,11 +4,13 @@ use crate::{ use anyhow::{anyhow, Result}; use async_trait::async_trait; use client::proto::{self, PeerId}; +use fs::LineEnding; use gpui::{AppContext, AsyncAppContext, ModelHandle}; use language::{ point_from_lsp, point_to_lsp, proto::{deserialize_anchor, deserialize_version, serialize_anchor, serialize_version}, - range_from_lsp, Anchor, Bias, Buffer, CachedLspAdapter, PointUtf16, ToPointUtf16, + range_from_lsp, Anchor, Bias, Buffer, CachedLspAdapter, CharKind, Completion, PointUtf16, + ToOffset, ToPointUtf16, Unclipped, }; use lsp::{DocumentHighlightKind, LanguageServer, ServerCapabilities}; use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag}; @@ -91,6 +93,10 @@ pub(crate) struct GetHover { pub position: PointUtf16, } +pub(crate) struct GetCompletions { + pub position: PointUtf16, +} + #[async_trait(?Send)] impl LspCommand for PrepareRename { type Response = Option>; @@ -1199,3 +1205,204 @@ impl LspCommand for GetHover { message.buffer_id } } + +#[async_trait(?Send)] +impl LspCommand for GetCompletions { + type Response = Vec; + type LspRequest = lsp::request::Completion; + type ProtoRequest = proto::GetCompletions; + + fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::CompletionParams { + lsp::CompletionParams { + text_document_position: lsp::TextDocumentPositionParams::new( + lsp::TextDocumentIdentifier::new(lsp::Url::from_file_path(path).unwrap()), + point_to_lsp(self.position), + ), + 
context: Default::default(), + work_done_progress_params: Default::default(), + partial_result_params: Default::default(), + } + } + + async fn response_from_lsp( + self, + completions: Option, + _: ModelHandle, + buffer: ModelHandle, + cx: AsyncAppContext, + ) -> Result> { + let completions = if let Some(completions) = completions { + match completions { + lsp::CompletionResponse::Array(completions) => completions, + lsp::CompletionResponse::List(list) => list.items, + } + } else { + Default::default() + }; + + let completions = buffer.read_with(&cx, |buffer, _| { + let language = buffer.language().cloned(); + let snapshot = buffer.snapshot(); + let clipped_position = buffer.clip_point_utf16(Unclipped(self.position), Bias::Left); + let mut range_for_token = None; + completions + .into_iter() + .filter_map(move |mut lsp_completion| { + // For now, we can only handle additional edits if they are returned + // when resolving the completion, not if they are present initially. + if lsp_completion + .additional_text_edits + .as_ref() + .map_or(false, |edits| !edits.is_empty()) + { + return None; + } + + let (old_range, mut new_text) = match lsp_completion.text_edit.as_ref() { + // If the language server provides a range to overwrite, then + // check that the range is valid. + Some(lsp::CompletionTextEdit::Edit(edit)) => { + let range = range_from_lsp(edit.range); + let start = snapshot.clip_point_utf16(range.start, Bias::Left); + let end = snapshot.clip_point_utf16(range.end, Bias::Left); + if start != range.start.0 || end != range.end.0 { + log::info!("completion out of expected range"); + return None; + } + ( + snapshot.anchor_before(start)..snapshot.anchor_after(end), + edit.new_text.clone(), + ) + } + // If the language server does not provide a range, then infer + // the range based on the syntax tree. 
+ None => { + if self.position != clipped_position { + log::info!("completion out of expected range"); + return None; + } + let Range { start, end } = range_for_token + .get_or_insert_with(|| { + let offset = self.position.to_offset(&snapshot); + let (range, kind) = snapshot.surrounding_word(offset); + if kind == Some(CharKind::Word) { + range + } else { + offset..offset + } + }) + .clone(); + let text = lsp_completion + .insert_text + .as_ref() + .unwrap_or(&lsp_completion.label) + .clone(); + ( + snapshot.anchor_before(start)..snapshot.anchor_after(end), + text, + ) + } + Some(lsp::CompletionTextEdit::InsertAndReplace(_)) => { + log::info!("unsupported insert/replace completion"); + return None; + } + }; + + let language = language.clone(); + LineEnding::normalize(&mut new_text); + Some(async move { + let mut label = None; + if let Some(language) = language { + language.process_completion(&mut lsp_completion).await; + label = language.label_for_completion(&lsp_completion).await; + } + Completion { + old_range, + new_text, + label: label.unwrap_or_else(|| { + language::CodeLabel::plain( + lsp_completion.label.clone(), + lsp_completion.filter_text.as_deref(), + ) + }), + lsp_completion, + } + }) + }) + }); + + Ok(futures::future::join_all(completions).await) + } + + fn to_proto(&self, project_id: u64, buffer: &Buffer) -> proto::GetCompletions { + let anchor = buffer.anchor_after(self.position); + proto::GetCompletions { + project_id, + buffer_id: buffer.remote_id(), + position: Some(language::proto::serialize_anchor(&anchor)), + version: serialize_version(&buffer.version()), + } + } + + async fn from_proto( + message: proto::GetCompletions, + project: ModelHandle, + buffer: ModelHandle, + mut cx: AsyncAppContext, + ) -> Result { + let version = deserialize_version(&message.version); + buffer + .update(&mut cx, |buffer, _| buffer.wait_for_version(version)) + .await?; + let position = message + .position + .and_then(language::proto::deserialize_anchor) + .map(|p| { + buffer.read_with(&cx, |buffer, _| { + buffer.clip_point_utf16(Unclipped(p.to_point_utf16(buffer)), Bias::Left) + }) + }) + .ok_or_else(|| anyhow!("invalid position"))?; + Ok(Self { position }) + } + + fn response_to_proto( + completions: Vec, + _: &mut Project, + _: PeerId, + buffer_version: &clock::Global, + _: &mut AppContext, + ) -> proto::GetCompletionsResponse { + proto::GetCompletionsResponse { + completions: completions + .iter() + .map(language::proto::serialize_completion) + .collect(), + version: serialize_version(&buffer_version), + } + } + + async fn response_from_proto( + self, + message: proto::GetCompletionsResponse, + _: ModelHandle, + buffer: ModelHandle, + mut cx: AsyncAppContext, + ) -> Result> { + buffer + .update(&mut cx, |buffer, _| { + buffer.wait_for_version(deserialize_version(&message.version)) + }) + .await?; + + let language = buffer.read_with(&cx, |buffer, _| buffer.language().cloned()); + let completions = message.completions.into_iter().map(|completion| { + language::proto::deserialize_completion(completion, language.clone()) + }); + futures::future::try_join_all(completions).await + } + + fn buffer_id_from_proto(message: &proto::GetCompletions) -> u64 { + message.buffer_id + } +} diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 1c1c91243dcf07a5630fdcf7fd0fec0188ce2e87..ed5b8e98be9d0de9ebf3fef8d3f3615804c69677 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -410,7 +410,7 @@ impl Project { 
client.add_model_request_handler(Self::handle_synchronize_buffers); client.add_model_request_handler(Self::handle_format_buffers); client.add_model_request_handler(Self::handle_get_code_actions); - client.add_model_request_handler(Self::handle_get_completions); + client.add_model_request_handler(Self::handle_lsp_command::); client.add_model_request_handler(Self::handle_lsp_command::); client.add_model_request_handler(Self::handle_lsp_command::); client.add_model_request_handler(Self::handle_lsp_command::); @@ -3596,188 +3596,12 @@ impl Project { pub fn completions( &self, - source_buffer_handle: &ModelHandle, + buffer: &ModelHandle, position: T, cx: &mut ModelContext, ) -> Task>> { - let source_buffer_handle = source_buffer_handle.clone(); - let source_buffer = source_buffer_handle.read(cx); - let buffer_id = source_buffer.remote_id(); - let language = source_buffer.language().cloned(); - let worktree; - let buffer_abs_path; - if let Some(file) = File::from_dyn(source_buffer.file()) { - worktree = file.worktree.clone(); - buffer_abs_path = file.as_local().map(|f| f.abs_path(cx)); - } else { - return Task::ready(Ok(Default::default())); - }; - - let position = Unclipped(position.to_point_utf16(source_buffer)); - let anchor = source_buffer.anchor_after(position); - - if worktree.read(cx).as_local().is_some() { - let buffer_abs_path = buffer_abs_path.unwrap(); - let lang_server = - if let Some((_, server)) = self.language_server_for_buffer(source_buffer, cx) { - server.clone() - } else { - return Task::ready(Ok(Default::default())); - }; - - cx.spawn(|_, cx| async move { - let completions = lang_server - .request::(lsp::CompletionParams { - text_document_position: lsp::TextDocumentPositionParams::new( - lsp::TextDocumentIdentifier::new( - lsp::Url::from_file_path(buffer_abs_path).unwrap(), - ), - point_to_lsp(position.0), - ), - context: Default::default(), - work_done_progress_params: Default::default(), - partial_result_params: Default::default(), - }) - .await - .context("lsp completion request failed")?; - - let completions = if let Some(completions) = completions { - match completions { - lsp::CompletionResponse::Array(completions) => completions, - lsp::CompletionResponse::List(list) => list.items, - } - } else { - Default::default() - }; - - let completions = source_buffer_handle.read_with(&cx, |this, _| { - let snapshot = this.snapshot(); - let clipped_position = this.clip_point_utf16(position, Bias::Left); - let mut range_for_token = None; - completions - .into_iter() - .filter_map(move |mut lsp_completion| { - // For now, we can only handle additional edits if they are returned - // when resolving the completion, not if they are present initially. - if lsp_completion - .additional_text_edits - .as_ref() - .map_or(false, |edits| !edits.is_empty()) - { - return None; - } - - let (old_range, mut new_text) = match lsp_completion.text_edit.as_ref() - { - // If the language server provides a range to overwrite, then - // check that the range is valid. - Some(lsp::CompletionTextEdit::Edit(edit)) => { - let range = range_from_lsp(edit.range); - let start = snapshot.clip_point_utf16(range.start, Bias::Left); - let end = snapshot.clip_point_utf16(range.end, Bias::Left); - if start != range.start.0 || end != range.end.0 { - log::info!("completion out of expected range"); - return None; - } - ( - snapshot.anchor_before(start)..snapshot.anchor_after(end), - edit.new_text.clone(), - ) - } - // If the language server does not provide a range, then infer - // the range based on the syntax tree. 
- None => { - if position.0 != clipped_position { - log::info!("completion out of expected range"); - return None; - } - let Range { start, end } = range_for_token - .get_or_insert_with(|| { - let offset = position.to_offset(&snapshot); - let (range, kind) = snapshot.surrounding_word(offset); - if kind == Some(CharKind::Word) { - range - } else { - offset..offset - } - }) - .clone(); - let text = lsp_completion - .insert_text - .as_ref() - .unwrap_or(&lsp_completion.label) - .clone(); - ( - snapshot.anchor_before(start)..snapshot.anchor_after(end), - text, - ) - } - Some(lsp::CompletionTextEdit::InsertAndReplace(_)) => { - log::info!("unsupported insert/replace completion"); - return None; - } - }; - - LineEnding::normalize(&mut new_text); - let language = language.clone(); - Some(async move { - let mut label = None; - if let Some(language) = language { - language.process_completion(&mut lsp_completion).await; - label = language.label_for_completion(&lsp_completion).await; - } - Completion { - old_range, - new_text, - label: label.unwrap_or_else(|| { - CodeLabel::plain( - lsp_completion.label.clone(), - lsp_completion.filter_text.as_deref(), - ) - }), - lsp_completion, - } - }) - }) - }); - - Ok(futures::future::join_all(completions).await) - }) - } else if let Some(project_id) = self.remote_id() { - let rpc = self.client.clone(); - let message = proto::GetCompletions { - project_id, - buffer_id, - position: Some(language::proto::serialize_anchor(&anchor)), - version: serialize_version(&source_buffer.version()), - }; - cx.spawn_weak(|this, mut cx| async move { - let response = rpc.request(message).await?; - - if this - .upgrade(&cx) - .ok_or_else(|| anyhow!("project was dropped"))? - .read_with(&cx, |this, _| this.is_read_only()) - { - return Err(anyhow!( - "failed to get completions: project was disconnected" - )); - } else { - source_buffer_handle - .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(&response.version)) - }) - .await?; - - let completions = response.completions.into_iter().map(|completion| { - language::proto::deserialize_completion(completion, language.clone()) - }); - futures::future::try_join_all(completions).await - } - }) - } else { - Task::ready(Ok(Default::default())) - } + let position = position.to_point_utf16(buffer.read(cx)); + self.request_lsp(buffer.clone(), GetCompletions { position }, cx) } pub fn apply_additional_edits_for_completion( @@ -5632,49 +5456,6 @@ impl Project { }) } - async fn handle_get_completions( - this: ModelHandle, - envelope: TypedEnvelope, - _: Arc, - mut cx: AsyncAppContext, - ) -> Result { - let buffer = this.read_with(&cx, |this, cx| { - this.opened_buffers - .get(&envelope.payload.buffer_id) - .and_then(|buffer| buffer.upgrade(cx)) - .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id)) - })?; - - let version = deserialize_version(&envelope.payload.version); - buffer - .update(&mut cx, |buffer, _| buffer.wait_for_version(version)) - .await?; - let version = buffer.read_with(&cx, |buffer, _| buffer.version()); - - let position = envelope - .payload - .position - .and_then(language::proto::deserialize_anchor) - .map(|p| { - buffer.read_with(&cx, |buffer, _| { - buffer.clip_point_utf16(Unclipped(p.to_point_utf16(buffer)), Bias::Left) - }) - }) - .ok_or_else(|| anyhow!("invalid position"))?; - - let completions = this - .update(&mut cx, |this, cx| this.completions(&buffer, position, cx)) - .await?; - - Ok(proto::GetCompletionsResponse { - completions: completions - .iter() - 
.map(language::proto::serialize_completion) - .collect(), - version: serialize_version(&version), - }) - } - async fn handle_apply_additional_edits_for_completion( this: ModelHandle, envelope: TypedEnvelope, From 651a83977ee4d32644b8ddc177fc5aa13da6ba85 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 14:53:08 +0200 Subject: [PATCH 56/80] :fire: --- crates/project/src/lsp_command.rs | 2 +- crates/project/src/project.rs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index 334fe37c3ccc529250f3faaecb3b2ff19020a470..1841a43324079c6e1a0da5871b67229d36b5e678 100644 --- a/crates/project/src/lsp_command.rs +++ b/crates/project/src/lsp_command.rs @@ -1346,7 +1346,7 @@ impl LspCommand for GetCompletions { async fn from_proto( message: proto::GetCompletions, - project: ModelHandle, + _: ModelHandle, buffer: ModelHandle, mut cx: AsyncAppContext, ) -> Result { diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index ed5b8e98be9d0de9ebf3fef8d3f3615804c69677..8a5f70369c41457ca354d4ca5e740e7b00a45545 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -27,11 +27,11 @@ use language::{ deserialize_anchor, deserialize_fingerprint, deserialize_line_ending, deserialize_version, serialize_anchor, serialize_version, }, - range_from_lsp, range_to_lsp, Anchor, Bias, Buffer, CachedLspAdapter, CharKind, CodeAction, - CodeLabel, Completion, Diagnostic, DiagnosticEntry, DiagnosticSet, Diff, Event as BufferEvent, - File as _, Language, LanguageRegistry, LanguageServerName, LocalFile, OffsetRangeExt, - Operation, Patch, PointUtf16, RopeFingerprint, TextBufferSnapshot, ToOffset, ToPointUtf16, - Transaction, Unclipped, + range_from_lsp, range_to_lsp, Anchor, Bias, Buffer, CachedLspAdapter, CodeAction, CodeLabel, + Completion, Diagnostic, DiagnosticEntry, DiagnosticSet, Diff, Event as BufferEvent, File as _, + Language, LanguageRegistry, LanguageServerName, LocalFile, OffsetRangeExt, Operation, Patch, + PointUtf16, RopeFingerprint, TextBufferSnapshot, ToOffset, ToPointUtf16, Transaction, + Unclipped, }; use lsp::{ DiagnosticSeverity, DiagnosticTag, DidChangeWatchedFilesRegistrationOptions, From 589860023992f6a9f4cfbb043957f73b18c97121 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 15:11:30 +0200 Subject: [PATCH 57/80] Use `LspCommand` to handle code actions --- crates/project/src/lsp_command.rs | 206 ++++++++++++++++++++++++++++-- crates/project/src/project.rs | 146 +-------------------- 2 files changed, 199 insertions(+), 153 deletions(-) diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index 1841a43324079c6e1a0da5871b67229d36b5e678..d9fafceab043a61cc09607d053055c129065ded5 100644 --- a/crates/project/src/lsp_command.rs +++ b/crates/project/src/lsp_command.rs @@ -9,8 +9,8 @@ use gpui::{AppContext, AsyncAppContext, ModelHandle}; use language::{ point_from_lsp, point_to_lsp, proto::{deserialize_anchor, deserialize_version, serialize_anchor, serialize_version}, - range_from_lsp, Anchor, Bias, Buffer, CachedLspAdapter, CharKind, Completion, PointUtf16, - ToOffset, ToPointUtf16, Unclipped, + range_from_lsp, range_to_lsp, Anchor, Bias, Buffer, CachedLspAdapter, CharKind, CodeAction, + Completion, OffsetRangeExt, PointUtf16, ToOffset, ToPointUtf16, Unclipped, }; use lsp::{DocumentHighlightKind, LanguageServer, ServerCapabilities}; use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag}; 
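
As a reading aid for this refactor (and the matching one for completions in patch 55/80), here is a minimal, self-contained sketch of the command-object pattern that lsp_command.rs is built around. The types below are stand-ins invented for illustration, not the real gpui/lsp types; the real trait is async and also threads through the AppContext, the project handle, and proto (de)serialization hooks.

    // Stand-in sketch of the LspCommand pattern: each request type knows how
    // to build its LSP params and how to interpret the LSP response, so one
    // generic request_lsp-style driver can serve every command.
    use std::path::Path;

    trait LspCommand {
        type Params;
        type Response;

        // Patch 57 widens the real to_lsp to also receive the Buffer and the
        // language server handle, so GetCodeActions can include buffer
        // diagnostics and the server's supported code-action kinds.
        fn to_lsp(&self, path: &Path, buffer_text: &str) -> Self::Params;
        fn response_from_lsp(self, raw: String) -> Self::Response;
    }

    struct GetHoverText {
        offset: usize,
    }

    impl LspCommand for GetHoverText {
        type Params = (String, usize);
        type Response = Option<String>;

        fn to_lsp(&self, path: &Path, _buffer_text: &str) -> Self::Params {
            (path.display().to_string(), self.offset)
        }

        fn response_from_lsp(self, raw: String) -> Self::Response {
            if raw.is_empty() {
                None
            } else {
                Some(raw)
            }
        }
    }

    fn main() {
        let command = GetHoverText { offset: 4 };
        let params = command.to_lsp(Path::new("/tmp/main.rs"), "fn main() {}");
        println!("params: {params:?}");
        println!("response: {:?}", command.response_from_lsp("docs".into()));
    }

The payoff, visible in the project.rs hunks of this patch, is that per-request plumbing (version waits, capability checks, remote forwarding) lives in one place instead of being re-implemented for each request type.
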
@@ -29,6 +29,8 @@ pub(crate) trait LspCommand: 'static + Sized { fn to_lsp( &self, path: &Path, + buffer: &Buffer, + language_server: &Arc, cx: &AppContext, ) -> ::Params; async fn response_from_lsp( @@ -97,6 +99,10 @@ pub(crate) struct GetCompletions { pub position: PointUtf16, } +pub(crate) struct GetCodeActions { + pub range: Range, +} + #[async_trait(?Send)] impl LspCommand for PrepareRename { type Response = Option>; @@ -111,7 +117,13 @@ impl LspCommand for PrepareRename { } } - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::TextDocumentPositionParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::TextDocumentPositionParams { lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { uri: lsp::Url::from_file_path(path).unwrap(), @@ -227,7 +239,13 @@ impl LspCommand for PerformRename { type LspRequest = lsp::request::Rename; type ProtoRequest = proto::PerformRename; - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::RenameParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::RenameParams { lsp::RenameParams { text_document_position: lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { @@ -338,7 +356,13 @@ impl LspCommand for GetDefinition { type LspRequest = lsp::request::GotoDefinition; type ProtoRequest = proto::GetDefinition; - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::GotoDefinitionParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::GotoDefinitionParams { lsp::GotoDefinitionParams { text_document_position_params: lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { @@ -424,7 +448,13 @@ impl LspCommand for GetTypeDefinition { type LspRequest = lsp::request::GotoTypeDefinition; type ProtoRequest = proto::GetTypeDefinition; - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::GotoTypeDefinitionParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::GotoTypeDefinitionParams { lsp::GotoTypeDefinitionParams { text_document_position_params: lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { @@ -699,7 +729,13 @@ impl LspCommand for GetReferences { type LspRequest = lsp::request::References; type ProtoRequest = proto::GetReferences; - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::ReferenceParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::ReferenceParams { lsp::ReferenceParams { text_document_position: lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { @@ -857,7 +893,13 @@ impl LspCommand for GetDocumentHighlights { capabilities.document_highlight_provider.is_some() } - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::DocumentHighlightParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::DocumentHighlightParams { lsp::DocumentHighlightParams { text_document_position_params: lsp::TextDocumentPositionParams { text_document: lsp::TextDocumentIdentifier { @@ -997,7 +1039,13 @@ impl LspCommand for GetHover { type LspRequest = lsp::request::HoverRequest; type ProtoRequest = proto::GetHover; - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::HoverParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::HoverParams { lsp::HoverParams { text_document_position_params: lsp::TextDocumentPositionParams { 
text_document: lsp::TextDocumentIdentifier { @@ -1212,7 +1260,13 @@ impl LspCommand for GetCompletions { type LspRequest = lsp::request::Completion; type ProtoRequest = proto::GetCompletions; - fn to_lsp(&self, path: &Path, _: &AppContext) -> lsp::CompletionParams { + fn to_lsp( + &self, + path: &Path, + _: &Buffer, + _: &Arc, + _: &AppContext, + ) -> lsp::CompletionParams { lsp::CompletionParams { text_document_position: lsp::TextDocumentPositionParams::new( lsp::TextDocumentIdentifier::new(lsp::Url::from_file_path(path).unwrap()), @@ -1406,3 +1460,135 @@ impl LspCommand for GetCompletions { message.buffer_id } } + +#[async_trait(?Send)] +impl LspCommand for GetCodeActions { + type Response = Vec; + type LspRequest = lsp::request::CodeActionRequest; + type ProtoRequest = proto::GetCodeActions; + + fn check_capabilities(&self, capabilities: &ServerCapabilities) -> bool { + capabilities.code_action_provider.is_some() + } + + fn to_lsp( + &self, + path: &Path, + buffer: &Buffer, + language_server: &Arc, + _: &AppContext, + ) -> lsp::CodeActionParams { + let relevant_diagnostics = buffer + .snapshot() + .diagnostics_in_range::<_, usize>(self.range.clone(), false) + .map(|entry| entry.to_lsp_diagnostic_stub()) + .collect(); + lsp::CodeActionParams { + text_document: lsp::TextDocumentIdentifier::new( + lsp::Url::from_file_path(path).unwrap(), + ), + range: range_to_lsp(self.range.to_point_utf16(buffer)), + work_done_progress_params: Default::default(), + partial_result_params: Default::default(), + context: lsp::CodeActionContext { + diagnostics: relevant_diagnostics, + only: language_server.code_action_kinds(), + }, + } + } + + async fn response_from_lsp( + self, + actions: Option, + _: ModelHandle, + _: ModelHandle, + _: AsyncAppContext, + ) -> Result> { + Ok(actions + .unwrap_or_default() + .into_iter() + .filter_map(|entry| { + if let lsp::CodeActionOrCommand::CodeAction(lsp_action) = entry { + Some(CodeAction { + range: self.range.clone(), + lsp_action, + }) + } else { + None + } + }) + .collect()) + } + + fn to_proto(&self, project_id: u64, buffer: &Buffer) -> proto::GetCodeActions { + proto::GetCodeActions { + project_id, + buffer_id: buffer.remote_id(), + start: Some(language::proto::serialize_anchor(&self.range.start)), + end: Some(language::proto::serialize_anchor(&self.range.end)), + version: serialize_version(&buffer.version()), + } + } + + async fn from_proto( + message: proto::GetCodeActions, + _: ModelHandle, + buffer: ModelHandle, + mut cx: AsyncAppContext, + ) -> Result { + let start = message + .start + .and_then(language::proto::deserialize_anchor) + .ok_or_else(|| anyhow!("invalid start"))?; + let end = message + .end + .and_then(language::proto::deserialize_anchor) + .ok_or_else(|| anyhow!("invalid end"))?; + buffer + .update(&mut cx, |buffer, _| { + buffer.wait_for_version(deserialize_version(&message.version)) + }) + .await?; + + Ok(Self { range: start..end }) + } + + fn response_to_proto( + code_actions: Vec, + _: &mut Project, + _: PeerId, + buffer_version: &clock::Global, + _: &mut AppContext, + ) -> proto::GetCodeActionsResponse { + proto::GetCodeActionsResponse { + actions: code_actions + .iter() + .map(language::proto::serialize_code_action) + .collect(), + version: serialize_version(&buffer_version), + } + } + + async fn response_from_proto( + self, + message: proto::GetCodeActionsResponse, + _: ModelHandle, + buffer: ModelHandle, + mut cx: AsyncAppContext, + ) -> Result> { + buffer + .update(&mut cx, |buffer, _| { + 
buffer.wait_for_version(deserialize_version(&message.version)) + }) + .await?; + message + .actions + .into_iter() + .map(language::proto::deserialize_code_action) + .collect() + } + + fn buffer_id_from_proto(message: &proto::GetCodeActions) -> u64 { + message.buffer_id + } +} diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 8a5f70369c41457ca354d4ca5e740e7b00a45545..2daa959cc87bab9840b4c86a61bfe04cf611bb14 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -409,7 +409,7 @@ impl Project { client.add_model_request_handler(Self::handle_reload_buffers); client.add_model_request_handler(Self::handle_synchronize_buffers); client.add_model_request_handler(Self::handle_format_buffers); - client.add_model_request_handler(Self::handle_get_code_actions); + client.add_model_request_handler(Self::handle_lsp_command::); client.add_model_request_handler(Self::handle_lsp_command::); client.add_model_request_handler(Self::handle_lsp_command::); client.add_model_request_handler(Self::handle_lsp_command::); @@ -3704,106 +3704,9 @@ impl Project { range: Range, cx: &mut ModelContext, ) -> Task>> { - let buffer_handle = buffer_handle.clone(); let buffer = buffer_handle.read(cx); - let snapshot = buffer.snapshot(); - let relevant_diagnostics = snapshot - .diagnostics_in_range::(range.to_offset(&snapshot), false) - .map(|entry| entry.to_lsp_diagnostic_stub()) - .collect(); - let buffer_id = buffer.remote_id(); - let worktree; - let buffer_abs_path; - if let Some(file) = File::from_dyn(buffer.file()) { - worktree = file.worktree.clone(); - buffer_abs_path = file.as_local().map(|f| f.abs_path(cx)); - } else { - return Task::ready(Ok(Vec::new())); - }; let range = buffer.anchor_before(range.start)..buffer.anchor_before(range.end); - - if worktree.read(cx).as_local().is_some() { - let buffer_abs_path = buffer_abs_path.unwrap(); - let lang_server = if let Some((_, server)) = self.language_server_for_buffer(buffer, cx) - { - server.clone() - } else { - return Task::ready(Ok(Vec::new())); - }; - - let lsp_range = range_to_lsp(range.to_point_utf16(buffer)); - cx.foreground().spawn(async move { - if lang_server.capabilities().code_action_provider.is_none() { - return Ok(Vec::new()); - } - - Ok(lang_server - .request::(lsp::CodeActionParams { - text_document: lsp::TextDocumentIdentifier::new( - lsp::Url::from_file_path(buffer_abs_path).unwrap(), - ), - range: lsp_range, - work_done_progress_params: Default::default(), - partial_result_params: Default::default(), - context: lsp::CodeActionContext { - diagnostics: relevant_diagnostics, - only: lang_server.code_action_kinds(), - }, - }) - .await? - .unwrap_or_default() - .into_iter() - .filter_map(|entry| { - if let lsp::CodeActionOrCommand::CodeAction(lsp_action) = entry { - Some(CodeAction { - range: range.clone(), - lsp_action, - }) - } else { - None - } - }) - .collect()) - }) - } else if let Some(project_id) = self.remote_id() { - let rpc = self.client.clone(); - let version = buffer.version(); - cx.spawn_weak(|this, mut cx| async move { - let response = rpc - .request(proto::GetCodeActions { - project_id, - buffer_id, - start: Some(language::proto::serialize_anchor(&range.start)), - end: Some(language::proto::serialize_anchor(&range.end)), - version: serialize_version(&version), - }) - .await?; - - if this - .upgrade(&cx) - .ok_or_else(|| anyhow!("project was dropped"))? 
- .read_with(&cx, |this, _| this.is_read_only()) - { - return Err(anyhow!( - "failed to get code actions: project was disconnected" - )); - } else { - buffer_handle - .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(&response.version)) - }) - .await?; - - response - .actions - .into_iter() - .map(language::proto::deserialize_code_action) - .collect() - } - }) - } else { - Task::ready(Ok(Default::default())) - } + self.request_lsp(buffer_handle.clone(), GetCodeActions { range }, cx) } pub fn apply_code_action( @@ -4288,7 +4191,7 @@ impl Project { self.language_server_for_buffer(buffer, cx) .map(|(_, server)| server.clone()), ) { - let lsp_params = request.to_lsp(&file.abs_path(cx), cx); + let lsp_params = request.to_lsp(&file.abs_path(cx), buffer, &language_server, cx); return cx.spawn(|this, cx| async move { if !request.check_capabilities(language_server.capabilities()) { return Ok(Default::default()); @@ -5493,49 +5396,6 @@ impl Project { }) } - async fn handle_get_code_actions( - this: ModelHandle, - envelope: TypedEnvelope, - _: Arc, - mut cx: AsyncAppContext, - ) -> Result { - let start = envelope - .payload - .start - .and_then(language::proto::deserialize_anchor) - .ok_or_else(|| anyhow!("invalid start"))?; - let end = envelope - .payload - .end - .and_then(language::proto::deserialize_anchor) - .ok_or_else(|| anyhow!("invalid end"))?; - let buffer = this.update(&mut cx, |this, cx| { - this.opened_buffers - .get(&envelope.payload.buffer_id) - .and_then(|buffer| buffer.upgrade(cx)) - .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id)) - })?; - buffer - .update(&mut cx, |buffer, _| { - buffer.wait_for_version(deserialize_version(&envelope.payload.version)) - }) - .await?; - - let version = buffer.read_with(&cx, |buffer, _| buffer.version()); - let code_actions = this.update(&mut cx, |this, cx| { - Ok::<_, anyhow::Error>(this.code_actions(&buffer, start..end, cx)) - })?; - - Ok(proto::GetCodeActionsResponse { - actions: code_actions - .await? - .iter() - .map(language::proto::serialize_code_action) - .collect(), - version: serialize_version(&version), - }) - } - async fn handle_apply_code_action( this: ModelHandle, envelope: TypedEnvelope, From 5e37c893c2a37693dbe51ec21aa59142f145675e Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 15:14:32 +0200 Subject: [PATCH 58/80] Ensure project is still alive by the time remote LSP request starts --- crates/project/src/project.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 2daa959cc87bab9840b4c86a61bfe04cf611bb14..90985f881040512110759fa9647ad80205a6089b 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4210,7 +4210,13 @@ impl Project { let rpc = self.client.clone(); let message = request.to_proto(project_id, buffer); return cx.spawn_weak(|this, cx| async move { + // Ensure the project is still alive by the time the task + // is scheduled. + this.upgrade(&cx) + .ok_or_else(|| anyhow!("project dropped"))?; + let response = rpc.request(message).await?; + let this = this .upgrade(&cx) .ok_or_else(|| anyhow!("project dropped"))?; From 172441ab7299f9d248809d0562b0ab8723ba9f0c Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 16:33:08 +0200 Subject: [PATCH 59/80] Cancel pending calls when participant fails to reconnect Previously, we would only cancel pending calls when the room became empty. 
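As a rough illustration of the new rule (not part of the patch; the types are
hypothetical stand-ins for the ORM entities used below), a pending call is one
that was never answered, and it is now canceled as soon as its caller is among
the participants that failed to reconnect:

    struct Participant {
        user_id: i32,
        calling_user_id: i32,
        answering_connection_id: Option<i32>,
    }

    // A call is pending while `answering_connection_id` is `None`; cancel it
    // when the user who placed it failed to reconnect.
    fn calls_to_cancel(participants: &[Participant], stale_user_ids: &[i32]) -> Vec<i32> {
        participants
            .iter()
            .filter(|p| {
                p.answering_connection_id.is_none()
                    && stale_user_ids.contains(&p.calling_user_id)
            })
            .map(|p| p.user_id)
            .collect()
    }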
--- crates/collab/src/db.rs | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 72f8d9c70379344346b0a4f32917b1bbe498eaf3..f441bbfb000504d959b9e54f333b5a33cc31273d 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -175,25 +175,39 @@ impl Database { .map(|participant| participant.user_id) .collect::>(); - // Delete participants who failed to reconnect. + // Delete participants who failed to reconnect and cancel their calls. + let mut canceled_calls_to_user_ids = Vec::new(); room_participant::Entity::delete_many() .filter(stale_participant_filter) .exec(&*tx) .await?; + let called_participants = room_participant::Entity::find() + .filter( + Condition::all() + .add( + room_participant::Column::CallingUserId + .is_in(stale_participant_user_ids.iter().copied()), + ) + .add(room_participant::Column::AnsweringConnectionId.is_null()), + ) + .all(&*tx) + .await?; + room_participant::Entity::delete_many() + .filter( + room_participant::Column::Id + .is_in(called_participants.iter().map(|participant| participant.id)), + ) + .exec(&*tx) + .await?; + canceled_calls_to_user_ids.extend( + called_participants + .into_iter() + .map(|participant| participant.user_id), + ); let room = self.get_room(room_id, &tx).await?; - let mut canceled_calls_to_user_ids = Vec::new(); - // Delete the room if it becomes empty and cancel pending calls. + // Delete the room if it becomes empty. if room.participants.is_empty() { - canceled_calls_to_user_ids.extend( - room.pending_participants - .iter() - .map(|pending_participant| UserId::from_proto(pending_participant.user_id)), - ); - room_participant::Entity::delete_many() - .filter(room_participant::Column::RoomId.eq(room_id)) - .exec(&*tx) - .await?; project::Entity::delete_many() .filter(project::Column::RoomId.eq(room_id)) .exec(&*tx) From 5eb1719ab8e9a05b2190d82afaaeb9eff5ab2d4b Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 17:15:43 +0200 Subject: [PATCH 60/80] Re-send worktree if reconnecting while initial scan isn't finished yet Previously, if the client was disconnected while the initial worktree state was being sent, it would not see the remaining state after reconnecting. This was due to `scan_id` and `completed_scan_id` both being initialized to `0`, so the client would ask for updates since `0` and get nothing. This commit changes the worktree to initialize `scan_id` to `1` and `completed_scan_id` to `0`, so that we get the full worktree again on reconnect. 
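To make the fix concrete, here is a minimal sketch (hypothetical names, not the
code in this patch) of how the two counters gate what the host re-sends on
reconnect:

    // Entries touched by a scan newer than the one the client last observed
    // are sent again. `entries` pairs a hypothetical entry id with the
    // scan_id that last modified it.
    fn entries_to_resend(entries: &[(u64, usize)], observed_scan_id: usize) -> Vec<u64> {
        entries
            .iter()
            .filter(|(_, scan_id)| *scan_id > observed_scan_id)
            .map(|(entry_id, _)| *entry_id)
            .collect()
    }

With `scan_id` initialized to `1`, everything produced by the initial scan
compares greater than the `0` a freshly reconnected client reports, so the full
worktree is sent; with the old `0`/`0` initialization the same comparison
matched nothing.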
--- crates/project/src/worktree.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 792e09a00a29b637fee8dda3bf92c0673e189000..0f31303635f1e70b978861bb796a76503e4b9f1f 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -221,7 +221,7 @@ impl Worktree { root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(), entries_by_path: Default::default(), entries_by_id: Default::default(), - scan_id: 0, + scan_id: 1, completed_scan_id: 0, }, }; @@ -298,7 +298,7 @@ impl Worktree { .collect(), entries_by_path: Default::default(), entries_by_id: Default::default(), - scan_id: 0, + scan_id: 1, completed_scan_id: 0, }; From 42b10044fc3d962f0e7c6f8e0fbc9585b81cef86 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 11 Apr 2023 17:43:05 +0200 Subject: [PATCH 61/80] Fix running client crate tests --- crates/client/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/client/Cargo.toml b/crates/client/Cargo.toml index 86a608a00bcc65ed15cc5af03d6060a195b5a639..560a754bf7ead2d9bd7babf309f084325a0f2916 100644 --- a/crates/client/Cargo.toml +++ b/crates/client/Cargo.toml @@ -45,3 +45,4 @@ collections = { path = "../collections", features = ["test-support"] } gpui = { path = "../gpui", features = ["test-support"] } rpc = { path = "../rpc", features = ["test-support"] } settings = { path = "../settings", features = ["test-support"] } +util = { path = "../util", features = ["test-support"] } From 527f0310e2ba610442f24d4fe03e4bef2711e989 Mon Sep 17 00:00:00 2001 From: Nate Butler Date: Tue, 11 Apr 2023 12:22:49 -0400 Subject: [PATCH 62/80] Update + publish the Ayu theme --- styles/package-lock.json | 31 +++++++++++ styles/package.json | 1 + styles/src/themes/ayu-dark.ts | 15 ++++++ styles/src/themes/ayu-light.ts | 15 ++++++ styles/src/themes/ayu-mirage.ts | 15 ++++++ styles/src/themes/common/ayu-common.ts | 74 ++++++++++++++++++++++++++ styles/src/themes/staff/ayu-mirage.ts | 31 ----------- styles/src/themes/staff/ayu.ts | 52 ------------------ 8 files changed, 151 insertions(+), 83 deletions(-) create mode 100644 styles/src/themes/ayu-dark.ts create mode 100644 styles/src/themes/ayu-light.ts create mode 100644 styles/src/themes/ayu-mirage.ts create mode 100644 styles/src/themes/common/ayu-common.ts delete mode 100644 styles/src/themes/staff/ayu-mirage.ts delete mode 100644 styles/src/themes/staff/ayu.ts diff --git a/styles/package-lock.json b/styles/package-lock.json index 19741d62b38c98305ddebd3f5aa9f0f8182493db..c03c27c1e89e4290dd36beb98eef570da4b44f4d 100644 --- a/styles/package-lock.json +++ b/styles/package-lock.json @@ -11,6 +11,7 @@ "dependencies": { "@types/chroma-js": "^2.4.0", "@types/node": "^18.14.1", + "ayu": "^8.0.1", "bezier-easing": "^2.1.0", "case-anything": "^2.1.10", "chroma-js": "^2.4.2", @@ -106,6 +107,16 @@ "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==" }, + "node_modules/ayu": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/ayu/-/ayu-8.0.1.tgz", + "integrity": "sha512-yuPZ2kZYQoYaPRQ/78F9rXDVx1rVGCJ1neBYithBoSprD6zPdIJdAKizUXG0jtTBu7nTFyAnVFFYuLnCS3cpDw==", + "dependencies": { + "@types/chroma-js": "^2.0.0", + "chroma-js": "^2.1.0", + "nonenumerable": "^1.1.1" + } + }, "node_modules/bezier-easing": { "version": "2.1.0", "resolved": 
"https://registry.npmjs.org/bezier-easing/-/bezier-easing-2.1.0.tgz", @@ -153,6 +164,11 @@ "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==" }, + "node_modules/nonenumerable": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/nonenumerable/-/nonenumerable-1.1.1.tgz", + "integrity": "sha512-ptUD9w9D8WqW6fuJJkZNCImkf+0vdbgUTbRK3i7jsy3olqtH96hYE6Q/S3Tx9NWbcB/ocAjYshXCAUP0lZ9B4Q==" + }, "node_modules/toml": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/toml/-/toml-3.0.0.tgz", @@ -300,6 +316,16 @@ "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==" }, + "ayu": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/ayu/-/ayu-8.0.1.tgz", + "integrity": "sha512-yuPZ2kZYQoYaPRQ/78F9rXDVx1rVGCJ1neBYithBoSprD6zPdIJdAKizUXG0jtTBu7nTFyAnVFFYuLnCS3cpDw==", + "requires": { + "@types/chroma-js": "^2.0.0", + "chroma-js": "^2.1.0", + "nonenumerable": "^1.1.1" + } + }, "bezier-easing": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/bezier-easing/-/bezier-easing-2.1.0.tgz", @@ -335,6 +361,11 @@ "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==" }, + "nonenumerable": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/nonenumerable/-/nonenumerable-1.1.1.tgz", + "integrity": "sha512-ptUD9w9D8WqW6fuJJkZNCImkf+0vdbgUTbRK3i7jsy3olqtH96hYE6Q/S3Tx9NWbcB/ocAjYshXCAUP0lZ9B4Q==" + }, "toml": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/toml/-/toml-3.0.0.tgz", diff --git a/styles/package.json b/styles/package.json index ad4dfcf561ed3b1d47bd513c207c25a0c2c1b90c..5f103c5d6c8eb091031f05ab3789625c56b97d84 100644 --- a/styles/package.json +++ b/styles/package.json @@ -12,6 +12,7 @@ "dependencies": { "@types/chroma-js": "^2.4.0", "@types/node": "^18.14.1", + "ayu": "^8.0.1", "bezier-easing": "^2.1.0", "case-anything": "^2.1.10", "chroma-js": "^2.4.2", diff --git a/styles/src/themes/ayu-dark.ts b/styles/src/themes/ayu-dark.ts new file mode 100644 index 0000000000000000000000000000000000000000..2ee558f52954833057da4b848a61f06faf6c6ac8 --- /dev/null +++ b/styles/src/themes/ayu-dark.ts @@ -0,0 +1,15 @@ +import { createColorScheme } from "../common/ramps" +import { ayu, buildTheme } from "../common/ayu-common" + +const name = "Ayu" +const author = "Konstantin Pschera " +const url = "https://github.com/ayu-theme/ayu-colors" +const license = { + type: "MIT", + url: "https://github.com/ayu-theme/ayu-colors/blob/master/license", +} + +const variant = ayu.dark +const theme = buildTheme(variant, false) + +export const dark = createColorScheme(`${name} Dark`, false, theme.ramps, theme.syntax) diff --git a/styles/src/themes/ayu-light.ts b/styles/src/themes/ayu-light.ts new file mode 100644 index 0000000000000000000000000000000000000000..96304c6ed4fb8850dd7913a44f07c1991ed26756 --- /dev/null +++ b/styles/src/themes/ayu-light.ts @@ -0,0 +1,15 @@ +import { createColorScheme } from "../common/ramps" +import { ayu, buildTheme } from "../common/ayu-common" + +const name = "Ayu" +const author = "Konstantin Pschera " +const url = "https://github.com/ayu-theme/ayu-colors" +const license = { + type: "MIT", + url: "https://github.com/ayu-theme/ayu-colors/blob/master/license", 
+} + +const variant = ayu.light +const theme = buildTheme(variant, true) + +export const light = createColorScheme(`${name} Light`, true, theme.ramps, theme.syntax) diff --git a/styles/src/themes/ayu-mirage.ts b/styles/src/themes/ayu-mirage.ts new file mode 100644 index 0000000000000000000000000000000000000000..7c24b9b70ff06d45e877c658b1b2a831cc64e5ab --- /dev/null +++ b/styles/src/themes/ayu-mirage.ts @@ -0,0 +1,15 @@ +import { createColorScheme } from "../common/ramps" +import { ayu, buildTheme } from "../common/ayu-common" + +const name = "Ayu" +const author = "Konstantin Pschera " +const url = "https://github.com/ayu-theme/ayu-colors" +const license = { + type: "MIT", + url: "https://github.com/ayu-theme/ayu-colors/blob/master/license", +} + +const variant = ayu.mirage +const theme = buildTheme(variant, false) + +export const dark = createColorScheme(`${name} Mirage`, false, theme.ramps, theme.syntax) diff --git a/styles/src/themes/common/ayu-common.ts b/styles/src/themes/common/ayu-common.ts new file mode 100644 index 0000000000000000000000000000000000000000..c77a2f39733cd4c8cd684247558228302ba4092d --- /dev/null +++ b/styles/src/themes/common/ayu-common.ts @@ -0,0 +1,74 @@ +import { dark, light, mirage } from 'ayu' +import { ThemeSyntax } from './syntax' +import chroma from 'chroma-js' +import { colorRamp } from './ramps' + +export const ayu = { + dark, + light, + mirage +} + +export const buildTheme = (t: typeof dark, light: boolean) => { + const color = { + lightBlue: t.syntax.tag.hex(), + yellow: t.syntax.func.hex(), + blue: t.syntax.entity.hex(), + green: t.syntax.string.hex(), + teal: t.syntax.regexp.hex(), + red: t.syntax.markup.hex(), + orange: t.syntax.keyword.hex(), + lightYellow: t.syntax.special.hex(), + gray: t.syntax.comment.hex(), + purple: t.syntax.constant.hex(), + } + + const syntax: ThemeSyntax = { + constant: { color: t.syntax.constant.hex() }, + "string.regex": { color: t.syntax.regexp.hex() }, + string: { color: t.syntax.string.hex() }, + comment: { color: t.syntax.comment.hex() }, + keyword: { color: t.syntax.keyword.hex() }, + operator: { color: t.syntax.operator.hex() }, + number: { color: t.syntax.constant.hex() }, + type: { color: color.blue }, + boolean: { color: color.purple }, + "punctuation.special": { color: color.purple }, + "string.special": { color: t.syntax.special.hex() }, + function: { color: t.syntax.func.hex() }, + } + + return { + ramps: { + neutral: chroma.scale([ + light ? t.editor.fg.hex() : t.editor.bg.hex(), + light ? 
t.editor.bg.hex() : t.editor.fg.hex(), + ]), + red: colorRamp(chroma(color.red)), + orange: colorRamp(chroma(color.orange)), + yellow: colorRamp(chroma(color.yellow)), + green: colorRamp(chroma(color.green)), + cyan: colorRamp(chroma(color.teal)), + blue: colorRamp(chroma(color.blue)), + violet: colorRamp(chroma(color.purple)), + magenta: colorRamp(chroma(color.lightBlue)), + }, + syntax + } +} + +export const buildSyntax = (t: typeof dark): ThemeSyntax => { + return { + constant: { color: t.syntax.constant.hex() }, + "string.regex": { color: t.syntax.regexp.hex() }, + string: { color: t.syntax.string.hex() }, + comment: { color: t.syntax.comment.hex() }, + keyword: { color: t.syntax.keyword.hex() }, + operator: { color: t.syntax.operator.hex() }, + number: { color: t.syntax.constant.hex() }, + type: { color: t.syntax.regexp.hex() }, + "punctuation.special": { color: t.syntax.special.hex() }, + "string.special": { color: t.syntax.special.hex() }, + function: { color: t.syntax.func.hex() }, + } +} diff --git a/styles/src/themes/staff/ayu-mirage.ts b/styles/src/themes/staff/ayu-mirage.ts deleted file mode 100644 index 5b832699b46b984d9fd0fdaff223073613095aed..0000000000000000000000000000000000000000 --- a/styles/src/themes/staff/ayu-mirage.ts +++ /dev/null @@ -1,31 +0,0 @@ -import chroma from "chroma-js" -import { colorRamp, createColorScheme } from "../common/ramps" - -const name = "Ayu" -const author = "Konstantin Pschera " -const url = "https://github.com/ayu-theme/ayu-colors" -const license = { - type: "MIT", - url: "https://github.com/ayu-theme/ayu-colors/blob/master/license", -} - -export const dark = createColorScheme(`${name} Mirage`, false, { - neutral: chroma.scale([ - "#171B24", - "#1F2430", - "#242936", - "#707A8C", - "#8A9199", - "#CCCAC2", - "#D9D7CE", - "#F3F4F5", - ]), - red: colorRamp(chroma("#F28779")), - orange: colorRamp(chroma("#FFAD66")), - yellow: colorRamp(chroma("#FFD173")), - green: colorRamp(chroma("#D5FF80")), - cyan: colorRamp(chroma("#95E6CB")), - blue: colorRamp(chroma("#5CCFE6")), - violet: colorRamp(chroma("#D4BFFF")), - magenta: colorRamp(chroma("#F29E74")), -}) diff --git a/styles/src/themes/staff/ayu.ts b/styles/src/themes/staff/ayu.ts deleted file mode 100644 index 24fcdb951b07aa3e7346c28566b26a549e388928..0000000000000000000000000000000000000000 --- a/styles/src/themes/staff/ayu.ts +++ /dev/null @@ -1,52 +0,0 @@ -import chroma from "chroma-js" -import { colorRamp, createColorScheme } from "../common/ramps" - -const name = "Ayu" -const author = "Konstantin Pschera " -const url = "https://github.com/ayu-theme/ayu-colors" -const license = { - type: "MIT", - url: "https://github.com/ayu-theme/ayu-colors/blob/master/license", -} - -export const dark = createColorScheme(`${name} Dark`, false, { - neutral: chroma.scale([ - "#0F1419", - "#131721", - "#272D38", - "#3E4B59", - "#BFBDB6", - "#E6E1CF", - "#E6E1CF", - "#F3F4F5", - ]), - red: colorRamp(chroma("#F07178")), - orange: colorRamp(chroma("#FF8F40")), - yellow: colorRamp(chroma("#FFB454")), - green: colorRamp(chroma("#B8CC52")), - cyan: colorRamp(chroma("#95E6CB")), - blue: colorRamp(chroma("#59C2FF")), - violet: colorRamp(chroma("#D2A6FF")), - magenta: colorRamp(chroma("#E6B673")), -}) - -export const light = createColorScheme(`${name} Light`, true, { - neutral: chroma.scale([ - "#1A1F29", - "#242936", - "#5C6773", - "#828C99", - "#ABB0B6", - "#F8F9FA", - "#F3F4F5", - "#FAFAFA", - ]), - red: colorRamp(chroma("#F07178")), - orange: colorRamp(chroma("#FA8D3E")), - yellow: colorRamp(chroma("#F2AE49")), - 
green: colorRamp(chroma("#86B300")), - cyan: colorRamp(chroma("#4CBF99")), - blue: colorRamp(chroma("#36A3D9")), - violet: colorRamp(chroma("#A37ACC")), - magenta: colorRamp(chroma("#E6BA7E")), -}) From 975f5d5fa89ebe470c3446420b21ab9ea4f138b2 Mon Sep 17 00:00:00 2001 From: Nate Butler Date: Tue, 11 Apr 2023 12:23:21 -0400 Subject: [PATCH 63/80] Format --- styles/src/styleTree/components.ts | 10 +- styles/src/styleTree/copilot.ts | 127 ++++++++++++------ .../styleTree/simpleMessageNotification.ts | 1 - styles/src/styleTree/workspace.ts | 39 ++++-- styles/src/themes/ayu-dark.ts | 7 +- styles/src/themes/ayu-light.ts | 7 +- styles/src/themes/ayu-mirage.ts | 7 +- styles/src/themes/common/ayu-common.ts | 12 +- 8 files changed, 141 insertions(+), 69 deletions(-) diff --git a/styles/src/styleTree/components.ts b/styles/src/styleTree/components.ts index 6b21eec405a8f76caf3ee6f952e0937a0fd20b50..efd4a956727c7d7cad42078292346dda32b55165 100644 --- a/styles/src/styleTree/components.ts +++ b/styles/src/styleTree/components.ts @@ -281,14 +281,18 @@ export function border( } } - -export function svg(color: string, asset: String, width: Number, height: Number) { +export function svg( + color: string, + asset: String, + width: Number, + height: Number +) { return { color, asset, dimensions: { width, height, - } + }, } } diff --git a/styles/src/styleTree/copilot.ts b/styles/src/styleTree/copilot.ts index c2df2e5d405917606831b93b649d72ccec3ab2af..9fa86cd741234a421eade8f02161729e8ab502f0 100644 --- a/styles/src/styleTree/copilot.ts +++ b/styles/src/styleTree/copilot.ts @@ -1,13 +1,13 @@ import { ColorScheme } from "../themes/common/colorScheme" -import { background, border, foreground, svg, text } from "./components"; - +import { background, border, foreground, svg, text } from "./components" export default function copilot(colorScheme: ColorScheme) { - let layer = colorScheme.middle; + let layer = colorScheme.middle - let content_width = 264; + let content_width = 264 - let ctaButton = { // Copied from welcome screen. FIXME: Move this into a ZDS component + let ctaButton = { + // Copied from welcome screen. 
FIXME: Move this into a ZDS component background: background(layer), border: border(layer, "default"), cornerRadius: 4, @@ -15,7 +15,7 @@ export default function copilot(colorScheme: ColorScheme) { top: 4, bottom: 4, left: 8, - right: 8 + right: 8, }, padding: { top: 3, @@ -29,22 +29,32 @@ export default function copilot(colorScheme: ColorScheme) { background: background(layer, "hovered"), border: border(layer, "active"), }, - }; + } return { outLinkIcon: { - icon: svg(foreground(layer, "variant"), "icons/link_out_12.svg", 12, 12), + icon: svg( + foreground(layer, "variant"), + "icons/link_out_12.svg", + 12, + 12 + ), container: { cornerRadius: 6, padding: { left: 6 }, }, hover: { - icon: svg(foreground(layer, "hovered"), "icons/link_out_12.svg", 12, 12) + icon: svg( + foreground(layer, "hovered"), + "icons/link_out_12.svg", + 12, + 12 + ), }, }, modal: { titleText: { - ...text(layer, "sans", { size: "xs", "weight": "bold" }) + ...text(layer, "sans", { size: "xs", weight: "bold" }), }, titlebar: { background: background(colorScheme.lowest), @@ -54,7 +64,7 @@ export default function copilot(colorScheme: ColorScheme) { bottom: 4, left: 8, right: 8, - } + }, }, container: { background: background(colorScheme.lowest), @@ -63,10 +73,15 @@ export default function copilot(colorScheme: ColorScheme) { left: 0, right: 0, bottom: 8, - } + }, }, closeIcon: { - icon: svg(foreground(layer, "variant"), "icons/x_mark_8.svg", 8, 8), + icon: svg( + foreground(layer, "variant"), + "icons/x_mark_8.svg", + 8, + 8 + ), container: { cornerRadius: 2, padding: { @@ -76,15 +91,25 @@ export default function copilot(colorScheme: ColorScheme) { right: 4, }, margin: { - right: 0 - } + right: 0, + }, }, hover: { - icon: svg(foreground(layer, "on"), "icons/x_mark_8.svg", 8, 8), + icon: svg( + foreground(layer, "on"), + "icons/x_mark_8.svg", + 8, + 8 + ), }, clicked: { - icon: svg(foreground(layer, "base"), "icons/x_mark_8.svg", 8, 8), - } + icon: svg( + foreground(layer, "base"), + "icons/x_mark_8.svg", + 8, + 8 + ), + }, }, dimensions: { width: 280, @@ -98,14 +123,19 @@ export default function copilot(colorScheme: ColorScheme) { ctaButton, header: { - icon: svg(foreground(layer, "default"), "icons/zed_plus_copilot_32.svg", 92, 32), + icon: svg( + foreground(layer, "default"), + "icons/zed_plus_copilot_32.svg", + 92, + 32 + ), container: { margin: { top: 35, bottom: 5, left: 0, - right: 0 - } + right: 0, + }, }, }, @@ -116,21 +146,20 @@ export default function copilot(colorScheme: ColorScheme) { top: 6, bottom: 12, left: 0, - right: 0 - } + right: 0, + }, }, hint: { ...text(layer, "sans", { size: "xs", color: "#838994" }), margin: { top: 6, - bottom: 2 - } + bottom: 2, + }, }, deviceCode: { - text: - text(layer, "mono", { size: "sm" }), + text: text(layer, "mono", { size: "sm" }), cta: { ...ctaButton, background: background(colorScheme.lowest), @@ -144,7 +173,7 @@ export default function copilot(colorScheme: ColorScheme) { margin: { left: 16, right: 16, - } + }, }, left: content_width / 2, leftContainer: { @@ -155,9 +184,14 @@ export default function copilot(colorScheme: ColorScheme) { right: 6, }, }, - right: content_width * 1 / 3, + right: (content_width * 1) / 3, rightContainer: { - border: border(colorScheme.lowest, "inverted", { bottom: false, right: false, top: false, left: true }), + border: border(colorScheme.lowest, "inverted", { + bottom: false, + right: false, + top: false, + left: true, + }), padding: { top: 3, bottom: 5, @@ -165,9 +199,14 @@ export default function copilot(colorScheme: ColorScheme) { right: 
0, }, hover: { - border: border(layer, "active", { bottom: false, right: false, top: false, left: true }), + border: border(layer, "active", { + bottom: false, + right: false, + top: false, + left: true, + }), }, - } + }, }, }, @@ -179,12 +218,15 @@ export default function copilot(colorScheme: ColorScheme) { top: 16, bottom: 16, left: 0, - right: 0 - } + right: 0, + }, }, warning: { - ...text(layer, "sans", { size: "xs", color: foreground(layer, "warning") }), + ...text(layer, "sans", { + size: "xs", + color: foreground(layer, "warning"), + }), border: border(layer, "warning"), background: background(layer, "warning"), cornerRadius: 2, @@ -197,8 +239,8 @@ export default function copilot(colorScheme: ColorScheme) { margin: { bottom: 16, left: 8, - right: 8 - } + right: 8, + }, }, }, @@ -208,19 +250,18 @@ export default function copilot(colorScheme: ColorScheme) { margin: { top: 16, - bottom: 16 - } + bottom: 16, + }, }, hint: { ...text(layer, "sans", { size: "xs", color: "#838994" }), margin: { top: 24, - bottom: 4 - } + bottom: 4, + }, }, - }, - } + }, } } diff --git a/styles/src/styleTree/simpleMessageNotification.ts b/styles/src/styleTree/simpleMessageNotification.ts index dde689e9bd0563145f0b91ac4109a55eb5911d6c..2e057ed783140e60a15d61d2f4f35bb010a30e48 100644 --- a/styles/src/styleTree/simpleMessageNotification.ts +++ b/styles/src/styleTree/simpleMessageNotification.ts @@ -23,7 +23,6 @@ export default function simpleMessageNotification( right: 7, }, - margin: { left: headerPadding, top: 6, bottom: 6 }, hover: { ...text(layer, "sans", "default", { size: "xs" }), diff --git a/styles/src/styleTree/workspace.ts b/styles/src/styleTree/workspace.ts index f0dbaad54ae70ad4a041c044aeee009165eacb00..9b53ecc5d2fd8702bbf7ade760160cfdbe83067b 100644 --- a/styles/src/styleTree/workspace.ts +++ b/styles/src/styleTree/workspace.ts @@ -1,6 +1,13 @@ import { ColorScheme } from "../themes/common/colorScheme" import { withOpacity } from "../utils/color" -import { background, border, borderColor, foreground, svg, text } from "./components" +import { + background, + border, + borderColor, + foreground, + svg, + text, +} from "./components" import statusBar from "./statusBar" import tabBar from "./tabBar" @@ -46,14 +53,24 @@ export default function workspace(colorScheme: ColorScheme) { width: 256, height: 256, }, - logo: svg(withOpacity("#000000", colorScheme.isLight ? 0.6 : 0.8), "icons/logo_96.svg", 256, 256), + logo: svg( + withOpacity("#000000", colorScheme.isLight ? 0.6 : 0.8), + "icons/logo_96.svg", + 256, + 256 + ), - logoShadow: svg(withOpacity( - colorScheme.isLight - ? "#FFFFFF" - : colorScheme.lowest.base.default.background, - colorScheme.isLight ? 1 : 0.6 - ), "icons/logo_96.svg", 256, 256), + logoShadow: svg( + withOpacity( + colorScheme.isLight + ? "#FFFFFF" + : colorScheme.lowest.base.default.background, + colorScheme.isLight ? 
1 : 0.6 + ), + "icons/logo_96.svg", + 256, + 256 + ), keyboardHints: { margin: { top: 96, @@ -273,11 +290,7 @@ export default function workspace(colorScheme: ColorScheme) { }, hover: { color: foreground(colorScheme.highest, "on", "hovered"), - background: background( - colorScheme.highest, - "on", - "hovered" - ), + background: background(colorScheme.highest, "on", "hovered"), }, }, disconnectedOverlay: { diff --git a/styles/src/themes/ayu-dark.ts b/styles/src/themes/ayu-dark.ts index 2ee558f52954833057da4b848a61f06faf6c6ac8..7774885208a90180714bca3cd8c89a9d1d532414 100644 --- a/styles/src/themes/ayu-dark.ts +++ b/styles/src/themes/ayu-dark.ts @@ -12,4 +12,9 @@ const license = { const variant = ayu.dark const theme = buildTheme(variant, false) -export const dark = createColorScheme(`${name} Dark`, false, theme.ramps, theme.syntax) +export const dark = createColorScheme( + `${name} Dark`, + false, + theme.ramps, + theme.syntax +) diff --git a/styles/src/themes/ayu-light.ts b/styles/src/themes/ayu-light.ts index 96304c6ed4fb8850dd7913a44f07c1991ed26756..868c9b9fe86633724d183e50f0610c3733849e99 100644 --- a/styles/src/themes/ayu-light.ts +++ b/styles/src/themes/ayu-light.ts @@ -12,4 +12,9 @@ const license = { const variant = ayu.light const theme = buildTheme(variant, true) -export const light = createColorScheme(`${name} Light`, true, theme.ramps, theme.syntax) +export const light = createColorScheme( + `${name} Light`, + true, + theme.ramps, + theme.syntax +) diff --git a/styles/src/themes/ayu-mirage.ts b/styles/src/themes/ayu-mirage.ts index 7c24b9b70ff06d45e877c658b1b2a831cc64e5ab..724eb030af0dccf3b9acddd9c58c7ad8c59b13b1 100644 --- a/styles/src/themes/ayu-mirage.ts +++ b/styles/src/themes/ayu-mirage.ts @@ -12,4 +12,9 @@ const license = { const variant = ayu.mirage const theme = buildTheme(variant, false) -export const dark = createColorScheme(`${name} Mirage`, false, theme.ramps, theme.syntax) +export const dark = createColorScheme( + `${name} Mirage`, + false, + theme.ramps, + theme.syntax +) diff --git a/styles/src/themes/common/ayu-common.ts b/styles/src/themes/common/ayu-common.ts index c77a2f39733cd4c8cd684247558228302ba4092d..7ae48f38560ba42637c31f68c4b6e5998f965f44 100644 --- a/styles/src/themes/common/ayu-common.ts +++ b/styles/src/themes/common/ayu-common.ts @@ -1,12 +1,12 @@ -import { dark, light, mirage } from 'ayu' -import { ThemeSyntax } from './syntax' -import chroma from 'chroma-js' -import { colorRamp } from './ramps' +import { dark, light, mirage } from "ayu" +import { ThemeSyntax } from "./syntax" +import chroma from "chroma-js" +import { colorRamp } from "./ramps" export const ayu = { dark, light, - mirage + mirage, } export const buildTheme = (t: typeof dark, light: boolean) => { @@ -53,7 +53,7 @@ export const buildTheme = (t: typeof dark, light: boolean) => { violet: colorRamp(chroma(color.purple)), magenta: colorRamp(chroma(color.lightBlue)), }, - syntax + syntax, } } From de60657d533bf60ac164b6eca2a835516ee83c72 Mon Sep 17 00:00:00 2001 From: Nate Butler Date: Tue, 11 Apr 2023 12:23:54 -0400 Subject: [PATCH 64/80] Ignore the target folder --- styles/.prettierignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/styles/.prettierignore b/styles/.prettierignore index 04fe05da753a6c05c6d84733a26892bced1ba6a4..98aa9da7e02d2685d8a3b2033bde7db87432275b 100644 --- a/styles/.prettierignore +++ b/styles/.prettierignore @@ -1,2 +1,3 @@ package-lock.json -package.json \ No newline at end of file +package.json +target From 
65c2fb1cc6e8c6490cd395935e2e212274bcba46 Mon Sep 17 00:00:00 2001 From: Nate Butler Date: Tue, 11 Apr 2023 12:25:02 -0400 Subject: [PATCH 65/80] Fix paths after publishing --- styles/src/themes/ayu-dark.ts | 4 ++-- styles/src/themes/ayu-light.ts | 4 ++-- styles/src/themes/ayu-mirage.ts | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/styles/src/themes/ayu-dark.ts b/styles/src/themes/ayu-dark.ts index 7774885208a90180714bca3cd8c89a9d1d532414..790e0d117378e39bdd71e678e25b03e30470b9d8 100644 --- a/styles/src/themes/ayu-dark.ts +++ b/styles/src/themes/ayu-dark.ts @@ -1,5 +1,5 @@ -import { createColorScheme } from "../common/ramps" -import { ayu, buildTheme } from "../common/ayu-common" +import { createColorScheme } from "./common/ramps" +import { ayu, buildTheme } from "./common/ayu-common" const name = "Ayu" const author = "Konstantin Pschera " diff --git a/styles/src/themes/ayu-light.ts b/styles/src/themes/ayu-light.ts index 868c9b9fe86633724d183e50f0610c3733849e99..cc4e121cc6e485a186f31b13705e1311dbaabde4 100644 --- a/styles/src/themes/ayu-light.ts +++ b/styles/src/themes/ayu-light.ts @@ -1,5 +1,5 @@ -import { createColorScheme } from "../common/ramps" -import { ayu, buildTheme } from "../common/ayu-common" +import { createColorScheme } from "./common/ramps" +import { ayu, buildTheme } from "./common/ayu-common" const name = "Ayu" const author = "Konstantin Pschera " diff --git a/styles/src/themes/ayu-mirage.ts b/styles/src/themes/ayu-mirage.ts index 724eb030af0dccf3b9acddd9c58c7ad8c59b13b1..c5550ec2780a8c2d8f8c46993fdca714119be9ff 100644 --- a/styles/src/themes/ayu-mirage.ts +++ b/styles/src/themes/ayu-mirage.ts @@ -1,5 +1,5 @@ -import { createColorScheme } from "../common/ramps" -import { ayu, buildTheme } from "../common/ayu-common" +import { createColorScheme } from "./common/ramps" +import { ayu, buildTheme } from "./common/ayu-common" const name = "Ayu" const author = "Konstantin Pschera " From 20ec9f6daf34e57201c1d671f43f4521570e0ec0 Mon Sep 17 00:00:00 2001 From: Nate Butler Date: Tue, 11 Apr 2023 12:37:20 -0400 Subject: [PATCH 66/80] Add meta fields to `ayu` --- styles/src/themes/ayu-dark.ts | 13 +++++-------- styles/src/themes/ayu-light.ts | 13 +++++-------- styles/src/themes/ayu-mirage.ts | 13 +++++-------- 3 files changed, 15 insertions(+), 24 deletions(-) diff --git a/styles/src/themes/ayu-dark.ts b/styles/src/themes/ayu-dark.ts index 790e0d117378e39bdd71e678e25b03e30470b9d8..c7e86994feec6a78879b383e5bf71941084c0b9e 100644 --- a/styles/src/themes/ayu-dark.ts +++ b/styles/src/themes/ayu-dark.ts @@ -1,19 +1,16 @@ import { createColorScheme } from "./common/ramps" -import { ayu, buildTheme } from "./common/ayu-common" +import { ayu, meta as themeMeta, buildTheme } from "./common/ayu-common" -const name = "Ayu" -const author = "Konstantin Pschera " -const url = "https://github.com/ayu-theme/ayu-colors" -const license = { - type: "MIT", - url: "https://github.com/ayu-theme/ayu-colors/blob/master/license", +export const meta = { + ...themeMeta, + name: `${themeMeta.name} Dark` } const variant = ayu.dark const theme = buildTheme(variant, false) export const dark = createColorScheme( - `${name} Dark`, + meta.name, false, theme.ramps, theme.syntax diff --git a/styles/src/themes/ayu-light.ts b/styles/src/themes/ayu-light.ts index cc4e121cc6e485a186f31b13705e1311dbaabde4..9acabf6a3957a20aca2e3fa0181cf675840ddae4 100644 --- a/styles/src/themes/ayu-light.ts +++ b/styles/src/themes/ayu-light.ts @@ -1,19 +1,16 @@ import { createColorScheme } from "./common/ramps" -import 
{ ayu, buildTheme } from "./common/ayu-common" +import { ayu, meta as themeMeta, buildTheme } from "./common/ayu-common" -const name = "Ayu" -const author = "Konstantin Pschera " -const url = "https://github.com/ayu-theme/ayu-colors" -const license = { - type: "MIT", - url: "https://github.com/ayu-theme/ayu-colors/blob/master/license", +export const meta = { + ...themeMeta, + name: `${themeMeta.name} Light` } const variant = ayu.light const theme = buildTheme(variant, true) export const light = createColorScheme( - `${name} Light`, + meta.name, true, theme.ramps, theme.syntax diff --git a/styles/src/themes/ayu-mirage.ts b/styles/src/themes/ayu-mirage.ts index c5550ec2780a8c2d8f8c46993fdca714119be9ff..2a01512673b73d104ead5fc20edf353b813c16bb 100644 --- a/styles/src/themes/ayu-mirage.ts +++ b/styles/src/themes/ayu-mirage.ts @@ -1,19 +1,16 @@ import { createColorScheme } from "./common/ramps" -import { ayu, buildTheme } from "./common/ayu-common" +import { ayu, meta as themeMeta, buildTheme } from "./common/ayu-common" -const name = "Ayu" -const author = "Konstantin Pschera " -const url = "https://github.com/ayu-theme/ayu-colors" -const license = { - type: "MIT", - url: "https://github.com/ayu-theme/ayu-colors/blob/master/license", +export const meta = { + ...themeMeta, + name: `${themeMeta.name} Mirage` } const variant = ayu.mirage const theme = buildTheme(variant, false) export const dark = createColorScheme( - `${name} Mirage`, + meta.name, false, theme.ramps, theme.syntax From 7ba094e10e64024927dad79446923ac62eaf948c Mon Sep 17 00:00:00 2001 From: Nate Butler Date: Tue, 11 Apr 2023 12:56:18 -0400 Subject: [PATCH 67/80] add license_checksum --- styles/src/themes/common/ayu-common.ts | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/styles/src/themes/common/ayu-common.ts b/styles/src/themes/common/ayu-common.ts index 7ae48f38560ba42637c31f68c4b6e5998f965f44..f08817ef492e7c56fac0e593ffd0d74d1e4d735b 100644 --- a/styles/src/themes/common/ayu-common.ts +++ b/styles/src/themes/common/ayu-common.ts @@ -2,6 +2,7 @@ import { dark, light, mirage } from "ayu" import { ThemeSyntax } from "./syntax" import chroma from "chroma-js" import { colorRamp } from "./ramps" +import { Meta } from "./colorScheme" export const ayu = { dark, @@ -72,3 +73,18 @@ export const buildSyntax = (t: typeof dark): ThemeSyntax => { function: { color: t.syntax.func.hex() }, } } + +export const meta: Meta = { + name: "Ayu", + author: "dempfi", + license: { + SPDX: "MIT", + license_text: { + https_url: + "https://raw.githubusercontent.com/dempfi/ayu/master/LICENSE", + license_checksum: + "e0af0e0d1754c18ca075649d42f5c6d9a60f8bdc03c20dfd97105f2253a94173", + }, + }, + url: "https://github.com/dempfi/ayu", +} From 727afae4ff8c8a077e481250a3a651c66024d30d Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 11 Apr 2023 10:58:01 -0700 Subject: [PATCH 68/80] Fix unit tests after fixing gpui model drop semantics co-authored-by: Antonio Scandurra --- crates/auto_update/src/auto_update.rs | 4 ++-- crates/client/src/client.rs | 8 +++++--- crates/editor/src/blink_manager.rs | 9 +++------ 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index 9075e4df1a9e01a39d4e614e371cca34e9276483..a12a5dd3a9163b8d4d1fa3149491f6aae4c61bc3 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -63,10 +63,10 @@ pub fn init(http_client: Arc, server_url: String, cx: &mut AppCo cx.observe_global::(move 
|updater, cx| { if cx.global::().auto_update { if update_subscription.is_none() { - *(&mut update_subscription) = Some(updater.start_polling(cx)) + update_subscription = Some(updater.start_polling(cx)) } } else { - (&mut update_subscription).take(); + update_subscription.take(); } }) .detach(); diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index ce808cd08d6c0200753067b2422f657675745ef8..5a00f27ddf31f042a7347262a01a6d0f228fe205 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -1649,11 +1649,13 @@ mod tests { }, ); drop(subscription1); - let _subscription2 = - client.add_message_handler(model, move |_, _: TypedEnvelope, _, _| { + let _subscription2 = client.add_message_handler( + model.clone(), + move |_, _: TypedEnvelope, _, _| { done_tx2.try_send(()).unwrap(); async { Ok(()) } - }); + }, + ); server.send(proto::Ping {}); done_rx2.next().await.unwrap(); } diff --git a/crates/editor/src/blink_manager.rs b/crates/editor/src/blink_manager.rs index 9651182bd8b3bcf44a3d4c2f1fcd040e80520579..409b6f9b0344a3f5ee9f109bab82991772a15221 100644 --- a/crates/editor/src/blink_manager.rs +++ b/crates/editor/src/blink_manager.rs @@ -15,12 +15,9 @@ pub struct BlinkManager { impl BlinkManager { pub fn new(blink_interval: Duration, cx: &mut ModelContext) -> Self { - let weak_handle = cx.weak_handle(); - cx.observe_global::(move |_, cx| { - if let Some(this) = weak_handle.upgrade(cx) { - // Make sure we blink the cursors if the setting is re-enabled - this.update(cx, |this, cx| this.blink_cursors(this.blink_epoch, cx)); - } + cx.observe_global::(move |this, cx| { + // Make sure we blink the cursors if the setting is re-enabled + this.blink_cursors(this.blink_epoch, cx) }) .detach(); From ae930bde878c5ef627a67ad2eee8de44e3dd4e39 Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Tue, 11 Apr 2023 15:30:44 -0400 Subject: [PATCH 69/80] Flip screen sharing icon states --- crates/collab_ui/src/collab_titlebar_item.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/collab_ui/src/collab_titlebar_item.rs b/crates/collab_ui/src/collab_titlebar_item.rs index 60272b66bb9407dffff48f5bf716363ee2fb8390..a505df316e0ede594e0a41d7da6f44e891d83e31 100644 --- a/crates/collab_ui/src/collab_titlebar_item.rs +++ b/crates/collab_ui/src/collab_titlebar_item.rs @@ -395,10 +395,10 @@ impl CollabTitlebarItem { let icon; let tooltip; if room.read(cx).is_screen_sharing() { - icon = "icons/disable_screen_sharing_12.svg"; + icon = "icons/enable_screen_sharing_12.svg"; tooltip = "Stop Sharing Screen" } else { - icon = "icons/enable_screen_sharing_12.svg"; + icon = "icons/disable_screen_sharing_12.svg"; tooltip = "Share Screen"; } From 61d048cb25c02ac18b4ba7f475da4bbe60336192 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 11 Apr 2023 12:37:08 -0700 Subject: [PATCH 70/80] Don't wait for host's reply before broadcasting buffer updates to guests --- crates/collab/src/rpc.rs | 44 ++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index ce5a6a0a1f767a4729f49f88fbd8864eaddb33ac..16e7577d957e3ee993acceab9a52e90987ef7358 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1610,46 +1610,42 @@ async fn update_buffer( ) -> Result<()> { session.executor.record_backtrace(); let project_id = ProjectId::from_proto(request.project_id); - let host_connection_id = { + let mut guest_connection_ids; + let mut host_connection_id = None; + { let 
collaborators = session .db() .await .project_collaborators(project_id, session.connection_id) .await?; - - let host = collaborators - .iter() - .find(|collaborator| collaborator.is_host) - .ok_or_else(|| anyhow!("host not found"))?; - host.connection_id - }; - - if host_connection_id != session.connection_id { - session - .peer - .forward_request(session.connection_id, host_connection_id, request.clone()) - .await?; + guest_connection_ids = Vec::with_capacity(collaborators.len() - 1); + for collaborator in collaborators.iter() { + if collaborator.is_host { + host_connection_id = Some(collaborator.connection_id); + } else { + guest_connection_ids.push(collaborator.connection_id); + } + } } + let host_connection_id = host_connection_id.ok_or_else(|| anyhow!("host not found"))?; session.executor.record_backtrace(); - let collaborators = session - .db() - .await - .project_collaborators(project_id, session.connection_id) - .await?; - broadcast( Some(session.connection_id), - collaborators - .iter() - .filter(|collaborator| !collaborator.is_host) - .map(|collaborator| collaborator.connection_id), + guest_connection_ids, |connection_id| { session .peer .forward_send(session.connection_id, connection_id, request.clone()) }, ); + if host_connection_id != session.connection_id { + session + .peer + .forward_request(session.connection_id, host_connection_id, request.clone()) + .await?; + } + response.send(proto::Ack {})?; Ok(()) } From c39764487cc40c62712f5a6fc83a814834fe7b79 Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Tue, 11 Apr 2023 15:45:02 -0400 Subject: [PATCH 71/80] Construct context menu in a more clear way --- crates/workspace/src/pane.rs | 88 ++++++++++++++++++------------------ 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index d489f1b723987ee8c5823127ca2e1d9e9e6431a2..31d099c04b78d9c1a5139c7ba82d4f2f4f05df25 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -1290,52 +1290,52 @@ impl Pane { let active_item_id = self.items[self.active_item_index].id(); let is_active_item = target_item_id == active_item_id; - let mut options = Vec::new(); - - // TODO: Explain why we are doing this - for the key bindings - options.push(if is_active_item { - ContextMenuItem::item("Close Active Item", CloseActiveItem) - } else { - ContextMenuItem::item( - "Close Inactive Item", - CloseItemById { - item_id: target_item_id, - pane: target_pane.clone(), - }, - ) - }); - // This should really be called "close others" and the behaviour should be dynamically based on the tab the action is ran on. 
Currenlty, this is a weird action because you can run it on a non-active tab and it will close everything by the actual active tab
-        options.push(ContextMenuItem::item(
-            "Close Inactive Items",
-            CloseInactiveItems,
-        ));
-        options.push(ContextMenuItem::item("Close Clean Items", CloseCleanItems));
-        options.push(if is_active_item {
-            ContextMenuItem::item("Close Items To The Left", CloseItemsToTheLeft)
-        } else {
-            ContextMenuItem::item(
-                "Close Items To The Left",
-                CloseItemsToTheLeftById {
-                    item_id: target_item_id,
-                    pane: target_pane.clone(),
-                },
-            )
-        });
-        options.push(if is_active_item {
-            ContextMenuItem::item("Close Items To The Right", CloseItemsToTheRight)
-        } else {
-            ContextMenuItem::item(
-                "Close Items To The Right",
-                CloseItemsToTheRightById {
-                    item_id: target_item_id,
-                    pane: target_pane.clone(),
-                },
-            )
-        });
-        options.push(ContextMenuItem::item("Close All Items", CloseAllItems));
+        // The `CloseInactiveItems` action should really be called "CloseOthers" and the behaviour should be dynamically based on the tab the action is run on. Currently, this is a weird action because you can run it on a non-active tab and it will close everything but the actual active tab

         self.tab_context_menu.update(cx, |menu, cx| {
-            menu.show(action.position, AnchorCorner::TopLeft, options, cx);
+            menu.show(
+                action.position,
+                AnchorCorner::TopLeft,
+                if is_active_item {
+                    vec![
+                        ContextMenuItem::item("Close Active Item", CloseActiveItem),
+                        ContextMenuItem::item("Close Inactive Items", CloseInactiveItems),
+                        ContextMenuItem::item("Close Clean Items", CloseCleanItems),
+                        ContextMenuItem::item("Close Items To The Left", CloseItemsToTheLeft),
+                        ContextMenuItem::item("Close Items To The Right", CloseItemsToTheRight),
+                        ContextMenuItem::item("Close All Items", CloseAllItems),
+                    ]
+                } else {
+                    // In the case of the user right-clicking on a non-active tab, for some item-closing commands we need to provide the id of the tab; for the others, we can reuse the existing command.
+                    vec![
+                        ContextMenuItem::item(
+                            "Close Inactive Item",
+                            CloseItemById {
+                                item_id: target_item_id,
+                                pane: target_pane.clone(),
+                            },
+                        ),
+                        ContextMenuItem::item("Close Inactive Items", CloseInactiveItems),
+                        ContextMenuItem::item("Close Clean Items", CloseCleanItems),
+                        ContextMenuItem::item(
+                            "Close Items To The Left",
+                            CloseItemsToTheLeftById {
+                                item_id: target_item_id,
+                                pane: target_pane.clone(),
+                            },
+                        ),
+                        ContextMenuItem::item(
+                            "Close Items To The Right",
+                            CloseItemsToTheRightById {
+                                item_id: target_item_id,
+                                pane: target_pane.clone(),
+                            },
+                        ),
+                        ContextMenuItem::item("Close All Items", CloseAllItems),
+                    ]
+                },
+                cx,
+            );
+        });
     }

From 0b52308c99417272dd3f31b98576035ed72dd161 Mon Sep 17 00:00:00 2001
From: Joseph Lyons
Date: Tue, 11 Apr 2023 16:25:42 -0400
Subject: [PATCH 72/80] Represent dirty state in item-testing code

---
 crates/workspace/src/pane.rs | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs
index 31d099c04b78d9c1a5139c7ba82d4f2f4f05df25..a54aed96f464bdcfff03e526ecdb8cd9f5c37963 100644
--- a/crates/workspace/src/pane.rs
+++ b/crates/workspace/src/pane.rs
@@ -2429,17 +2429,17 @@ mod tests {
 
         add_labeled_item(&workspace, &pane, "A", true, cx);
         add_labeled_item(&workspace, &pane, "B", false, cx);
-        add_labeled_item(&workspace, &pane, "C", false, cx);
+        add_labeled_item(&workspace, &pane, "C", true, cx);
         add_labeled_item(&workspace, &pane, "D", false, cx);
         add_labeled_item(&workspace, &pane, "E", false, cx);
-        assert_item_labels(&pane, ["A", "B", "C", "D", "E*"], cx);
+        assert_item_labels(&pane, ["A^", "B", "C^", "D", "E*"], cx);
 
         workspace.update(cx, |workspace, cx| {
             Pane::close_clean_items(workspace, &CloseCleanItems, cx);
         });
         deterministic.run_until_parked();
-        assert_item_labels(&pane, ["A*"], cx);
+        assert_item_labels(&pane, ["A^", "C*^"], cx);
     }
 
     #[gpui::test]
@@ -2597,6 +2597,9 @@ mod tests {
             if ix == pane.active_item_index {
                 state.push('*');
             }
+            if item.is_dirty(cx) {
+                state.push('^');
+            }
             state
         })
         .collect::<Vec<_>>();

From 12a286ac509830333975d7e70e47c923abfbb2c8 Mon Sep 17 00:00:00 2001
From: Antonio Scandurra
Date: Wed, 12 Apr 2023 09:30:34 +0200
Subject: [PATCH 73/80] Forget buffered operations when resyncing with the host

Previously, we could end up in a situation where the host did not see an
operation but a guest that didn't have that buffer open did. When such a
guest finally opened the buffer, it would apply the operation without ever
sending it to the host, and it wouldn't bother resyncing the buffer because
it wasn't part of its open buffers.
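Sketched with simplified types (the real guest state is the
`OpenBuffer::Operations` variant touched below), the fix is to drop any
operations that were buffered for unopened buffers whenever the guest resyncs:

    use std::collections::HashMap;

    enum OpenBuffer {
        Open,                    // buffer is open and synchronized directly
        Operations(Vec<String>), // operations buffered for a buffer never opened
    }

    // On resync, forget buffered operations the host may never have seen;
    // keeping them would let the guest later apply an edit the host lacks.
    fn forget_buffered_operations(buffers: &mut HashMap<u64, OpenBuffer>) {
        buffers.retain(|_, buffer| !matches!(buffer, OpenBuffer::Operations(_)));
    }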
--- crates/project/src/project.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 90985f881040512110759fa9647ad80205a6089b..9192c7a411de149b7d7daa715ccd4eb00cb73f70 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4725,6 +4725,8 @@ impl Project { } if is_host { + this.opened_buffers + .retain(|_, buffer| !matches!(buffer, OpenBuffer::Operations(_))); this.buffer_changes_tx .unbounded_send(BufferMessage::Resync) .unwrap(); From afbd275f4f56f1e3c7cebdd32179ded2e7ec54f9 Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Wed, 12 Apr 2023 13:31:39 -0400 Subject: [PATCH 74/80] v0.83.x dev --- Cargo.lock | 2 +- crates/zed/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4cc7ebc094c731b3f40bdcf5b86c676db101090b..31013af33c03b486076463c57cbc061765e0bf89 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8516,7 +8516,7 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zed" -version = "0.82.0" +version = "0.83.0" dependencies = [ "activity_indicator", "anyhow", diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 84fb84644e24e695fd1cdb571ab9ea15727cb1c2..180a1fce24bf3b1a124e2a6f74349d1fcf1bf88e 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] description = "The fast, collaborative code editor." edition = "2021" name = "zed" -version = "0.82.0" +version = "0.83.0" publish = false [lib] From a85c2d71ad62779ab2aae29122a382426bd056b2 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 12 Apr 2023 11:11:32 -0700 Subject: [PATCH 75/80] collab 0.8.3 --- Cargo.lock | 2 +- crates/collab/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 31013af33c03b486076463c57cbc061765e0bf89..365f383b4875cddec79435f9550648a6cc5ee86a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1192,7 +1192,7 @@ dependencies = [ [[package]] name = "collab" -version = "0.8.2" +version = "0.8.3" dependencies = [ "anyhow", "async-tungstenite", diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index b85d9992986e94060eaf7ab14ae4d41dd60dc7f9..2891fe30109725f4e56b123539b181bdb551c290 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Nathan Sobo "] default-run = "collab" edition = "2021" name = "collab" -version = "0.8.2" +version = "0.8.3" publish = false [[bin]] From 2d97387f49e27e823d29339a3c85526a4732326d Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 11 Apr 2023 15:15:45 -0700 Subject: [PATCH 76/80] Restructure background scanner to handle refresh requests even while scanning --- crates/project/src/project_tests.rs | 2 +- crates/project/src/worktree.rs | 629 ++++++++++++---------------- crates/sum_tree/src/tree_map.rs | 6 + 3 files changed, 280 insertions(+), 357 deletions(-) diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index 95d49da1dfcc3fe6f4dc926f93be4261aaf79ada..b4bcba24db5938c503896c41dc1b0d82f9603bcd 100644 --- a/crates/project/src/project_tests.rs +++ b/crates/project/src/project_tests.rs @@ -2183,7 +2183,7 @@ async fn test_apply_code_actions_with_commands(cx: &mut gpui::TestAppContext) { }); } -#[gpui::test] +#[gpui::test(iterations = 10)] async fn test_save_file(cx: &mut gpui::TestAppContext) { let fs = FakeFs::new(cx.background()); fs.insert_tree( diff --git 
a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 1bc15257a0920e78a1c08f7e1603cffca6058d79..7df9a595dc4d213a4aeda7115ea3a6d6c5296ae9 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -12,7 +12,9 @@ use futures::{ mpsc::{self, UnboundedSender}, oneshot, }, - select_biased, Stream, StreamExt, + select_biased, + task::Poll, + Stream, StreamExt, }; use fuzzy::CharBag; use git::{DOT_GIT, GITIGNORE}; @@ -41,11 +43,11 @@ use std::{ mem, ops::{Deref, DerefMut}, path::{Path, PathBuf}, + pin::Pin, sync::{ atomic::{AtomicUsize, Ordering::SeqCst}, Arc, }, - task::Poll, time::{Duration, SystemTime}, }; use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet}; @@ -154,20 +156,12 @@ impl DerefMut for LocalSnapshot { } enum ScanState { - /// The worktree is performing its initial scan of the filesystem. - Initializing { - snapshot: LocalSnapshot, - barrier: Option, - }, - Initialized { - snapshot: LocalSnapshot, - }, - /// The worktree is updating in response to filesystem events. - Updating, + Started, Updated { snapshot: LocalSnapshot, changes: HashMap, PathChange>, barrier: Option, + scanning: bool, }, } @@ -244,9 +238,24 @@ impl Worktree { cx.spawn_weak(|this, mut cx| async move { while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) { this.update(&mut cx, |this, cx| { - this.as_local_mut() - .unwrap() - .background_scanner_updated(state, cx); + let this = this.as_local_mut().unwrap(); + match state { + ScanState::Started => { + *this.is_scanning.0.borrow_mut() = true; + } + ScanState::Updated { + snapshot, + changes, + barrier, + scanning, + } => { + *this.is_scanning.0.borrow_mut() = scanning; + this.set_snapshot(snapshot, cx); + cx.emit(Event::UpdatedEntries(changes)); + drop(barrier); + } + } + cx.notify(); }); } }) @@ -258,9 +267,15 @@ impl Worktree { let background = cx.background().clone(); async move { let events = fs.watch(&abs_path, Duration::from_millis(100)).await; - BackgroundScanner::new(snapshot, scan_states_tx, fs, background) - .run(events, path_changes_rx) - .await; + BackgroundScanner::new( + snapshot, + fs, + scan_states_tx, + background, + path_changes_rx, + ) + .run(events) + .await; } }); @@ -533,38 +548,6 @@ impl LocalWorktree { Ok(updated) } - fn background_scanner_updated( - &mut self, - scan_state: ScanState, - cx: &mut ModelContext, - ) { - match scan_state { - ScanState::Initializing { snapshot, barrier } => { - *self.is_scanning.0.borrow_mut() = true; - self.set_snapshot(snapshot, cx); - drop(barrier); - } - ScanState::Initialized { snapshot } => { - *self.is_scanning.0.borrow_mut() = false; - self.set_snapshot(snapshot, cx); - } - ScanState::Updating => { - *self.is_scanning.0.borrow_mut() = true; - } - ScanState::Updated { - snapshot, - changes, - barrier, - } => { - *self.is_scanning.0.borrow_mut() = false; - cx.emit(Event::UpdatedEntries(changes)); - self.set_snapshot(snapshot, cx); - drop(barrier); - } - } - cx.notify(); - } - fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext) { let updated_repos = Self::changed_repos( &self.snapshot.git_repositories, @@ -1337,14 +1320,6 @@ impl Snapshot { &self.root_name } - pub fn scan_started(&mut self) { - self.scan_id += 1; - } - - pub fn scan_completed(&mut self) { - self.completed_scan_id = self.scan_id; - } - pub fn scan_id(&self) -> usize { self.scan_id } @@ -1539,17 +1514,20 @@ impl LocalSnapshot { return; }; + match parent_entry.kind { + EntryKind::PendingDir => { + parent_entry.kind = EntryKind::Dir; + 
} + EntryKind::Dir => {} + _ => return, + } + if let Some(ignore) = ignore { self.ignores_by_parent_abs_path.insert( self.abs_path.join(&parent_path).into(), (ignore, self.scan_id), ); } - if matches!(parent_entry.kind, EntryKind::PendingDir) { - parent_entry.kind = EntryKind::Dir; - } else { - unreachable!(); - } if parent_path.file_name() == Some(&DOT_GIT) { let abs_path = self.abs_path.join(&parent_path); @@ -2135,53 +2113,47 @@ impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey { } struct BackgroundScanner { - fs: Arc, snapshot: Mutex, - notify: UnboundedSender, + fs: Arc, + status_updates_tx: UnboundedSender, executor: Arc, + refresh_requests_rx: channel::Receiver<(Vec, barrier::Sender)>, + prev_state: Mutex<(Snapshot, Vec>)>, + finished_initial_scan: bool, } impl BackgroundScanner { fn new( snapshot: LocalSnapshot, - notify: UnboundedSender, fs: Arc, + status_updates_tx: UnboundedSender, executor: Arc, + refresh_requests_rx: channel::Receiver<(Vec, barrier::Sender)>, ) -> Self { Self { fs, - snapshot: Mutex::new(snapshot), - notify, + status_updates_tx, executor, + refresh_requests_rx, + prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())), + snapshot: Mutex::new(snapshot), + finished_initial_scan: false, } } - fn abs_path(&self) -> Arc { - self.snapshot.lock().abs_path.clone() - } - async fn run( - self, - events_rx: impl Stream>, - mut changed_paths: channel::Receiver<(Vec, barrier::Sender)>, + &mut self, + mut events_rx: Pin>>>, ) { use futures::FutureExt as _; - // Retrieve the basic properties of the root node. - let root_char_bag; - let root_abs_path; - let root_inode; - let root_is_dir; - let next_entry_id; - { - let mut snapshot = self.snapshot.lock(); - snapshot.scan_started(); - root_char_bag = snapshot.root_char_bag; - root_abs_path = snapshot.abs_path.clone(); - root_inode = snapshot.root_entry().map(|e| e.inode); - root_is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir()); - next_entry_id = snapshot.next_entry_id.clone(); - } + let (root_abs_path, root_inode) = { + let snapshot = self.snapshot.lock(); + ( + snapshot.abs_path.clone(), + snapshot.root_entry().map(|e| e.inode), + ) + }; // Populate ignores above the root. let ignore_stack; @@ -2205,198 +2177,191 @@ impl BackgroundScanner { } }; - if root_is_dir { - let mut ancestor_inodes = TreeSet::default(); - if let Some(root_inode) = root_inode { - ancestor_inodes.insert(root_inode); - } + // Perform an initial scan of the directory. + let (scan_job_tx, scan_job_rx) = channel::unbounded(); + smol::block_on(scan_job_tx.send(ScanJob { + abs_path: root_abs_path, + path: Arc::from(Path::new("")), + ignore_stack, + ancestor_inodes: TreeSet::from_ordered_entries(root_inode), + scan_queue: scan_job_tx.clone(), + })) + .unwrap(); + drop(scan_job_tx); + self.scan_dirs(true, scan_job_rx).await; - let (tx, rx) = channel::unbounded(); - self.executor - .block(tx.send(ScanJob { - abs_path: root_abs_path.to_path_buf(), - path: Arc::from(Path::new("")), - ignore_stack, - ancestor_inodes, - scan_queue: tx.clone(), - })) - .unwrap(); - drop(tx); + // Process any any FS events that occurred while performing the initial scan. + // For these events, update events cannot be as precise, because we didn't + // have the previous state loaded yet. 
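The `futures::poll!` loop that follows drains whatever events the stream has already buffered, without awaiting new ones. A minimal, self-contained sketch of that drain pattern (hypothetical channel and payloads, not the worktree code):

    use futures::{channel::mpsc, StreamExt};
    use std::task::Poll;

    fn main() {
        let (tx, mut rx) = mpsc::unbounded::<u32>();
        tx.unbounded_send(1).unwrap();
        tx.unbounded_send(2).unwrap();

        let batch = futures::executor::block_on(async {
            let mut batch = Vec::new();
            // `poll!` checks the stream exactly once per iteration;
            // `Poll::Pending` (or a closed stream) ends the drain instead
            // of parking the task.
            while let Poll::Ready(Some(item)) = futures::poll!(rx.next()) {
                batch.push(item);
            }
            batch
        });
        assert_eq!(batch, vec![1, 2]);
    }

The scanner uses this trick twice: once to batch events that arrived during the initial scan, and again in the steady-state loop to coalesce bursts of events into a single `process_events` call.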
+ if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) { + let mut paths = events.into_iter().map(|e| e.path).collect::>(); + while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) { + paths.extend(more_events.into_iter().map(|e| e.path)); + } + self.process_events(paths).await; + } - let progress_update_count = AtomicUsize::new(0); - self.executor - .scoped(|scope| { - for _ in 0..self.executor.num_cpus() { - scope.spawn(async { - let mut last_progress_update_count = 0; - let progress_update_timer = self.pause_between_progress_updates().fuse(); - futures::pin_mut!(progress_update_timer); - loop { - select_biased! { - // Send periodic progress updates to the worktree. Use an atomic counter - // to ensure that only one of the workers sends a progress update after - // the update interval elapses. - _ = progress_update_timer => { - match progress_update_count.compare_exchange( - last_progress_update_count, - last_progress_update_count + 1, - SeqCst, - SeqCst - ) { - Ok(_) => { - last_progress_update_count += 1; - if self - .notify - .unbounded_send(ScanState::Initializing { - snapshot: self.snapshot.lock().clone(), - barrier: None, - }) - .is_err() - { - break; - } - } - Err(current_count) => last_progress_update_count = current_count, - } - progress_update_timer.set(self.pause_between_progress_updates().fuse()); - } + self.finished_initial_scan = true; - // Refresh any paths requested by the main thread. - job = changed_paths.recv().fuse() => { - let Ok((abs_paths, barrier)) = job else { break }; - self.update_entries_for_paths(abs_paths, None).await; - if self - .notify - .unbounded_send(ScanState::Initializing { - snapshot: self.snapshot.lock().clone(), - barrier: Some(barrier), - }) - .is_err() - { - break; - } - } + // Continue processing events until the worktree is dropped. + loop { + select_biased! { + // Process any path refresh requests from the worktree. Prioritize + // these before handling changes reported by the filesystem. + request = self.refresh_requests_rx.recv().fuse() => { + let Ok((paths, barrier)) = request else { break }; + self.reload_entries_for_paths(paths, None).await; + if !self.send_status_update(false, Some(barrier)) { + break; + } + } - // Recursively load directories from the file system. 
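Both the old worker loop (removed below) and its replacement in `scan_dirs` give every worker its own progress timer but let only one of them send each periodic update, by racing a `compare_exchange` on a shared counter. A standalone sketch of that dedup, with two simulated workers and no timers:

    use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

    fn main() {
        let update_count = AtomicUsize::new(0);
        // Each worker remembers the last counter value it observed.
        let mut last = [0usize; 2];

        // Pretend both workers' timers fired for the same interval.
        for worker in 0..2usize {
            match update_count.compare_exchange(last[worker], last[worker] + 1, SeqCst, SeqCst) {
                Ok(_) => {
                    last[worker] += 1;
                    println!("worker {worker} sends this progress update");
                }
                Err(current) => {
                    // Another worker already bumped the counter for this
                    // interval; record the new value and stay quiet.
                    last[worker] = current;
                    println!("worker {worker} skips it");
                }
            }
        }
    }

Worker 0 wins the exchange (0 → 1); worker 1 fails with `current == 1`. Only one status update goes out per interval no matter how many workers are scanning.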
- job = rx.recv().fuse() => { - let Ok(job) = job else { break }; - if let Err(err) = self - .scan_dir(root_char_bag, next_entry_id.clone(), &job) - .await - { - log::error!("error scanning {:?}: {}", job.abs_path, err); - } - } - } - } - }); + events = events_rx.next().fuse() => { + let Some(events) = events else { break }; + let mut paths = events.into_iter().map(|e| e.path).collect::>(); + while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) { + paths.extend(more_events.into_iter().map(|e| e.path)); } - }) - .await; + self.process_events(paths).await; + } + } + } + } + + async fn process_events(&mut self, paths: Vec) { + let (scan_job_tx, scan_job_rx) = channel::unbounded(); + if let Some(mut paths) = self + .reload_entries_for_paths(paths, Some(scan_job_tx.clone())) + .await + { + paths.sort_unstable(); + util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp); } + drop(scan_job_tx); + self.scan_dirs(false, scan_job_rx).await; + } - self.snapshot.lock().scan_completed(); + async fn scan_dirs( + &self, + enable_progress_updates: bool, + scan_jobs_rx: channel::Receiver, + ) { + use futures::FutureExt as _; + self.snapshot.lock().scan_id += 1; if self - .notify - .unbounded_send(ScanState::Initialized { - snapshot: self.snapshot.lock().clone(), - }) + .status_updates_tx + .unbounded_send(ScanState::Started) .is_err() { return; } - // Process any events that occurred while performing the initial scan. These - // events can't be reported as precisely, because there is no snapshot of the - // worktree before they occurred. - futures::pin_mut!(events_rx); - if let Poll::Ready(Some(mut events)) = futures::poll!(events_rx.next()) { - while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) { - events.extend(additional_events); - } - let abs_paths = events.into_iter().map(|e| e.path).collect(); - if self.notify.unbounded_send(ScanState::Updating).is_err() { - return; - } - if let Some(changes) = self.process_events(abs_paths, true).await { - if self - .notify - .unbounded_send(ScanState::Updated { - snapshot: self.snapshot.lock().clone(), - changes, - barrier: None, - }) - .is_err() - { - return; - } - } else { - return; - } - } + let progress_update_count = AtomicUsize::new(0); + self.executor + .scoped(|scope| { + for _ in 0..self.executor.num_cpus() { + scope.spawn(async { + let mut last_progress_update_count = 0; + let progress_update_timer = self.progress_timer(enable_progress_updates).fuse(); + futures::pin_mut!(progress_update_timer); + + loop { + select_biased! { + // Process any path refresh requests before moving on to process + // the scan queue, so that user operations are prioritized. + request = self.refresh_requests_rx.recv().fuse() => { + let Ok((paths, barrier)) = request else { break }; + self.reload_entries_for_paths(paths, None).await; + if !self.send_status_update(false, Some(barrier)) { + return; + } + } - // Continue processing events until the worktree is dropped. - loop { - let barrier; - let abs_paths; - select_biased! { - request = changed_paths.next().fuse() => { - let Some((paths, b)) = request else { break }; - abs_paths = paths; - barrier = Some(b); - } - events = events_rx.next().fuse() => { - let Some(events) = events else { break }; - abs_paths = events.into_iter().map(|e| e.path).collect(); - barrier = None; - } - } + // Send periodic progress updates to the worktree. 
Use an atomic counter + // to ensure that only one of the workers sends a progress update after + // the update interval elapses. + _ = progress_update_timer => { + match progress_update_count.compare_exchange( + last_progress_update_count, + last_progress_update_count + 1, + SeqCst, + SeqCst + ) { + Ok(_) => { + last_progress_update_count += 1; + self.send_status_update(true, None); + } + Err(count) => { + last_progress_update_count = count; + } + } + progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse()); + } - if self.notify.unbounded_send(ScanState::Updating).is_err() { - return; - } - if let Some(changes) = self.process_events(abs_paths, false).await { - if self - .notify - .unbounded_send(ScanState::Updated { - snapshot: self.snapshot.lock().clone(), - changes, - barrier, + // Recursively load directories from the file system. + job = scan_jobs_rx.recv().fuse() => { + let Ok(job) = job else { break }; + if let Err(err) = self.scan_dir(&job).await { + if job.path.as_ref() != Path::new("") { + log::error!("error scanning directory {:?}: {}", job.abs_path, err); + } + } + } + } + } }) - .is_err() - { - return; } - } else { - return; - } - } - } + }) + .await; - async fn pause_between_progress_updates(&self) { - #[cfg(any(test, feature = "test-support"))] - if self.fs.is_fake() { - return self.executor.simulate_random_delay().await; - } - smol::Timer::after(Duration::from_millis(100)).await; + self.update_ignore_statuses().await; + + let mut snapshot = self.snapshot.lock(); + let mut git_repositories = mem::take(&mut snapshot.git_repositories); + git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some()); + snapshot.git_repositories = git_repositories; + snapshot.removed_entry_ids.clear(); + snapshot.completed_scan_id = snapshot.scan_id; + drop(snapshot); + + self.send_status_update(false, None); + } + + fn send_status_update(&self, scanning: bool, barrier: Option) -> bool { + let mut prev_state = self.prev_state.lock(); + let snapshot = self.snapshot.lock().clone(); + let mut old_snapshot = snapshot.snapshot.clone(); + mem::swap(&mut old_snapshot, &mut prev_state.0); + let changed_paths = mem::take(&mut prev_state.1); + let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths); + self.status_updates_tx + .unbounded_send(ScanState::Updated { + snapshot, + changes, + scanning, + barrier, + }) + .is_ok() } - async fn scan_dir( - &self, - root_char_bag: CharBag, - next_entry_id: Arc, - job: &ScanJob, - ) -> Result<()> { + async fn scan_dir(&self, job: &ScanJob) -> Result<()> { let mut new_entries: Vec = Vec::new(); let mut new_jobs: Vec> = Vec::new(); let mut ignore_stack = job.ignore_stack.clone(); let mut new_ignore = None; - + let (root_abs_path, root_char_bag, next_entry_id) = { + let snapshot = self.snapshot.lock(); + ( + snapshot.abs_path().clone(), + snapshot.root_char_bag, + snapshot.next_entry_id.clone(), + ) + }; let mut child_paths = self.fs.read_dir(&job.abs_path).await?; while let Some(child_abs_path) = child_paths.next().await { - let child_abs_path = match child_abs_path { - Ok(child_abs_path) => child_abs_path, + let child_abs_path: Arc = match child_abs_path { + Ok(child_abs_path) => child_abs_path.into(), Err(error) => { log::error!("error processing entry {:?}", error); continue; @@ -2419,8 +2384,7 @@ impl BackgroundScanner { match build_gitignore(&child_abs_path, self.fs.as_ref()).await { Ok(ignore) => { let ignore = Arc::new(ignore); - ignore_stack = - 
ignore_stack.append(job.abs_path.as_path().into(), ignore.clone()); + ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone()); new_ignore = Some(ignore); } Err(error) => { @@ -2438,7 +2402,7 @@ impl BackgroundScanner { // new jobs as well. let mut new_jobs = new_jobs.iter_mut(); for entry in &mut new_entries { - let entry_abs_path = self.abs_path().join(&entry.path); + let entry_abs_path = root_abs_path.join(&entry.path); entry.is_ignored = ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir()); @@ -2507,60 +2471,7 @@ impl BackgroundScanner { Ok(()) } - async fn process_events( - &self, - abs_paths: Vec, - received_before_initialized: bool, - ) -> Option, PathChange>> { - let (scan_queue_tx, scan_queue_rx) = channel::unbounded(); - - let prev_snapshot = { - let mut snapshot = self.snapshot.lock(); - snapshot.scan_started(); - snapshot.clone() - }; - - let event_paths = self - .update_entries_for_paths(abs_paths, Some(scan_queue_tx)) - .await?; - - // Scan any directories that were created as part of this event batch. - self.executor - .scoped(|scope| { - for _ in 0..self.executor.num_cpus() { - scope.spawn(async { - while let Ok(job) = scan_queue_rx.recv().await { - if let Err(err) = self - .scan_dir( - prev_snapshot.root_char_bag, - prev_snapshot.next_entry_id.clone(), - &job, - ) - .await - { - log::error!("error scanning {:?}: {}", job.abs_path, err); - } - } - }); - } - }) - .await; - - // Attempt to detect renames only over a single batch of file-system events. - self.snapshot.lock().removed_entry_ids.clear(); - - self.update_ignore_statuses().await; - self.update_git_repositories(); - let changes = self.build_change_set( - prev_snapshot.snapshot, - event_paths, - received_before_initialized, - ); - self.snapshot.lock().scan_completed(); - Some(changes) - } - - async fn update_entries_for_paths( + async fn reload_entries_for_paths( &self, mut abs_paths: Vec, scan_queue_tx: Option>, @@ -2569,7 +2480,7 @@ impl BackgroundScanner { abs_paths.dedup_by(|a, b| a.starts_with(&b)); let root_abs_path = self.snapshot.lock().abs_path.clone(); - let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.ok()?; + let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?; let metadata = futures::future::join_all( abs_paths .iter() @@ -2579,29 +2490,29 @@ impl BackgroundScanner { .await; let mut snapshot = self.snapshot.lock(); - if scan_queue_tx.is_some() { - for abs_path in &abs_paths { - if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) { + let doing_recursive_update = scan_queue_tx.is_some(); + + // Remove any entries for paths that no longer exist or are being recursively + // refreshed. Do this before adding any new entries, so that renames can be + // detected regardless of the order of the paths. 
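At the top of this hunk, `reload_entries_for_paths` sorts the changed paths and then collapses any path that lives under an earlier one, since rescanning the parent covers it anyway. That `sort` + `dedup_by` trick in isolation:

    use std::path::PathBuf;

    fn main() {
        let mut paths: Vec<PathBuf> =
            vec!["/root/a/b".into(), "/root/a".into(), "/root/c".into()];
        paths.sort_unstable();
        // After sorting, a descendant always follows its ancestor, so
        // `dedup_by` drops the later element `a` whenever it starts with
        // the retained earlier element `b`.
        paths.dedup_by(|a, b| a.starts_with(&b));
        assert_eq!(
            paths,
            vec![PathBuf::from("/root/a"), PathBuf::from("/root/c")]
        );
    }

`dedup_by` only compares neighbors, which is exactly why the sort must happen first.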
+ let mut event_paths = Vec::>::with_capacity(abs_paths.len()); + for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) { + if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) { + if matches!(metadata, Ok(None)) || doing_recursive_update { snapshot.remove_path(path); } + event_paths.push(path.into()); + } else { + log::error!( + "unexpected event {:?} for root path {:?}", + abs_path, + root_canonical_path + ); } } - let mut event_paths = Vec::with_capacity(abs_paths.len()); - for (abs_path, metadata) in abs_paths.into_iter().zip(metadata.into_iter()) { - let path: Arc = match abs_path.strip_prefix(&root_canonical_path) { - Ok(path) => Arc::from(path.to_path_buf()), - Err(_) => { - log::error!( - "unexpected event {:?} for root path {:?}", - abs_path, - root_canonical_path - ); - continue; - } - }; - event_paths.push(path.clone()); - let abs_path = root_abs_path.join(&path); + for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) { + let abs_path: Arc = root_abs_path.join(&path).into(); match metadata { Ok(Some(metadata)) => { @@ -2626,15 +2537,14 @@ impl BackgroundScanner { let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path); if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) { ancestor_inodes.insert(metadata.inode); - self.executor - .block(scan_queue_tx.send(ScanJob { - abs_path, - path, - ignore_stack, - ancestor_inodes, - scan_queue: scan_queue_tx.clone(), - })) - .unwrap(); + smol::block_on(scan_queue_tx.send(ScanJob { + abs_path, + path, + ignore_stack, + ancestor_inodes, + scan_queue: scan_queue_tx.clone(), + })) + .unwrap(); } } } @@ -2710,13 +2620,6 @@ impl BackgroundScanner { .await; } - fn update_git_repositories(&self) { - let mut snapshot = self.snapshot.lock(); - let mut git_repositories = mem::take(&mut snapshot.git_repositories); - git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some()); - snapshot.git_repositories = git_repositories; - } - async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) { let mut ignore_stack = job.ignore_stack; if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) { @@ -2728,7 +2631,7 @@ impl BackgroundScanner { let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap(); for mut entry in snapshot.child_entries(path).cloned() { let was_ignored = entry.is_ignored; - let abs_path = self.abs_path().join(&entry.path); + let abs_path = snapshot.abs_path().join(&entry.path); entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir()); if entry.is_dir() { let child_ignore_stack = if entry.is_ignored { @@ -2762,16 +2665,16 @@ impl BackgroundScanner { fn build_change_set( &self, - old_snapshot: Snapshot, + old_snapshot: &Snapshot, + new_snapshot: &Snapshot, event_paths: Vec>, - received_before_initialized: bool, ) -> HashMap, PathChange> { use PathChange::{Added, AddedOrUpdated, Removed, Updated}; - let new_snapshot = self.snapshot.lock(); let mut changes = HashMap::default(); let mut old_paths = old_snapshot.entries_by_path.cursor::(); let mut new_paths = new_snapshot.entries_by_path.cursor::(); + let received_before_initialized = !self.finished_initial_scan; for path in event_paths { let path = PathKey(path); @@ -2799,9 +2702,9 @@ impl BackgroundScanner { // If the worktree was not fully initialized when this event was generated, // we can't know whether this entry was added during the scan or whether // it was merely updated. 
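`build_change_set` above classifies each changed path by walking cursors over the old and new snapshots in parallel. A simplified sketch of that merge join, over plain sorted `(path, mtime)` slices rather than sum-tree cursors, and diffing everything instead of only the event paths:

    use std::collections::HashMap;

    #[derive(Debug, PartialEq)]
    enum PathChange { Added, Removed, Updated }

    fn diff<'a>(
        old: &[(&'a str, u64)],
        new: &[(&'a str, u64)],
    ) -> HashMap<&'a str, PathChange> {
        let mut changes = HashMap::new();
        let (mut i, mut j) = (0, 0);
        loop {
            match (old.get(i), new.get(j)) {
                (Some(&(old_path, old_mtime)), Some(&(new_path, new_mtime))) => {
                    if old_path < new_path {
                        // Present before, absent now.
                        changes.insert(old_path, PathChange::Removed);
                        i += 1;
                    } else if old_path > new_path {
                        // Absent before, present now.
                        changes.insert(new_path, PathChange::Added);
                        j += 1;
                    } else {
                        if old_mtime != new_mtime {
                            changes.insert(new_path, PathChange::Updated);
                        }
                        i += 1;
                        j += 1;
                    }
                }
                (Some(&(old_path, _)), None) => {
                    changes.insert(old_path, PathChange::Removed);
                    i += 1;
                }
                (None, Some(&(new_path, _))) => {
                    changes.insert(new_path, PathChange::Added);
                    j += 1;
                }
                (None, None) => break,
            }
        }
        changes
    }

    fn main() {
        let old = [("a", 1), ("b", 1)];
        let new = [("b", 2), ("c", 1)];
        let changes = diff(&old, &new);
        assert_eq!(changes[&"a"], PathChange::Removed);
        assert_eq!(changes[&"b"], PathChange::Updated);
        assert_eq!(changes[&"c"], PathChange::Added);
    }

Because both sides advance in path order, each entry is visited once, which is what keeps change-set construction linear in the number of affected entries.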
- changes.insert(old_entry.path.clone(), AddedOrUpdated); + changes.insert(new_entry.path.clone(), AddedOrUpdated); } else if old_entry.mtime != new_entry.mtime { - changes.insert(old_entry.path.clone(), Updated); + changes.insert(new_entry.path.clone(), Updated); } old_paths.next(&()); new_paths.next(&()); @@ -2826,6 +2729,19 @@ impl BackgroundScanner { } changes } + + async fn progress_timer(&self, running: bool) { + if !running { + return futures::future::pending().await; + } + + #[cfg(any(test, feature = "test-support"))] + if self.fs.is_fake() { + return self.executor.simulate_random_delay().await; + } + + smol::Timer::after(Duration::from_millis(100)).await; + } } fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag { @@ -2839,7 +2755,7 @@ fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag { } struct ScanJob { - abs_path: PathBuf, + abs_path: Arc, path: Arc, ignore_stack: Arc, scan_queue: Sender, @@ -3524,7 +3440,7 @@ mod tests { let fs = FakeFs::new(cx.background()); fs.insert_tree( - "/a", + "/root", json!({ "b": {}, "c": {}, @@ -3535,7 +3451,7 @@ mod tests { let tree = Worktree::local( client, - "/a".as_ref(), + "/root".as_ref(), true, fs, Default::default(), @@ -3555,6 +3471,7 @@ mod tests { assert!(entry.is_dir()); cx.foreground().run_until_parked(); + tree.read_with(cx, |tree, _| { assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir); }); diff --git a/crates/sum_tree/src/tree_map.rs b/crates/sum_tree/src/tree_map.rs index 112366cdf583cbd3f247a8a0cf9496c9bc8936c3..0778cc5294ceeb3b226bee676e236ec42efc9986 100644 --- a/crates/sum_tree/src/tree_map.rs +++ b/crates/sum_tree/src/tree_map.rs @@ -154,6 +154,12 @@ impl TreeSet where K: Clone + Debug + Default + Ord, { + pub fn from_ordered_entries(entries: impl IntoIterator) -> Self { + Self(TreeMap::from_ordered_entries( + entries.into_iter().map(|key| (key, ())), + )) + } + pub fn insert(&mut self, key: K) { self.0.insert(key, ()); } From 3d14bfd90cb0aa460e9926cc743db569bf54443d Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 12 Apr 2023 16:46:50 -0700 Subject: [PATCH 77/80] Prioritize path refresh requests over gitignore status updates --- crates/project/src/worktree.rs | 73 ++++++++++++++++++++++------------ 1 file changed, 48 insertions(+), 25 deletions(-) diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 7df9a595dc4d213a4aeda7115ea3a6d6c5296ae9..19862c2f1bc69c9d05de2b3bfc12c39560d76330 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -2248,7 +2248,6 @@ impl BackgroundScanner { ) { use futures::FutureExt as _; - self.snapshot.lock().scan_id += 1; if self .status_updates_tx .unbounded_send(ScanState::Started) @@ -2315,7 +2314,34 @@ impl BackgroundScanner { }) .await; - self.update_ignore_statuses().await; + let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded(); + let snapshot = self.update_ignore_statuses(ignore_queue_tx); + self.executor + .scoped(|scope| { + for _ in 0..self.executor.num_cpus() { + scope.spawn(async { + loop { + select_biased! { + // Process any path refresh requests before moving on to process + // the queue of ignore statuses. 
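The `select_biased!` arms here are ordered deliberately: when both a refresh request and an ignore-status job are ready, the refresh request is polled first, so user-visible operations never wait behind background gitignore work. A standalone sketch of that prioritization (hypothetical channels and messages):

    use futures::{channel::mpsc, select_biased, FutureExt, StreamExt};

    fn main() {
        let (hi_tx, mut hi_rx) = mpsc::unbounded::<&str>();
        let (lo_tx, mut lo_rx) = mpsc::unbounded::<&str>();
        // Both queues have an item ready before we select.
        hi_tx.unbounded_send("refresh request").unwrap();
        lo_tx.unbounded_send("ignore-status job").unwrap();

        futures::executor::block_on(async {
            // `select_biased!` polls its arms top to bottom, so the first
            // arm always wins a tie; plain `select!` would pick randomly.
            let first = select_biased! {
                msg = hi_rx.next().fuse() => msg,
                msg = lo_rx.next().fuse() => msg,
            };
            assert_eq!(first, Some("refresh request"));
        });
    }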
+ request = self.refresh_requests_rx.recv().fuse() => { + let Ok((paths, barrier)) = request else { break }; + self.reload_entries_for_paths(paths, None).await; + if !self.send_status_update(false, Some(barrier)) { + return; + } + } + + job = ignore_queue_rx.recv().fuse() => { + let Ok(job) = job else { break }; + self.update_ignore_status(job, &snapshot).await; + } + } + } + }); + } + }) + .await; let mut snapshot = self.snapshot.lock(); let mut git_repositories = mem::take(&mut snapshot.git_repositories); @@ -2476,6 +2502,8 @@ impl BackgroundScanner { mut abs_paths: Vec, scan_queue_tx: Option>, ) -> Option>> { + let doing_recursive_update = scan_queue_tx.is_some(); + abs_paths.sort_unstable(); abs_paths.dedup_by(|a, b| a.starts_with(&b)); @@ -2490,7 +2518,13 @@ impl BackgroundScanner { .await; let mut snapshot = self.snapshot.lock(); - let doing_recursive_update = scan_queue_tx.is_some(); + + if snapshot.completed_scan_id == snapshot.scan_id { + snapshot.scan_id += 1; + if !doing_recursive_update { + snapshot.completed_scan_id = snapshot.scan_id; + } + } // Remove any entries for paths that no longer exist or are being recursively // refreshed. Do this before adding any new entries, so that renames can be @@ -2559,7 +2593,10 @@ impl BackgroundScanner { Some(event_paths) } - async fn update_ignore_statuses(&self) { + fn update_ignore_statuses( + &self, + ignore_queue_tx: Sender, + ) -> LocalSnapshot { let mut snapshot = self.snapshot.lock().clone(); let mut ignores_to_update = Vec::new(); let mut ignores_to_delete = Vec::new(); @@ -2584,7 +2621,6 @@ impl BackgroundScanner { .remove(&parent_abs_path); } - let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded(); ignores_to_update.sort_unstable(); let mut ignores_to_update = ignores_to_update.into_iter().peekable(); while let Some(parent_abs_path) = ignores_to_update.next() { @@ -2596,28 +2632,15 @@ impl BackgroundScanner { } let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true); - ignore_queue_tx - .send(UpdateIgnoreStatusJob { - abs_path: parent_abs_path, - ignore_stack, - ignore_queue: ignore_queue_tx.clone(), - }) - .await - .unwrap(); + smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob { + abs_path: parent_abs_path, + ignore_stack, + ignore_queue: ignore_queue_tx.clone(), + })) + .unwrap(); } - drop(ignore_queue_tx); - self.executor - .scoped(|scope| { - for _ in 0..self.executor.num_cpus() { - scope.spawn(async { - while let Ok(job) = ignore_queue_rx.recv().await { - self.update_ignore_status(job, &snapshot).await; - } - }); - } - }) - .await; + snapshot } async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) { From 5ca603dbebd776aa848b5fffcd4e5d4868f6bdff Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 12 Apr 2023 18:17:29 -0700 Subject: [PATCH 78/80] Don't process gitignore updates after the initial scan --- crates/project/src/worktree.rs | 81 ++++++++++++++++++---------------- 1 file changed, 42 insertions(+), 39 deletions(-) diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 19862c2f1bc69c9d05de2b3bfc12c39560d76330..3459bd7e5d7d4fd062cdeb521c1917eb34af5594 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -2189,6 +2189,7 @@ impl BackgroundScanner { .unwrap(); drop(scan_job_tx); self.scan_dirs(true, scan_job_rx).await; + self.send_status_update(false, None); // Process any any FS events that occurred while performing the initial scan. 
// For these events, update events cannot be as precise, because we didn't @@ -2199,6 +2200,7 @@ impl BackgroundScanner { paths.extend(more_events.into_iter().map(|e| e.path)); } self.process_events(paths).await; + self.send_status_update(false, None); } self.finished_initial_scan = true; @@ -2223,12 +2225,15 @@ impl BackgroundScanner { paths.extend(more_events.into_iter().map(|e| e.path)); } self.process_events(paths).await; + self.send_status_update(false, None); } } } } async fn process_events(&mut self, paths: Vec) { + use futures::FutureExt as _; + let (scan_job_tx, scan_job_rx) = channel::unbounded(); if let Some(mut paths) = self .reload_entries_for_paths(paths, Some(scan_job_tx.clone())) @@ -2239,6 +2244,43 @@ impl BackgroundScanner { } drop(scan_job_tx); self.scan_dirs(false, scan_job_rx).await; + + let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded(); + let snapshot = self.update_ignore_statuses(ignore_queue_tx); + self.executor + .scoped(|scope| { + for _ in 0..self.executor.num_cpus() { + scope.spawn(async { + loop { + select_biased! { + // Process any path refresh requests before moving on to process + // the queue of ignore statuses. + request = self.refresh_requests_rx.recv().fuse() => { + let Ok((paths, barrier)) = request else { break }; + self.reload_entries_for_paths(paths, None).await; + if !self.send_status_update(false, Some(barrier)) { + return; + } + } + + // Recursively process directories whose ignores have changed. + job = ignore_queue_rx.recv().fuse() => { + let Ok(job) = job else { break }; + self.update_ignore_status(job, &snapshot).await; + } + } + } + }); + } + }) + .await; + + let mut snapshot = self.snapshot.lock(); + let mut git_repositories = mem::take(&mut snapshot.git_repositories); + git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some()); + snapshot.git_repositories = git_repositories; + snapshot.removed_entry_ids.clear(); + snapshot.completed_scan_id = snapshot.scan_id; } async fn scan_dirs( @@ -2313,45 +2355,6 @@ impl BackgroundScanner { } }) .await; - - let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded(); - let snapshot = self.update_ignore_statuses(ignore_queue_tx); - self.executor - .scoped(|scope| { - for _ in 0..self.executor.num_cpus() { - scope.spawn(async { - loop { - select_biased! { - // Process any path refresh requests before moving on to process - // the queue of ignore statuses. 
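Earlier in this patch, `process_events` filters `git_repositories` against the snapshot that owns it. Because `retain`'s closure needs `&snapshot` while the vector being filtered needs `&mut snapshot`, the field is moved out with `mem::take`, filtered, and moved back. That borrow-splitting dance in miniature (toy types, not the worktree's):

    use std::mem;

    #[derive(Default)]
    struct Snapshot {
        entries: Vec<String>,
        repos: Vec<String>,
    }

    impl Snapshot {
        fn has_entry(&self, path: &str) -> bool {
            self.entries.iter().any(|e| e == path)
        }
    }

    fn main() {
        let mut snapshot = Snapshot {
            entries: vec!["a/.git".into()],
            repos: vec!["a/.git".into(), "b/.git".into()],
        };
        // Take the field out first rather than holding `&mut snapshot.repos`
        // and `&snapshot` at the same time.
        let mut repos = mem::take(&mut snapshot.repos);
        repos.retain(|repo| snapshot.has_entry(repo));
        snapshot.repos = repos;
        assert_eq!(snapshot.repos, vec!["a/.git".to_string()]);
    }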
- request = self.refresh_requests_rx.recv().fuse() => { - let Ok((paths, barrier)) = request else { break }; - self.reload_entries_for_paths(paths, None).await; - if !self.send_status_update(false, Some(barrier)) { - return; - } - } - - job = ignore_queue_rx.recv().fuse() => { - let Ok(job) = job else { break }; - self.update_ignore_status(job, &snapshot).await; - } - } - } - }); - } - }) - .await; - - let mut snapshot = self.snapshot.lock(); - let mut git_repositories = mem::take(&mut snapshot.git_repositories); - git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some()); - snapshot.git_repositories = git_repositories; - snapshot.removed_entry_ids.clear(); - snapshot.completed_scan_id = snapshot.scan_id; - drop(snapshot); - - self.send_status_update(false, None); } fn send_status_update(&self, scanning: bool, barrier: Option) -> bool { From 495c7acadfbfbe62918a905a796ec6b554550699 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Thu, 13 Apr 2023 10:36:21 +0200 Subject: [PATCH 79/80] Avoid interpolating Copilot suggestion if cursor excerpt differs --- crates/editor/src/editor.rs | 3 +- crates/editor/src/editor_tests.rs | 104 ++++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+), 1 deletion(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index dd5bb7b05324eed476adc8446484c38c0afb6658..4f5d7e8f04c9f3e3962c2ef8741929ad38a73b2c 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -1040,7 +1040,8 @@ impl CopilotState { let completion = self.completions.get(self.active_completion_index)?; let excerpt_id = self.excerpt_id?; let completion_buffer = buffer.buffer_for_excerpt(excerpt_id)?; - if !completion.range.start.is_valid(completion_buffer) + if excerpt_id != cursor.excerpt_id + || !completion.range.start.is_valid(completion_buffer) || !completion.range.end.is_valid(completion_buffer) { return None; diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index f9f3e1c4f07c87feb9897418cc94b9c8b1fad526..74fad79fe83c50adda83708f88c3d68f2d2daec0 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -6163,6 +6163,110 @@ async fn test_copilot_completion_invalidation( }); } +#[gpui::test] +async fn test_copilot_multibuffer( + deterministic: Arc, + cx: &mut gpui::TestAppContext, +) { + let (copilot, copilot_lsp) = Copilot::fake(cx); + cx.update(|cx| { + cx.set_global(Settings::test(cx)); + cx.set_global(copilot) + }); + + let buffer_1 = cx.add_model(|cx| Buffer::new(0, "a = 1\nb = 2\n", cx)); + let buffer_2 = cx.add_model(|cx| Buffer::new(0, "c = 3\nd = 4\n", cx)); + let multibuffer = cx.add_model(|cx| { + let mut multibuffer = MultiBuffer::new(0); + multibuffer.push_excerpts( + buffer_1.clone(), + [ExcerptRange { + context: Point::new(0, 0)..Point::new(2, 0), + primary: None, + }], + cx, + ); + multibuffer.push_excerpts( + buffer_2.clone(), + [ExcerptRange { + context: Point::new(0, 0)..Point::new(2, 0), + primary: None, + }], + cx, + ); + multibuffer + }); + let (_, editor) = cx.add_window(|cx| build_editor(multibuffer, cx)); + + handle_copilot_completion_request( + &copilot_lsp, + vec![copilot::request::Completion { + text: "b = 2 + a".into(), + range: lsp::Range::new(lsp::Position::new(1, 0), lsp::Position::new(1, 5)), + ..Default::default() + }], + vec![], + ); + editor.update(cx, |editor, cx| { + // Ensure copilot suggestions are shown for the first excerpt. 
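The `editor.rs` fix above adds one more early-out to `CopilotState`: a cached completion is only interpolated if the cursor is still in the excerpt the completion was requested for. Reduced to its core (hypothetical ids and signature, not the real editor types):

    // `None` means "don't interpolate; drop the stale suggestion".
    fn interpolated_text<'a>(
        completion_excerpt_id: usize,
        cursor_excerpt_id: usize,
        completion_text: &'a str,
    ) -> Option<&'a str> {
        if completion_excerpt_id != cursor_excerpt_id {
            return None;
        }
        Some(completion_text)
    }

    fn main() {
        // Cursor moved to a different excerpt: the suggestion is dropped.
        assert_eq!(interpolated_text(1, 2, "b = 2 + a"), None);
        assert_eq!(interpolated_text(1, 1, "b = 2 + a"), Some("b = 2 + a"));
    }

The multibuffer test that follows exercises exactly this: moving the selection to the second excerpt must clear the suggestion instead of reinterpolating it against the wrong buffer.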
+ editor.change_selections(None, cx, |s| { + s.select_ranges([Point::new(1, 5)..Point::new(1, 5)]) + }); + editor.next_copilot_suggestion(&Default::default(), cx); + }); + deterministic.advance_clock(COPILOT_DEBOUNCE_TIMEOUT); + editor.update(cx, |editor, cx| { + assert!(editor.has_active_copilot_suggestion(cx)); + assert_eq!( + editor.display_text(cx), + "\n\na = 1\nb = 2 + a\n\n\n\nc = 3\nd = 4\n" + ); + assert_eq!(editor.text(cx), "a = 1\nb = 2\n\nc = 3\nd = 4\n"); + }); + + handle_copilot_completion_request( + &copilot_lsp, + vec![copilot::request::Completion { + text: "d = 4 + c".into(), + range: lsp::Range::new(lsp::Position::new(1, 0), lsp::Position::new(1, 6)), + ..Default::default() + }], + vec![], + ); + editor.update(cx, |editor, cx| { + // Move to another excerpt, ensuring the suggestion gets cleared. + editor.change_selections(None, cx, |s| { + s.select_ranges([Point::new(4, 5)..Point::new(4, 5)]) + }); + assert!(!editor.has_active_copilot_suggestion(cx)); + assert_eq!( + editor.display_text(cx), + "\n\na = 1\nb = 2\n\n\n\nc = 3\nd = 4\n" + ); + assert_eq!(editor.text(cx), "a = 1\nb = 2\n\nc = 3\nd = 4\n"); + + // Type a character, ensuring we don't even try to interpolate the previous suggestion. + editor.handle_input(" ", cx); + assert!(!editor.has_active_copilot_suggestion(cx)); + assert_eq!( + editor.display_text(cx), + "\n\na = 1\nb = 2\n\n\n\nc = 3\nd = 4 \n" + ); + assert_eq!(editor.text(cx), "a = 1\nb = 2\n\nc = 3\nd = 4 \n"); + }); + + // Ensure the new suggestion is displayed when the debounce timeout expires. + deterministic.advance_clock(COPILOT_DEBOUNCE_TIMEOUT); + editor.update(cx, |editor, cx| { + assert!(editor.has_active_copilot_suggestion(cx)); + assert_eq!( + editor.display_text(cx), + "\n\na = 1\nb = 2\n\n\n\nc = 3\nd = 4 + c\n" + ); + assert_eq!(editor.text(cx), "a = 1\nb = 2\n\nc = 3\nd = 4 \n"); + }); +} + fn empty_range(row: usize, column: usize) -> Range { let point = DisplayPoint::new(row as u32, column as u32); point..point From 5f0bf5929f574a60835a3f131cd9d07399efd100 Mon Sep 17 00:00:00 2001 From: Joseph Lyons Date: Tue, 11 Apr 2023 18:13:23 -0400 Subject: [PATCH 80/80] Add vim mode metric --- crates/editor/src/editor.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index dd5bb7b05324eed476adc8446484c38c0afb6658..9d1a72bf019f40f2044d5f24d2d36a429bd9c469 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -6619,13 +6619,15 @@ impl Editor { .as_singleton() .and_then(|b| b.read(cx).file()), ) { + let settings = cx.global::(); + let extension = Path::new(file.file_name(cx)) .extension() .and_then(|e| e.to_str()); project.read(cx).client().report_event( name, - json!({ "File Extension": extension }), - cx.global::().telemetry(), + json!({ "File Extension": extension, "Vim Mode": settings.vim_mode }), + settings.telemetry(), ); } }
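The final patch threads `vim_mode` from the global `Settings` into the telemetry payload alongside the file extension. A sketch of just the payload construction with `serde_json`'s `json!` macro, which the hunk already uses (the values here are invented for illustration):

    use serde_json::json;

    fn main() {
        let extension: Option<&str> = Some("rs"); // e.g. from Path::extension
        let vim_mode = true;                      // e.g. settings.vim_mode
        let event = json!({ "File Extension": extension, "Vim Mode": vim_mode });
        // An Option serializes to its value or `null`, so files with no
        // extension still produce a well-formed payload.
        assert_eq!(event["File Extension"], json!("rs"));
        assert_eq!(event["Vim Mode"], json!(true));
    }

Reading the `Settings` global once into a local also avoids a second `cx.global::<Settings>()` lookup when `settings.telemetry()` is passed a few lines later.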