use crate::{
    db::{self, NewUserParams},
    rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT},
    tests::{TestClient, TestServer},
};
use anyhow::{anyhow, Result};
use call::ActiveCall;
use client::RECEIVE_TIMEOUT;
use collections::BTreeMap;
use fs::{FakeFs, Fs as _};
use futures::StreamExt as _;
use gpui::{executor::Deterministic, ModelHandle, TestAppContext};
use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16};
use lsp::FakeLanguageServer;
use parking_lot::Mutex;
use project::{search::SearchQuery, Project};
use rand::prelude::*;
use std::{env, path::PathBuf, sync::Arc};

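// Randomized integration test: drive a `TestServer` and up to `MAX_PEERS` simulated
// clients through a sequence of random operations (connections, disconnections, server
// restarts, project and buffer mutations), then assert that all peers converged.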
#[gpui::test(iterations = 100)]
async fn test_random_collaboration(
    cx: &mut TestAppContext,
    deterministic: Arc<Deterministic>,
    rng: StdRng,
) {
    deterministic.forbid_parking();
    let rng = Arc::new(Mutex::new(rng));

    let max_peers = env::var("MAX_PEERS")
        .map(|i| i.parse().expect("invalid `MAX_PEERS` variable"))
        .unwrap_or(5);

    let max_operations = env::var("OPERATIONS")
        .map(|i| i.parse().expect("invalid `OPERATIONS` variable"))
        .unwrap_or(10);

    let mut server = TestServer::start(&deterministic).await;
    let db = server.app_state.db.clone();

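    // Create the pool of users that simulated clients will be drawn from.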
    let mut available_users = Vec::new();
    for ix in 0..max_peers {
        let username = format!("user-{}", ix + 1);
        let user_id = db
            .create_user(
                &format!("{username}@example.com"),
                false,
                NewUserParams {
                    github_login: username.clone(),
                    github_user_id: (ix + 1) as i32,
                    invite_count: 0,
                },
            )
            .await
            .unwrap()
            .user_id;
        available_users.push((user_id, username));
    }

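    // Make every pair of users contacts so any client can call any other.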
    for (ix, (user_id_a, _)) in available_users.iter().enumerate() {
        for (user_id_b, _) in &available_users[ix + 1..] {
            server
                .app_state
                .db
                .send_contact_request(*user_id_a, *user_id_b)
                .await
                .unwrap();
            server
                .app_state
                .db
                .respond_to_contact_request(*user_id_b, *user_id_a, true)
                .await
                .unwrap();
        }
    }

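    // State for the running clients plus per-run knobs controlling which kinds of
    // disruption this run is allowed to inject.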
    let mut clients = Vec::new();
    let mut user_ids = Vec::new();
    let mut op_start_signals = Vec::new();
    let mut next_entity_id = 100000;
    let allow_server_restarts = rng.lock().gen_bool(0.7);
    let allow_client_reconnection = rng.lock().gen_bool(0.7);
    let allow_client_disconnection = rng.lock().gen_bool(0.1);

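    // Main driver loop: each iteration schedules one or more random operations.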
    let mut operations = 0;
    while operations < max_operations {
        let distribution = rng.lock().gen_range(0..100);
        match distribution {
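            // Bring a new client online for one of the remaining users.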
            0..=19 if !available_users.is_empty() => {
                let client_ix = rng.lock().gen_range(0..available_users.len());
                let (_, username) = available_users.remove(client_ix);
                log::info!("Adding new connection for {}", username);
                next_entity_id += 100000;
                let mut client_cx = TestAppContext::new(
                    cx.foreground_platform(),
                    cx.platform(),
                    deterministic.build_foreground(next_entity_id),
                    deterministic.build_background(),
                    cx.font_cache(),
                    cx.leak_detector(),
                    next_entity_id,
                    cx.function_name.clone(),
                );

                let op_start_signal = futures::channel::mpsc::unbounded();
                let client = server.create_client(&mut client_cx, &username).await;
                user_ids.push(client.current_user_id(&client_cx));
                op_start_signals.push(op_start_signal.0);
                clients.push(client_cx.foreground().spawn(simulate_client(
                    client,
                    op_start_signal.1,
                    allow_client_disconnection,
                    rng.clone(),
                    client_cx,
                )));

                log::info!("Added connection for {}", username);
                operations += 1;
            }

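            // Permanently disconnect a random client (never the first one) and verify
            // that its remote projects become read-only and that it no longer appears
            // as an online contact of any other peer.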
            20..=24 if clients.len() > 1 && allow_client_disconnection => {
                let client_ix = rng.lock().gen_range(1..clients.len());
                log::info!(
                    "Simulating full disconnection of user {}",
                    user_ids[client_ix]
                );
                let removed_user_id = user_ids.remove(client_ix);
                let user_connection_ids = server
                    .connection_pool
                    .lock()
                    .user_connection_ids(removed_user_id)
                    .collect::<Vec<_>>();
                assert_eq!(user_connection_ids.len(), 1);
                let removed_peer_id = user_connection_ids[0].into();
                let client = clients.remove(client_ix);
                op_start_signals.remove(client_ix);
                server.forbid_connections();
                server.disconnect_client(removed_peer_id);
                deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
                deterministic.start_waiting();
                log::info!("Waiting for user {} to exit...", removed_user_id);
                let (client, mut client_cx) = client.await;
                deterministic.finish_waiting();
                server.allow_connections();

                for project in &client.remote_projects {
                    project.read_with(&client_cx, |project, _| {
                        assert!(
                            project.is_read_only(),
                            "project {:?} should be read only",
                            project.remote_id()
                        )
                    });
                }
                for user_id in &user_ids {
                    let contacts = server.app_state.db.get_contacts(*user_id).await.unwrap();
                    let pool = server.connection_pool.lock();
                    for contact in contacts {
                        if let db::Contact::Accepted { user_id, .. } = contact {
                            if pool.is_user_online(user_id) {
                                assert_ne!(
                                    user_id, removed_user_id,
                                    "removed client is still a contact of another peer"
                                );
                            }
                        }
                    }
                }

                log::info!("{} removed", client.username);
                available_users.push((removed_user_id, client.username.clone()));
                client_cx.update(|cx| {
                    cx.clear_globals();
                    drop(client);
                });

                operations += 1;
            }

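            // Disconnect a random client briefly; it is expected to reconnect on its own.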
            25..=29 if clients.len() > 1 && allow_client_reconnection => {
                let client_ix = rng.lock().gen_range(1..clients.len());
                let user_id = user_ids[client_ix];
                log::info!("Simulating temporary disconnection of user {}", user_id);
                let user_connection_ids = server
                    .connection_pool
                    .lock()
                    .user_connection_ids(user_id)
                    .collect::<Vec<_>>();
                assert_eq!(user_connection_ids.len(), 1);
                let peer_id = user_connection_ids[0].into();
                server.disconnect_client(peer_id);
                deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
                operations += 1;
            }

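            // Restart the server and check that no stale rooms are left behind.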
            30..=34 if allow_server_restarts => {
                log::info!("Simulating server restart");
                server.reset().await;
                deterministic.advance_clock(RECEIVE_TIMEOUT);
                server.start().await.unwrap();
                deterministic.advance_clock(CLEANUP_TIMEOUT);
                let environment = &server.app_state.config.zed_environment;
                let stale_room_ids = server
                    .app_state
                    .db
                    .stale_room_ids(environment, server.id())
                    .await
                    .unwrap();
                assert_eq!(stale_room_ids, vec![]);
            }

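            // Most of the time, signal random running clients to perform operations.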
            _ if !op_start_signals.is_empty() => {
                while operations < max_operations && rng.lock().gen_bool(0.7) {
                    op_start_signals
                        .choose(&mut *rng.lock())
                        .unwrap()
                        .unbounded_send(())
                        .unwrap();
                    operations += 1;
                }

                if rng.lock().gen_bool(0.8) {
                    deterministic.run_until_parked();
                }
            }
            _ => {}
        }
    }

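    // Wind down: close the signal channels and wait for every simulated client to finish.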
    drop(op_start_signals);
    deterministic.start_waiting();
    let clients = futures::future::join_all(clients).await;
    deterministic.finish_waiting();
    deterministic.run_until_parked();

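    // Verify that every guest's view of each shared project converged to the host's.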
    for (client, client_cx) in &clients {
        for guest_project in &client.remote_projects {
            guest_project.read_with(client_cx, |guest_project, cx| {
                let host_project = clients.iter().find_map(|(client, cx)| {
                    let project = client.local_projects.iter().find(|host_project| {
                        host_project.read_with(cx, |host_project, _| {
                            host_project.remote_id() == guest_project.remote_id()
                        })
                    })?;
                    Some((project, cx))
                });

                if !guest_project.is_read_only() {
                    if let Some((host_project, host_cx)) = host_project {
                        let host_worktree_snapshots =
                            host_project.read_with(host_cx, |host_project, cx| {
                                host_project
                                    .worktrees(cx)
                                    .map(|worktree| {
                                        let worktree = worktree.read(cx);
                                        (worktree.id(), worktree.snapshot())
                                    })
                                    .collect::<BTreeMap<_, _>>()
                            });
                        let guest_worktree_snapshots = guest_project
                            .worktrees(cx)
                            .map(|worktree| {
                                let worktree = worktree.read(cx);
                                (worktree.id(), worktree.snapshot())
                            })
                            .collect::<BTreeMap<_, _>>();

                        assert_eq!(
                            guest_worktree_snapshots.keys().collect::<Vec<_>>(),
                            host_worktree_snapshots.keys().collect::<Vec<_>>(),
                            "{} has different worktrees than the host",
                            client.username
                        );

                        for (id, host_snapshot) in &host_worktree_snapshots {
                            let guest_snapshot = &guest_worktree_snapshots[id];
                            assert_eq!(
                                guest_snapshot.root_name(),
                                host_snapshot.root_name(),
                                "{} has different root name than the host for worktree {}",
                                client.username,
                                id
                            );
                            assert_eq!(
                                guest_snapshot.abs_path(),
                                host_snapshot.abs_path(),
                                "{} has different abs path than the host for worktree {}",
                                client.username,
                                id
                            );
                            assert_eq!(
                                guest_snapshot.entries(false).collect::<Vec<_>>(),
                                host_snapshot.entries(false).collect::<Vec<_>>(),
                                "{} has different snapshot than the host for worktree {} ({:?})",
                                client.username,
                                id,
                                host_snapshot.abs_path()
                            );
                            assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id());
                        }
                    }
                }

                guest_project.check_invariants(cx);
            });
        }

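        // Verify that every guest buffer matches the corresponding buffer on the host.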
        for (guest_project, guest_buffers) in &client.buffers {
            let project_id = if guest_project.read_with(client_cx, |project, _| {
                project.is_local() || project.is_read_only()
            }) {
                continue;
            } else {
                guest_project
                    .read_with(client_cx, |project, _| project.remote_id())
                    .unwrap()
            };
            let guest_user_id = client.user_id().unwrap();

            let host_project = clients.iter().find_map(|(client, cx)| {
                let project = client.local_projects.iter().find(|host_project| {
                    host_project.read_with(cx, |host_project, _| {
                        host_project.remote_id() == Some(project_id)
                    })
                })?;
                Some((client.user_id().unwrap(), project, cx))
            });

            let (host_user_id, host_project, host_cx) =
                if let Some((host_user_id, host_project, host_cx)) = host_project {
                    (host_user_id, host_project, host_cx)
                } else {
                    continue;
                };

            for guest_buffer in guest_buffers {
                let buffer_id = guest_buffer.read_with(client_cx, |buffer, _| buffer.remote_id());
                let host_buffer = host_project.read_with(host_cx, |project, cx| {
                    project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| {
                        panic!(
                            "host does not have buffer for guest:{}, peer:{:?}, id:{}",
                            client.username,
                            client.peer_id(),
                            buffer_id
                        )
                    })
                });
                let path = host_buffer
                    .read_with(host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx));

                assert_eq!(
                    guest_buffer.read_with(client_cx, |buffer, _| buffer.deferred_ops_len()),
                    0,
                    "{}, buffer {}, path {:?} has deferred operations",
                    client.username,
                    buffer_id,
                    path,
                );
                assert_eq!(
                    guest_buffer.read_with(client_cx, |buffer, _| buffer.text()),
                    host_buffer.read_with(host_cx, |buffer, _| buffer.text()),
                    "{}, buffer {}, path {:?}, differs from the host's buffer",
                    client.username,
                    buffer_id,
                    path
                );

                let host_file = host_buffer.read_with(host_cx, |b, _| b.file().cloned());
                let guest_file = guest_buffer.read_with(client_cx, |b, _| b.file().cloned());
                match (host_file, guest_file) {
                    (Some(host_file), Some(guest_file)) => {
                        assert_eq!(guest_file.path(), host_file.path());
                        assert_eq!(guest_file.is_deleted(), host_file.is_deleted());
                        assert_eq!(
                            guest_file.mtime(),
                            host_file.mtime(),
                            "guest {} mtime does not match host {} for path {:?} in project {}",
                            guest_user_id,
                            host_user_id,
                            guest_file.path(),
                            project_id,
                        );
                    }
                    (None, None) => {}
                    (None, _) => panic!("host's file is None, guest's isn't"),
                    (_, None) => panic!("guest's file is None, host's isn't"),
                }
            }
        }
    }

    for (client, mut cx) in clients {
        cx.update(|cx| {
            cx.clear_globals();
            drop(client);
        });
    }
}

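/// Runs a single simulated client: installs a fake language server, then performs one
/// random mutation per signal received on `op_start_signal` until the channel closes.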
async fn simulate_client(
    mut client: TestClient,
    mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>,
    can_hang_up: bool,
    rng: Arc<Mutex<StdRng>>,
    mut cx: TestAppContext,
) -> (TestClient, TestAppContext) {
    // Set up the language and its fake language server.
    let mut language = Language::new(
        LanguageConfig {
            name: "Rust".into(),
            path_suffixes: vec!["rs".to_string()],
            ..Default::default()
        },
        None,
    );
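    // Register a fake LSP adapter whose handlers return canned or randomized
    // responses for the requests exercised below.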
    let _fake_language_servers = language
        .set_fake_lsp_adapter(Arc::new(FakeLspAdapter {
            name: "the-fake-language-server",
            capabilities: lsp::LanguageServer::full_capabilities(),
            initializer: Some(Box::new({
                let rng = rng.clone();
                let fs = client.fs.clone();
                move |fake_server: &mut FakeLanguageServer| {
                    fake_server.handle_request::<lsp::request::Completion, _, _>(
                        |_, _| async move {
                            Ok(Some(lsp::CompletionResponse::Array(vec![
                                lsp::CompletionItem {
                                    text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit {
                                        range: lsp::Range::new(
                                            lsp::Position::new(0, 0),
                                            lsp::Position::new(0, 0),
                                        ),
                                        new_text: "the-new-text".to_string(),
                                    })),
                                    ..Default::default()
                                },
                            ])))
                        },
                    );

                    fake_server.handle_request::<lsp::request::CodeActionRequest, _, _>(
                        |_, _| async move {
                            Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction(
                                lsp::CodeAction {
                                    title: "the-code-action".to_string(),
                                    ..Default::default()
                                },
                            )]))
                        },
                    );

                    fake_server.handle_request::<lsp::request::PrepareRenameRequest, _, _>(
                        |params, _| async move {
                            Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new(
                                params.position,
                                params.position,
                            ))))
                        },
                    );

                    fake_server.handle_request::<lsp::request::GotoDefinition, _, _>({
                        let fs = fs.clone();
                        let rng = rng.clone();
                        move |_, _| {
                            let fs = fs.clone();
                            let rng = rng.clone();
                            async move {
                                let files = fs.files().await;
                                let mut rng = rng.lock();
                                let count = rng.gen_range::<usize, _>(1..3);
                                let files = (0..count)
                                    .map(|_| files.choose(&mut *rng).unwrap())
                                    .collect::<Vec<_>>();
                                log::info!("LSP: Returning definitions in files {:?}", &files);
                                Ok(Some(lsp::GotoDefinitionResponse::Array(
                                    files
                                        .into_iter()
                                        .map(|file| lsp::Location {
                                            uri: lsp::Url::from_file_path(file).unwrap(),
                                            range: Default::default(),
                                        })
                                        .collect(),
                                )))
                            }
                        }
                    });

                    fake_server.handle_request::<lsp::request::DocumentHighlightRequest, _, _>({
                        let rng = rng.clone();
                        move |_, _| {
                            let mut highlights = Vec::new();
                            let highlight_count = rng.lock().gen_range(1..=5);
                            for _ in 0..highlight_count {
                                let start_row = rng.lock().gen_range(0..100);
                                let start_column = rng.lock().gen_range(0..100);
                                let start = PointUtf16::new(start_row, start_column);
                                let end_row = rng.lock().gen_range(0..100);
                                let end_column = rng.lock().gen_range(0..100);
                                let end = PointUtf16::new(end_row, end_column);
                                let range = if start > end { end..start } else { start..end };
                                highlights.push(lsp::DocumentHighlight {
                                    range: range_to_lsp(range.clone()),
                                    kind: Some(lsp::DocumentHighlightKind::READ),
                                });
                            }
                            highlights.sort_unstable_by_key(|highlight| {
                                (highlight.range.start, highlight.range.end)
                            });
                            async move { Ok(Some(highlights)) }
                        }
                    });
                }
            })),
            ..Default::default()
        }))
        .await;
    client.language_registry.add(Arc::new(language));

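    // Each signal from the test's driver loop triggers one random client mutation.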
    while op_start_signal.next().await.is_some() {
        if let Err(error) =
            randomly_mutate_client(&mut client, can_hang_up, rng.clone(), &mut cx).await
        {
            log::error!("{} error: {:?}", client.username, error);
        }

        cx.background().simulate_random_delay().await;
    }
    log::info!("{}: done", client.username);

    (client, cx)
}

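/// Picks one random kind of mutation (call, project, worktree, buffer, or filesystem)
/// and applies it to the client.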
async fn randomly_mutate_client(
    client: &mut TestClient,
    can_hang_up: bool,
    rng: Arc<Mutex<StdRng>>,
    cx: &mut TestAppContext,
) -> Result<()> {
    let choice = rng.lock().gen_range(0..100);
    match choice {
        0..=19 => randomly_mutate_active_call(client, can_hang_up, &rng, cx).await?,
        20..=49 => randomly_mutate_projects(client, &rng, cx).await?,
        50..=59 if !client.local_projects.is_empty() || !client.remote_projects.is_empty() => {
            randomly_mutate_worktrees(client, &rng, cx).await?;
        }
        60..=84 if !client.local_projects.is_empty() || !client.remote_projects.is_empty() => {
            randomly_query_and_mutate_buffers(client, &rng, cx).await?;
        }
        _ => randomly_mutate_fs(client, &rng).await,
    }

    Ok(())
}

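/// Accepts or declines an incoming call; otherwise randomly invites an available
/// contact or hangs up the current call.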
async fn randomly_mutate_active_call(
    client: &mut TestClient,
    can_hang_up: bool,
    rng: &Mutex<StdRng>,
    cx: &mut TestAppContext,
) -> Result<()> {
    let active_call = cx.read(ActiveCall::global);
    if active_call.read_with(cx, |call, _| call.incoming().borrow().is_some()) {
        if rng.lock().gen_bool(0.7) {
            log::info!("{}: accepting incoming call", client.username);
            active_call
                .update(cx, |call, cx| call.accept_incoming(cx))
                .await?;
        } else {
            log::info!("{}: declining incoming call", client.username);
            active_call.update(cx, |call, _| call.decline_incoming())?;
        }
    } else {
        let available_contacts = client.user_store.read_with(cx, |user_store, _| {
            user_store
                .contacts()
                .iter()
                .filter(|contact| contact.online && !contact.busy)
                .cloned()
                .collect::<Vec<_>>()
        });

        let distribution = rng.lock().gen_range(0..100);
        match distribution {
            0..=29 if !available_contacts.is_empty() => {
                let contact = available_contacts.choose(&mut *rng.lock()).unwrap();
                log::info!(
                    "{}: inviting {}",
                    client.username,
                    contact.user.github_login
                );
                active_call
                    .update(cx, |call, cx| call.invite(contact.user.id, None, cx))
                    .await?;
            }
            30..=39
                if can_hang_up && active_call.read_with(cx, |call, _| call.room().is_some()) =>
            {
                log::info!("{}: hanging up", client.username);
                active_call.update(cx, |call, cx| call.hang_up(cx))?;
            }
            _ => {}
        }
    }

    Ok(())
}

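/// Creates a random file or directory somewhere in the client's fake filesystem.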
async fn randomly_mutate_fs(client: &mut TestClient, rng: &Mutex<StdRng>) {
    let is_dir = rng.lock().gen::<bool>();
    let mut new_path = client
        .fs
        .directories()
        .await
        .choose(&mut *rng.lock())
        .unwrap()
        .clone();
    new_path.push(gen_file_name(rng));
    if is_dir {
        log::info!("{}: creating local dir at {:?}", client.username, new_path);
        client.fs.create_dir(&new_path).await.unwrap();
    } else {
        new_path.set_extension("rs");
        log::info!("{}: creating local file at {:?}", client.username, new_path);
        client
            .fs
            .create_file(&new_path, Default::default())
            .await
            .unwrap();
    }
}

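/// Opens or joins a random project, shares it when possible, and occasionally adds a
/// worktree to it or drops a remote project.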
async fn randomly_mutate_projects(
    client: &mut TestClient,
    rng: &Mutex<StdRng>,
    cx: &mut TestAppContext,
) -> Result<()> {
    let active_call = cx.read(ActiveCall::global);
    let remote_projects =
        if let Some(room) = active_call.read_with(cx, |call, _| call.room().cloned()) {
            room.read_with(cx, |room, _| {
                room.remote_participants()
                    .values()
                    .flat_map(|participant| participant.projects.clone())
                    .collect::<Vec<_>>()
            })
        } else {
            Default::default()
        };

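    // Pick a project to operate on: open or reuse a local project, or join or reuse a
    // remote project from the current room.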
    let project = if remote_projects.is_empty() || rng.lock().gen() {
        if client.local_projects.is_empty() || rng.lock().gen() {
            let paths = client.fs.paths().await;
            let local_project = if paths.is_empty() || rng.lock().gen() {
                let root_path = client.create_new_root_dir();
                client.fs.create_dir(&root_path).await.unwrap();
                client
                    .fs
                    .create_file(&root_path.join("main.rs"), Default::default())
                    .await
                    .unwrap();
                log::info!(
                    "{}: opening local project at {:?}",
                    client.username,
                    root_path
                );
                client.build_local_project(root_path, cx).await.0
            } else {
                let root_path = paths.choose(&mut *rng.lock()).unwrap();
                log::info!(
                    "{}: opening local project at {:?}",
                    client.username,
                    root_path
                );
                client.build_local_project(root_path, cx).await.0
            };
            client.local_projects.push(local_project.clone());
            local_project
        } else {
            client
                .local_projects
                .choose(&mut *rng.lock())
                .unwrap()
                .clone()
        }
    } else {
        if client.remote_projects.is_empty() || rng.lock().gen() {
            let remote_project_id = remote_projects.choose(&mut *rng.lock()).unwrap().id;
            let remote_project = if let Some(project) =
                client.remote_projects.iter().find(|project| {
                    project.read_with(cx, |project, _| {
                        project.remote_id() == Some(remote_project_id)
                    })
                }) {
                project.clone()
            } else {
                log::info!(
                    "{}: opening remote project {}",
                    client.username,
                    remote_project_id
                );
                let call = cx.read(ActiveCall::global);
                let room = call.read_with(cx, |call, _| call.room().unwrap().clone());
                let remote_project = room
                    .update(cx, |room, cx| {
                        room.join_project(
                            remote_project_id,
                            client.language_registry.clone(),
                            FakeFs::new(cx.background().clone()),
                            cx,
                        )
                    })
                    .await?;
                client.remote_projects.push(remote_project.clone());
                remote_project
            };

            remote_project
        } else {
            client
                .remote_projects
                .choose(&mut *rng.lock())
                .unwrap()
                .clone()
        }
    };

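    // If we're in a room and the chosen project is an unshared local project, share it.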
    if active_call.read_with(cx, |call, _| call.room().is_some())
        && project.read_with(cx, |project, _| project.is_local() && !project.is_shared())
    {
        match active_call
            .update(cx, |call, cx| call.share_project(project.clone(), cx))
            .await
        {
            Ok(project_id) => {
                log::info!("{}: shared project with id {}", client.username, project_id);
            }
            Err(error) => {
                log::error!("{}: error sharing project, {:?}", client.username, error);
            }
        }
    }

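    // Occasionally add a worktree to a local project or drop a remote project entirely.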
    let choice = rng.lock().gen_range(0..100);
    match choice {
        0..=19 if project.read_with(cx, |project, _| project.is_local()) => {
            let paths = client.fs.paths().await;
            let path = paths.choose(&mut *rng.lock()).unwrap();
            log::info!(
                "{}: finding/creating local worktree for path {:?}",
                client.username,
                path
            );
            project
                .update(cx, |project, cx| {
                    project.find_or_create_local_worktree(&path, true, cx)
                })
                .await
                .unwrap();
        }
        20..=24 if project.read_with(cx, |project, _| project.is_remote()) => {
            log::info!(
                "{}: dropping remote project {}",
                client.username,
                project.read_with(cx, |project, _| project.remote_id().unwrap())
            );

            cx.update(|_| {
                client
                    .remote_projects
                    .retain(|remote_project| *remote_project != project);
                client.buffers.remove(&project);
                drop(project);
            });
        }
        _ => {}
    }

    Ok(())
}

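/// Creates a random file or directory entry inside a randomly chosen visible worktree.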
async fn randomly_mutate_worktrees(
    client: &mut TestClient,
    rng: &Mutex<StdRng>,
    cx: &mut TestAppContext,
) -> Result<()> {
    let project = choose_random_project(client, rng).unwrap();
    let Some(worktree) = project.read_with(cx, |project, cx| {
        project
            .worktrees(cx)
            .filter(|worktree| {
                let worktree = worktree.read(cx);
                worktree.is_visible()
                    && worktree.entries(false).any(|e| e.is_file())
                    && worktree.root_entry().map_or(false, |e| e.is_dir())
            })
            .choose(&mut *rng.lock())
    }) else {
        return Ok(())
    };

    let (worktree_id, worktree_root_name) = worktree.read_with(cx, |worktree, _| {
        (worktree.id(), worktree.root_name().to_string())
    });

    let is_dir = rng.lock().gen::<bool>();
    let mut new_path = PathBuf::new();
    new_path.push(gen_file_name(rng));
    if !is_dir {
        new_path.set_extension("rs");
    }
    log::info!(
        "{}: creating {:?} in worktree {} ({})",
        client.username,
        new_path,
        worktree_id,
        worktree_root_name,
    );
    project
        .update(cx, |project, cx| {
            project.create_entry((worktree_id, new_path), is_dir, cx)
        })
        .unwrap()
        .await?;
    Ok(())
}

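/// Opens or reuses a random buffer and performs a random query or edit against it.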
async fn randomly_query_and_mutate_buffers(
    client: &mut TestClient,
    rng: &Mutex<StdRng>,
    cx: &mut TestAppContext,
) -> Result<()> {
    let project = choose_random_project(client, rng).unwrap();
    let buffers = client.buffers.entry(project.clone()).or_default();
    let buffer = if buffers.is_empty() || rng.lock().gen() {
        let Some(worktree) = project.read_with(cx, |project, cx| {
            project
                .worktrees(cx)
                .filter(|worktree| {
                    let worktree = worktree.read(cx);
                    worktree.is_visible() && worktree.entries(false).any(|e| e.is_file())
                })
                .choose(&mut *rng.lock())
        }) else {
            return Ok(());
        };

        let (worktree_root_name, project_path) = worktree.read_with(cx, |worktree, _| {
            let entry = worktree
                .entries(false)
                .filter(|e| e.is_file())
                .choose(&mut *rng.lock())
                .unwrap();
            (
                worktree.root_name().to_string(),
                (worktree.id(), entry.path.clone()),
            )
        });
        log::info!(
            "{}: opening path {:?} in worktree {} ({})",
            client.username,
            project_path.1,
            project_path.0,
            worktree_root_name,
        );
        let buffer = project
            .update(cx, |project, cx| {
                project.open_buffer(project_path.clone(), cx)
            })
            .await?;
        log::info!(
            "{}: opened path {:?} in worktree {} ({}) with buffer id {}",
            client.username,
            project_path.1,
            project_path.0,
            worktree_root_name,
            buffer.read_with(cx, |buffer, _| buffer.remote_id())
        );
        buffers.insert(buffer.clone());
        buffer
    } else {
        buffers.iter().choose(&mut *rng.lock()).unwrap().clone()
    };

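    // Exercise one random operation against the chosen buffer. LSP-backed requests
    // are sometimes detached instead of awaited.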
    let choice = rng.lock().gen_range(0..100);
    match choice {
        0..=9 => {
            cx.update(|cx| {
                log::info!(
                    "{}: dropping buffer {:?}",
                    client.username,
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
                buffers.remove(&buffer);
                drop(buffer);
            });
        }
        10..=19 => {
            let completions = project.update(cx, |project, cx| {
                log::info!(
                    "{}: requesting completions for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
                let offset = rng.lock().gen_range(0..=buffer.read(cx).len());
                project.completions(&buffer, offset, cx)
            });
            let completions = cx.background().spawn(async move {
                completions
                    .await
                    .map_err(|err| anyhow!("completions request failed: {:?}", err))
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching completions request", client.username);
                cx.update(|cx| completions.detach_and_log_err(cx));
            } else {
                completions.await?;
            }
        }
        20..=29 => {
            let code_actions = project.update(cx, |project, cx| {
                log::info!(
                    "{}: requesting code actions for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
                let range = buffer.read(cx).random_byte_range(0, &mut *rng.lock());
                project.code_actions(&buffer, range, cx)
            });
            let code_actions = cx.background().spawn(async move {
                code_actions
                    .await
                    .map_err(|err| anyhow!("code actions request failed: {:?}", err))
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching code actions request", client.username);
                cx.update(|cx| code_actions.detach_and_log_err(cx));
            } else {
                code_actions.await?;
            }
        }
        30..=39 if buffer.read_with(cx, |buffer, _| buffer.is_dirty()) => {
            let (requested_version, save) = buffer.update(cx, |buffer, cx| {
                log::info!(
                    "{}: saving buffer {} ({:?})",
                    client.username,
                    buffer.remote_id(),
                    buffer.file().unwrap().full_path(cx)
                );
                (buffer.version(), buffer.save(cx))
            });
            let save = cx.background().spawn(async move {
                let (saved_version, _, _) = save
                    .await
                    .map_err(|err| anyhow!("save request failed: {:?}", err))?;
                assert!(saved_version.observed_all(&requested_version));
                Ok::<_, anyhow::Error>(())
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching save request", client.username);
                cx.update(|cx| save.detach_and_log_err(cx));
            } else {
                save.await?;
            }
        }
        40..=44 => {
            let prepare_rename = project.update(cx, |project, cx| {
                log::info!(
                    "{}: preparing rename for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
                let offset = rng.lock().gen_range(0..=buffer.read(cx).len());
                project.prepare_rename(buffer, offset, cx)
            });
            let prepare_rename = cx.background().spawn(async move {
                prepare_rename
                    .await
                    .map_err(|err| anyhow!("prepare rename request failed: {:?}", err))
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching prepare rename request", client.username);
                cx.update(|cx| prepare_rename.detach_and_log_err(cx));
            } else {
                prepare_rename.await?;
            }
        }
        45..=49 => {
            let definitions = project.update(cx, |project, cx| {
                log::info!(
                    "{}: requesting definitions for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
                let offset = rng.lock().gen_range(0..=buffer.read(cx).len());
                project.definition(&buffer, offset, cx)
            });
            let definitions = cx.background().spawn(async move {
                definitions
                    .await
                    .map_err(|err| anyhow!("definitions request failed: {:?}", err))
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching definitions request", client.username);
                cx.update(|cx| definitions.detach_and_log_err(cx));
            } else {
                buffers.extend(definitions.await?.into_iter().map(|loc| loc.target.buffer));
            }
        }
        50..=54 => {
            let highlights = project.update(cx, |project, cx| {
                log::info!(
                    "{}: requesting highlights for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
                let offset = rng.lock().gen_range(0..=buffer.read(cx).len());
                project.document_highlights(&buffer, offset, cx)
            });
            let highlights = cx.background().spawn(async move {
                highlights
                    .await
                    .map_err(|err| anyhow!("highlights request failed: {:?}", err))
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching highlights request", client.username);
                cx.update(|cx| highlights.detach_and_log_err(cx));
            } else {
                highlights.await?;
            }
        }
        55..=59 => {
            let search = project.update(cx, |project, cx| {
                let query = rng.lock().gen_range('a'..='z');
                log::info!("{}: project-wide search {:?}", client.username, query);
                project.search(SearchQuery::text(query, false, false), cx)
            });
            let search = cx.background().spawn(async move {
                search
                    .await
                    .map_err(|err| anyhow!("search request failed: {:?}", err))
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching search request", client.username);
                cx.update(|cx| search.detach_and_log_err(cx));
            } else {
                buffers.extend(search.await?.into_keys());
            }
        }
        _ => {
            buffer.update(cx, |buffer, cx| {
                log::info!(
                    "{}: updating buffer {} ({:?})",
                    client.username,
                    buffer.remote_id(),
                    buffer.file().unwrap().full_path(cx)
                );
                if rng.lock().gen_bool(0.7) {
                    buffer.randomly_edit(&mut *rng.lock(), 5, cx);
                } else {
                    buffer.randomly_undo_redo(&mut *rng.lock(), cx);
                }
            });
        }
    }

    Ok(())
}

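/// Returns a random project (local or remote) from the client, if it has any.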
fn choose_random_project(
    client: &mut TestClient,
    rng: &Mutex<StdRng>,
) -> Option<ModelHandle<Project>> {
    client
        .local_projects
        .iter()
        .chain(&client.remote_projects)
        .choose(&mut *rng.lock())
        .cloned()
}

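/// Generates a random ten-character lowercase file name.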
fn gen_file_name(rng: &Mutex<StdRng>) -> String {
    let mut name = String::new();
    for _ in 0..10 {
        let letter = rng.lock().gen_range('a'..='z');
        name.push(letter);
    }
    name
}