1use crate::{
2 db::{self, NewUserParams},
3 rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT},
4 tests::{TestClient, TestServer},
5};
6use anyhow::{anyhow, Result};
7use call::ActiveCall;
8use client::RECEIVE_TIMEOUT;
9use collections::BTreeMap;
10use fs::{FakeFs, Fs as _};
11use futures::StreamExt as _;
12use gpui::{executor::Deterministic, ModelHandle, TestAppContext};
13use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16};
14use lsp::FakeLanguageServer;
15use parking_lot::Mutex;
16use project::{search::SearchQuery, Project};
17use rand::prelude::*;
18use std::{env, path::PathBuf, sync::Arc};
19
20#[gpui::test(iterations = 100)]
21async fn test_random_collaboration(
22 cx: &mut TestAppContext,
23 deterministic: Arc<Deterministic>,
24 rng: StdRng,
25) {
26 deterministic.forbid_parking();
27 let rng = Arc::new(Mutex::new(rng));
28
29 let max_peers = env::var("MAX_PEERS")
30 .map(|i| i.parse().expect("invalid `MAX_PEERS` variable"))
31 .unwrap_or(5);
32
33 let max_operations = env::var("OPERATIONS")
34 .map(|i| i.parse().expect("invalid `OPERATIONS` variable"))
35 .unwrap_or(10);
36
37 let mut server = TestServer::start(&deterministic).await;
38 let db = server.app_state.db.clone();
39
40 let mut available_users = Vec::new();
41 for ix in 0..max_peers {
42 let username = format!("user-{}", ix + 1);
43 let user_id = db
44 .create_user(
45 &format!("{username}@example.com"),
46 false,
47 NewUserParams {
48 github_login: username.clone(),
49 github_user_id: (ix + 1) as i32,
50 invite_count: 0,
51 },
52 )
53 .await
54 .unwrap()
55 .user_id;
56 available_users.push((user_id, username));
57 }
58
59 for (ix, (user_id_a, _)) in available_users.iter().enumerate() {
60 for (user_id_b, _) in &available_users[ix + 1..] {
61 server
62 .app_state
63 .db
64 .send_contact_request(*user_id_a, *user_id_b)
65 .await
66 .unwrap();
67 server
68 .app_state
69 .db
70 .respond_to_contact_request(*user_id_b, *user_id_a, true)
71 .await
72 .unwrap();
73 }
74 }
75
76 let mut clients = Vec::new();
77 let mut user_ids = Vec::new();
78 let mut op_start_signals = Vec::new();
79 let mut next_entity_id = 100000;
80 let allow_server_restarts = rng.lock().gen_bool(0.7);
81 let allow_client_reconnection = rng.lock().gen_bool(0.7);
82 let allow_client_disconnection = rng.lock().gen_bool(0.1);
83
84 let mut operations = 0;
85 while operations < max_operations {
86 let distribution = rng.lock().gen_range(0..100);
87 match distribution {
88 0..=19 if !available_users.is_empty() => {
89 let client_ix = rng.lock().gen_range(0..available_users.len());
90 let (_, username) = available_users.remove(client_ix);
91 log::info!("Adding new connection for {}", username);
92 next_entity_id += 100000;
93 let mut client_cx = TestAppContext::new(
94 cx.foreground_platform(),
95 cx.platform(),
96 deterministic.build_foreground(next_entity_id),
97 deterministic.build_background(),
98 cx.font_cache(),
99 cx.leak_detector(),
100 next_entity_id,
101 cx.function_name.clone(),
102 );
103
104 let op_start_signal = futures::channel::mpsc::unbounded();
105 let client = server.create_client(&mut client_cx, &username).await;
106 user_ids.push(client.current_user_id(&client_cx));
107 op_start_signals.push(op_start_signal.0);
108 clients.push(client_cx.foreground().spawn(simulate_client(
109 client,
110 op_start_signal.1,
111 allow_client_disconnection,
112 rng.clone(),
113 client_cx,
114 )));
115
116 log::info!("Added connection for {}", username);
117 operations += 1;
118 }
119
120 20..=24 if clients.len() > 1 && allow_client_disconnection => {
121 let client_ix = rng.lock().gen_range(1..clients.len());
122 log::info!(
123 "Simulating full disconnection of user {}",
124 user_ids[client_ix]
125 );
126 let removed_user_id = user_ids.remove(client_ix);
127 let user_connection_ids = server
128 .connection_pool
129 .lock()
130 .user_connection_ids(removed_user_id)
131 .collect::<Vec<_>>();
132 assert_eq!(user_connection_ids.len(), 1);
133 let removed_peer_id = user_connection_ids[0].into();
134 let client = clients.remove(client_ix);
135 op_start_signals.remove(client_ix);
136 server.forbid_connections();
137 server.disconnect_client(removed_peer_id);
138 deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
139 deterministic.start_waiting();
140 log::info!("Waiting for user {} to exit...", removed_user_id);
141 let (client, mut client_cx) = client.await;
142 deterministic.finish_waiting();
143 server.allow_connections();
144
145 for project in &client.remote_projects {
146 project.read_with(&client_cx, |project, _| {
147 assert!(
148 project.is_read_only(),
149 "project {:?} should be read only",
150 project.remote_id()
151 )
152 });
153 }
154 for user_id in &user_ids {
155 let contacts = server.app_state.db.get_contacts(*user_id).await.unwrap();
156 let pool = server.connection_pool.lock();
157 for contact in contacts {
158 if let db::Contact::Accepted { user_id, .. } = contact {
159 if pool.is_user_online(user_id) {
160 assert_ne!(
161 user_id, removed_user_id,
162 "removed client is still a contact of another peer"
163 );
164 }
165 }
166 }
167 }
168
169 log::info!("{} removed", client.username);
170 available_users.push((removed_user_id, client.username.clone()));
171 client_cx.update(|cx| {
172 cx.clear_globals();
173 drop(client);
174 });
175
176 operations += 1;
177 }
178
179 25..=29 if clients.len() > 1 && allow_client_reconnection => {
180 let client_ix = rng.lock().gen_range(1..clients.len());
181 let user_id = user_ids[client_ix];
182 log::info!("Simulating temporary disconnection of user {}", user_id);
183 let user_connection_ids = server
184 .connection_pool
185 .lock()
186 .user_connection_ids(user_id)
187 .collect::<Vec<_>>();
188 assert_eq!(user_connection_ids.len(), 1);
189 let peer_id = user_connection_ids[0].into();
190 server.disconnect_client(peer_id);
191 deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
192 operations += 1;
193 }
194
195 30..=34 if allow_server_restarts => {
196 log::info!("Simulating server restart");
197 server.reset().await;
198 deterministic.advance_clock(RECEIVE_TIMEOUT);
199 server.start().await.unwrap();
200 deterministic.advance_clock(CLEANUP_TIMEOUT);
201 let environment = &server.app_state.config.zed_environment;
202 let stale_room_ids = server
203 .app_state
204 .db
205 .stale_room_ids(environment, server.id())
206 .await
207 .unwrap();
208 assert_eq!(stale_room_ids, vec![]);
209 }
210
211 _ if !op_start_signals.is_empty() => {
212 while operations < max_operations && rng.lock().gen_bool(0.7) {
213 op_start_signals
214 .choose(&mut *rng.lock())
215 .unwrap()
216 .unbounded_send(())
217 .unwrap();
218 operations += 1;
219 }
220
221 if rng.lock().gen_bool(0.8) {
222 deterministic.run_until_parked();
223 }
224 }
225 _ => {}
226 }
227 }
228
229 drop(op_start_signals);
230 deterministic.start_waiting();
231 let clients = futures::future::join_all(clients).await;
232 deterministic.finish_waiting();
233 deterministic.run_until_parked();
234
235 for (client, client_cx) in &clients {
236 for guest_project in &client.remote_projects {
237 guest_project.read_with(client_cx, |guest_project, cx| {
238 let host_project = clients.iter().find_map(|(client, cx)| {
239 let project = client.local_projects.iter().find(|host_project| {
240 host_project.read_with(cx, |host_project, _| {
241 host_project.remote_id() == guest_project.remote_id()
242 })
243 })?;
244 Some((project, cx))
245 });
246
247 if !guest_project.is_read_only() {
248 if let Some((host_project, host_cx)) = host_project {
249 let host_worktree_snapshots =
250 host_project.read_with(host_cx, |host_project, cx| {
251 host_project
252 .worktrees(cx)
253 .map(|worktree| {
254 let worktree = worktree.read(cx);
255 (worktree.id(), worktree.snapshot())
256 })
257 .collect::<BTreeMap<_, _>>()
258 });
259 let guest_worktree_snapshots = guest_project
260 .worktrees(cx)
261 .map(|worktree| {
262 let worktree = worktree.read(cx);
263 (worktree.id(), worktree.snapshot())
264 })
265 .collect::<BTreeMap<_, _>>();
266
267 assert_eq!(
268 guest_worktree_snapshots.keys().collect::<Vec<_>>(),
269 host_worktree_snapshots.keys().collect::<Vec<_>>(),
270 "{} has different worktrees than the host",
271 client.username
272 );
273
274 for (id, host_snapshot) in &host_worktree_snapshots {
275 let guest_snapshot = &guest_worktree_snapshots[id];
276 assert_eq!(
277 guest_snapshot.root_name(),
278 host_snapshot.root_name(),
279 "{} has different root name than the host for worktree {}",
280 client.username,
281 id
282 );
283 assert_eq!(
284 guest_snapshot.abs_path(),
285 host_snapshot.abs_path(),
286 "{} has different abs path than the host for worktree {}",
287 client.username,
288 id
289 );
290 assert_eq!(
291 guest_snapshot.entries(false).collect::<Vec<_>>(),
292 host_snapshot.entries(false).collect::<Vec<_>>(),
293 "{} has different snapshot than the host for worktree {} ({:?}) and project {:?}",
294 client.username,
295 id,
296 host_snapshot.abs_path(),
297 host_project.read_with(host_cx, |project, _| project.remote_id())
298 );
299 assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id());
300 }
301 }
302 }
303
304 guest_project.check_invariants(cx);
305 });
306 }
307
308 for (guest_project, guest_buffers) in &client.buffers {
309 let project_id = if guest_project.read_with(client_cx, |project, _| {
310 project.is_local() || project.is_read_only()
311 }) {
312 continue;
313 } else {
314 guest_project
315 .read_with(client_cx, |project, _| project.remote_id())
316 .unwrap()
317 };
318 let guest_user_id = client.user_id().unwrap();
319
320 let host_project = clients.iter().find_map(|(client, cx)| {
321 let project = client.local_projects.iter().find(|host_project| {
322 host_project.read_with(cx, |host_project, _| {
323 host_project.remote_id() == Some(project_id)
324 })
325 })?;
326 Some((client.user_id().unwrap(), project, cx))
327 });
328
329 let (host_user_id, host_project, host_cx) =
330 if let Some((host_user_id, host_project, host_cx)) = host_project {
331 (host_user_id, host_project, host_cx)
332 } else {
333 continue;
334 };
335
336 for guest_buffer in guest_buffers {
337 let buffer_id = guest_buffer.read_with(client_cx, |buffer, _| buffer.remote_id());
338 let host_buffer = host_project.read_with(host_cx, |project, cx| {
339 project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| {
340 panic!(
341 "host does not have buffer for guest:{}, peer:{:?}, id:{}",
342 client.username,
343 client.peer_id(),
344 buffer_id
345 )
346 })
347 });
348 let path = host_buffer
349 .read_with(host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx));
350
351 assert_eq!(
352 guest_buffer.read_with(client_cx, |buffer, _| buffer.deferred_ops_len()),
353 0,
354 "{}, buffer {}, path {:?} has deferred operations",
355 client.username,
356 buffer_id,
357 path,
358 );
359 assert_eq!(
360 guest_buffer.read_with(client_cx, |buffer, _| buffer.text()),
361 host_buffer.read_with(host_cx, |buffer, _| buffer.text()),
362 "{}, buffer {}, path {:?}, differs from the host's buffer",
363 client.username,
364 buffer_id,
365 path
366 );
367
368 let host_file = host_buffer.read_with(host_cx, |b, _| b.file().cloned());
369 let guest_file = guest_buffer.read_with(client_cx, |b, _| b.file().cloned());
370 match (host_file, guest_file) {
371 (Some(host_file), Some(guest_file)) => {
372 assert_eq!(guest_file.path(), host_file.path());
373 assert_eq!(guest_file.is_deleted(), host_file.is_deleted());
374 assert_eq!(
375 guest_file.mtime(),
376 host_file.mtime(),
377 "guest {} mtime does not match host {} for path {:?} in project {}",
378 guest_user_id,
379 host_user_id,
380 guest_file.path(),
381 project_id,
382 );
383 }
384 (None, None) => {}
385 (None, _) => panic!("host's file is None, guest's isn't "),
386 (_, None) => panic!("guest's file is None, hosts's isn't "),
387 }
388 }
389 }
390 }
391
392 for (client, mut cx) in clients {
393 cx.update(|cx| {
394 cx.clear_globals();
395 drop(client);
396 });
397 }
398}
399
/// Runs one simulated collaborator until its operation channel closes.
///
/// Installs a fake Rust language server whose request handlers produce
/// random-but-deterministic responses (driven by `rng`), then performs one
/// random client mutation per message received on `op_start_signal`.
/// Returns the client and its app context so the caller can tear them down.
async fn simulate_client(
    mut client: TestClient,
    mut op_start_signal: futures::channel::mpsc::UnboundedReceiver<()>,
    can_hang_up: bool,
    rng: Arc<Mutex<StdRng>>,
    mut cx: TestAppContext,
) -> (TestClient, TestAppContext) {
    // Setup language server
    let mut language = Language::new(
        LanguageConfig {
            name: "Rust".into(),
            path_suffixes: vec!["rs".to_string()],
            ..Default::default()
        },
        None,
    );
    let _fake_language_servers = language
        .set_fake_lsp_adapter(Arc::new(FakeLspAdapter {
            name: "the-fake-language-server",
            capabilities: lsp::LanguageServer::full_capabilities(),
            initializer: Some(Box::new({
                let rng = rng.clone();
                let fs = client.fs.clone();
                move |fake_server: &mut FakeLanguageServer| {
                    // Completions always insert the same text at the buffer start.
                    fake_server.handle_request::<lsp::request::Completion, _, _>(
                        |_, _| async move {
                            Ok(Some(lsp::CompletionResponse::Array(vec![
                                lsp::CompletionItem {
                                    text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit {
                                        range: lsp::Range::new(
                                            lsp::Position::new(0, 0),
                                            lsp::Position::new(0, 0),
                                        ),
                                        new_text: "the-new-text".to_string(),
                                    })),
                                    ..Default::default()
                                },
                            ])))
                        },
                    );

                    // A single fixed code action is always available.
                    fake_server.handle_request::<lsp::request::CodeActionRequest, _, _>(
                        |_, _| async move {
                            Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction(
                                lsp::CodeAction {
                                    title: "the-code-action".to_string(),
                                    ..Default::default()
                                },
                            )]))
                        },
                    );

                    // Rename is always "preparable", with an empty range at the
                    // requested position.
                    fake_server.handle_request::<lsp::request::PrepareRenameRequest, _, _>(
                        |params, _| async move {
                            Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new(
                                params.position,
                                params.position,
                            ))))
                        },
                    );

                    // Definitions point at 1-2 random files from the fake fs,
                    // which can cause the requesting client to open new buffers.
                    fake_server.handle_request::<lsp::request::GotoDefinition, _, _>({
                        let fs = fs.clone();
                        let rng = rng.clone();
                        move |_, _| {
                            let fs = fs.clone();
                            let rng = rng.clone();
                            async move {
                                let files = fs.files().await;
                                let mut rng = rng.lock();
                                let count = rng.gen_range::<usize, _>(1..3);
                                let files = (0..count)
                                    .map(|_| files.choose(&mut *rng).unwrap())
                                    .collect::<Vec<_>>();
                                log::info!("LSP: Returning definitions in files {:?}", &files);
                                Ok(Some(lsp::GotoDefinitionResponse::Array(
                                    files
                                        .into_iter()
                                        .map(|file| lsp::Location {
                                            uri: lsp::Url::from_file_path(file).unwrap(),
                                            range: Default::default(),
                                        })
                                        .collect(),
                                )))
                            }
                        }
                    });

                    // Highlights are 1-5 random ranges, normalized (start <= end)
                    // and sorted before being returned.
                    fake_server.handle_request::<lsp::request::DocumentHighlightRequest, _, _>({
                        let rng = rng.clone();
                        move |_, _| {
                            let mut highlights = Vec::new();
                            let highlight_count = rng.lock().gen_range(1..=5);
                            for _ in 0..highlight_count {
                                let start_row = rng.lock().gen_range(0..100);
                                let start_column = rng.lock().gen_range(0..100);
                                let start = PointUtf16::new(start_row, start_column);
                                let end_row = rng.lock().gen_range(0..100);
                                let end_column = rng.lock().gen_range(0..100);
                                let end = PointUtf16::new(end_row, end_column);
                                let range = if start > end { end..start } else { start..end };
                                highlights.push(lsp::DocumentHighlight {
                                    range: range_to_lsp(range.clone()),
                                    kind: Some(lsp::DocumentHighlightKind::READ),
                                });
                            }
                            highlights.sort_unstable_by_key(|highlight| {
                                (highlight.range.start, highlight.range.end)
                            });
                            async move { Ok(Some(highlights)) }
                        }
                    });
                }
            })),
            ..Default::default()
        }))
        .await;
    client.language_registry.add(Arc::new(language));

    // One random mutation per start signal; errors are logged, never fatal,
    // because failed operations (e.g. racing a disconnect) are expected.
    while op_start_signal.next().await.is_some() {
        if let Err(error) =
            randomly_mutate_client(&mut client, can_hang_up, rng.clone(), &mut cx).await
        {
            log::error!("{} error: {:?}", client.username, error);
        }

        cx.background().simulate_random_delay().await;
    }
    log::info!("{}: done", client.username);

    (client, cx)
}
532
533async fn randomly_mutate_client(
534 client: &mut TestClient,
535 can_hang_up: bool,
536 rng: Arc<Mutex<StdRng>>,
537 cx: &mut TestAppContext,
538) -> Result<()> {
539 let choice = rng.lock().gen_range(0..100);
540 match choice {
541 0..=19 => randomly_mutate_active_call(client, can_hang_up, &rng, cx).await?,
542 20..=49 => randomly_mutate_projects(client, &rng, cx).await?,
543 50..=59 if !client.local_projects.is_empty() || !client.remote_projects.is_empty() => {
544 randomly_mutate_worktrees(client, &rng, cx).await?;
545 }
546 60..=84 if !client.local_projects.is_empty() || !client.remote_projects.is_empty() => {
547 randomly_query_and_mutate_buffers(client, &rng, cx).await?;
548 }
549 _ => randomly_mutate_fs(client, &rng).await,
550 }
551
552 Ok(())
553}
554
/// Randomly accepts/declines a pending incoming call, or — when no call is
/// incoming — randomly invites an available contact or hangs up the current
/// room.
async fn randomly_mutate_active_call(
    client: &mut TestClient,
    can_hang_up: bool,
    rng: &Mutex<StdRng>,
    cx: &mut TestAppContext,
) -> Result<()> {
    let active_call = cx.read(ActiveCall::global);
    if active_call.read_with(cx, |call, _| call.incoming().borrow().is_some()) {
        // An incoming call is pending: mostly accept, sometimes decline.
        if rng.lock().gen_bool(0.7) {
            log::info!("{}: accepting incoming call", client.username);
            active_call
                .update(cx, |call, cx| call.accept_incoming(cx))
                .await?;
        } else {
            log::info!("{}: declining incoming call", client.username);
            active_call.update(cx, |call, _| call.decline_incoming())?;
        }
    } else {
        // Only contacts that are online and not already busy can be invited.
        let available_contacts = client.user_store.read_with(cx, |user_store, _| {
            user_store
                .contacts()
                .iter()
                .filter(|contact| contact.online && !contact.busy)
                .cloned()
                .collect::<Vec<_>>()
        });

        let distribution = rng.lock().gen_range(0..100);
        match distribution {
            0..=29 if !available_contacts.is_empty() => {
                let contact = available_contacts.choose(&mut *rng.lock()).unwrap();
                log::info!(
                    "{}: inviting {}",
                    client.username,
                    contact.user.github_login
                );
                active_call
                    .update(cx, |call, cx| call.invite(contact.user.id, None, cx))
                    .await?;
            }
            // Hanging up is gated on `can_hang_up` so that iterations which
            // forbid disconnections keep their rooms alive.
            30..=39
                if can_hang_up && active_call.read_with(cx, |call, _| call.room().is_some()) =>
            {
                log::info!("{}: hanging up", client.username);
                active_call.update(cx, |call, cx| call.hang_up(cx))?;
            }
            _ => {}
        }
    }

    Ok(())
}
607
608async fn randomly_mutate_fs(client: &mut TestClient, rng: &Mutex<StdRng>) {
609 let is_dir = rng.lock().gen::<bool>();
610 let mut new_path = client
611 .fs
612 .directories()
613 .await
614 .choose(&mut *rng.lock())
615 .unwrap()
616 .clone();
617 new_path.push(gen_file_name(rng));
618 if is_dir {
619 log::info!("{}: creating local dir at {:?}", client.username, new_path);
620 client.fs.create_dir(&new_path).await.unwrap();
621 } else {
622 new_path.set_extension("rs");
623 log::info!("{}: creating local file at {:?}", client.username, new_path);
624 client
625 .fs
626 .create_file(&new_path, Default::default())
627 .await
628 .unwrap();
629 }
630}
631
/// Randomly opens, joins, shares, extends, or drops a project.
///
/// First picks (or creates) a local or remote project, shares it with the
/// current room when possible, then randomly either adds a worktree (local
/// projects) or drops the project entirely (remote projects).
async fn randomly_mutate_projects(
    client: &mut TestClient,
    rng: &Mutex<StdRng>,
    cx: &mut TestAppContext,
) -> Result<()> {
    let active_call = cx.read(ActiveCall::global);
    // Projects shared by other participants of the current room, if any.
    let remote_projects =
        if let Some(room) = active_call.read_with(cx, |call, _| call.room().cloned()) {
            room.read_with(cx, |room, _| {
                room.remote_participants()
                    .values()
                    .flat_map(|participant| participant.projects.clone())
                    .collect::<Vec<_>>()
            })
        } else {
            Default::default()
        };

    // Choose a project to operate on: a new or existing local project, or a
    // newly-joined or already-joined remote project.
    let project = if remote_projects.is_empty() || rng.lock().gen() {
        if client.local_projects.is_empty() || rng.lock().gen() {
            // Open a new local project, either at a fresh root dir seeded with
            // `main.rs`, or at an existing path on the fake fs.
            let paths = client.fs.paths().await;
            let local_project = if paths.is_empty() || rng.lock().gen() {
                let root_path = client.create_new_root_dir();
                client.fs.create_dir(&root_path).await.unwrap();
                client
                    .fs
                    .create_file(&root_path.join("main.rs"), Default::default())
                    .await
                    .unwrap();
                log::info!(
                    "{}: opening local project at {:?}",
                    client.username,
                    root_path
                );
                client.build_local_project(root_path, cx).await.0
            } else {
                let root_path = paths.choose(&mut *rng.lock()).unwrap();
                log::info!(
                    "{}: opening local project at {:?}",
                    client.username,
                    root_path
                );
                client.build_local_project(root_path, cx).await.0
            };
            client.local_projects.push(local_project.clone());
            local_project
        } else {
            // Reuse an existing local project.
            client
                .local_projects
                .choose(&mut *rng.lock())
                .unwrap()
                .clone()
        }
    } else {
        if client.remote_projects.is_empty() || rng.lock().gen() {
            // Join one of the room's shared projects, unless this client has
            // already joined it — then reuse the existing handle.
            let remote_project_id = remote_projects.choose(&mut *rng.lock()).unwrap().id;
            let remote_project = if let Some(project) =
                client.remote_projects.iter().find(|project| {
                    project.read_with(cx, |project, _| {
                        project.remote_id() == Some(remote_project_id)
                    })
                }) {
                project.clone()
            } else {
                log::info!(
                    "{}: opening remote project {}",
                    client.username,
                    remote_project_id
                );
                let call = cx.read(ActiveCall::global);
                let room = call.read_with(cx, |call, _| call.room().unwrap().clone());
                let remote_project = room
                    .update(cx, |room, cx| {
                        room.join_project(
                            remote_project_id,
                            client.language_registry.clone(),
                            FakeFs::new(cx.background().clone()),
                            cx,
                        )
                    })
                    .await?;
                client.remote_projects.push(remote_project.clone());
                remote_project
            };

            remote_project
        } else {
            // Reuse an already-joined remote project.
            client
                .remote_projects
                .choose(&mut *rng.lock())
                .unwrap()
                .clone()
        }
    };

    // Share a local, not-yet-shared project whenever we're in a room. Sharing
    // can legitimately fail (e.g. racing a disconnect), so errors are logged.
    if active_call.read_with(cx, |call, _| call.room().is_some())
        && project.read_with(cx, |project, _| project.is_local() && !project.is_shared())
    {
        match active_call
            .update(cx, |call, cx| call.share_project(project.clone(), cx))
            .await
        {
            Ok(project_id) => {
                log::info!("{}: shared project with id {}", client.username, project_id);
            }
            Err(error) => {
                log::error!("{}: error sharing project, {:?}", client.username, error);
            }
        }
    }

    let choice = rng.lock().gen_range(0..100);
    match choice {
        // Add a worktree to a local project at a random fs path.
        0..=19 if project.read_with(cx, |project, _| project.is_local()) => {
            let paths = client.fs.paths().await;
            let path = paths.choose(&mut *rng.lock()).unwrap();
            log::info!(
                "{}: finding/creating local worktree for path {:?}",
                client.username,
                path
            );
            project
                .update(cx, |project, cx| {
                    project.find_or_create_local_worktree(&path, true, cx)
                })
                .await
                .unwrap();
        }
        // Drop a remote project, along with any buffers opened through it.
        20..=24 if project.read_with(cx, |project, _| project.is_remote()) => {
            log::info!(
                "{}: dropping remote project {}",
                client.username,
                project.read_with(cx, |project, _| project.remote_id().unwrap())
            );

            cx.update(|_| {
                client
                    .remote_projects
                    .retain(|remote_project| *remote_project != project);
                client.buffers.remove(&project);
                drop(project);
            });
        }
        _ => {}
    }

    Ok(())
}
780
/// Creates a randomly-named file or directory entry in a random visible
/// worktree of a random project.
async fn randomly_mutate_worktrees(
    client: &mut TestClient,
    rng: &Mutex<StdRng>,
    cx: &mut TestAppContext,
) -> Result<()> {
    let project = choose_random_project(client, rng).unwrap();
    // Only consider visible worktrees that are rooted at a directory and
    // already contain at least one file; bail out quietly if there are none.
    let Some(worktree) = project.read_with(cx, |project, cx| {
        project
            .worktrees(cx)
            .filter(|worktree| {
                let worktree = worktree.read(cx);
                worktree.is_visible()
                    && worktree.entries(false).any(|e| e.is_file())
                    && worktree.root_entry().map_or(false, |e| e.is_dir())
            })
            .choose(&mut *rng.lock())
    }) else {
        return Ok(())
    };

    let (worktree_id, worktree_root_name) = worktree.read_with(cx, |worktree, _| {
        (worktree.id(), worktree.root_name().to_string())
    });

    let is_dir = rng.lock().gen::<bool>();
    let mut new_path = PathBuf::new();
    new_path.push(gen_file_name(rng));
    if !is_dir {
        // Files get a `.rs` extension so the fake Rust language server applies.
        new_path.set_extension("rs");
    }
    log::info!(
        "{}: creating {:?} in worktree {} ({})",
        client.username,
        new_path,
        worktree_id,
        worktree_root_name,
    );
    // Create the entry through the project so the change propagates to guests.
    project
        .update(cx, |project, cx| {
            project.create_entry((worktree_id, new_path), is_dir, cx)
        })
        .unwrap()
        .await?;
    Ok(())
}
826
/// Opens or reuses a random buffer in a random project, then performs a random
/// operation on it: dropping it, an LSP query (completions, code actions,
/// rename, definitions, highlights), a save, a project-wide search, or a
/// random edit/undo/redo. In-flight requests are randomly detached to
/// exercise cancellation.
async fn randomly_query_and_mutate_buffers(
    client: &mut TestClient,
    rng: &Mutex<StdRng>,
    cx: &mut TestAppContext,
) -> Result<()> {
    let project = choose_random_project(client, rng).unwrap();
    let buffers = client.buffers.entry(project.clone()).or_default();
    let buffer = if buffers.is_empty() || rng.lock().gen() {
        // Open a random file from a random visible worktree containing files.
        let Some(worktree) = project.read_with(cx, |project, cx| {
            project
                .worktrees(cx)
                .filter(|worktree| {
                    let worktree = worktree.read(cx);
                    worktree.is_visible() && worktree.entries(false).any(|e| e.is_file())
                })
                .choose(&mut *rng.lock())
        }) else {
            return Ok(());
        };

        let (worktree_root_name, project_path) = worktree.read_with(cx, |worktree, _| {
            let entry = worktree
                .entries(false)
                .filter(|e| e.is_file())
                .choose(&mut *rng.lock())
                .unwrap();
            (
                worktree.root_name().to_string(),
                (worktree.id(), entry.path.clone()),
            )
        });
        log::info!(
            "{}: opening path {:?} in worktree {} ({})",
            client.username,
            project_path.1,
            project_path.0,
            worktree_root_name,
        );
        let buffer = project
            .update(cx, |project, cx| {
                project.open_buffer(project_path.clone(), cx)
            })
            .await?;
        log::info!(
            "{}: opened path {:?} in worktree {} ({}) with buffer id {}",
            client.username,
            project_path.1,
            project_path.0,
            worktree_root_name,
            buffer.read_with(cx, |buffer, _| buffer.remote_id())
        );
        buffers.insert(buffer.clone());
        buffer
    } else {
        // Reuse a buffer this client already has open in the project.
        buffers.iter().choose(&mut *rng.lock()).unwrap().clone()
    };

    let choice = rng.lock().gen_range(0..100);
    match choice {
        // Drop the buffer handle entirely.
        0..=9 => {
            cx.update(|cx| {
                log::info!(
                    "{}: dropping buffer {:?}",
                    client.username,
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
                buffers.remove(&buffer);
                drop(buffer);
            });
        }
        // Request completions at a random offset.
        10..=19 => {
            let completions = project.update(cx, |project, cx| {
                log::info!(
                    "{}: requesting completions for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
                let offset = rng.lock().gen_range(0..=buffer.read(cx).len());
                project.completions(&buffer, offset, cx)
            });
            let completions = cx.background().spawn(async move {
                completions
                    .await
                    .map_err(|err| anyhow!("completions request failed: {:?}", err))
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching completions request", client.username);
                cx.update(|cx| completions.detach_and_log_err(cx));
            } else {
                completions.await?;
            }
        }
        // Request code actions over a random byte range.
        20..=29 => {
            let code_actions = project.update(cx, |project, cx| {
                log::info!(
                    "{}: requesting code actions for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
                let range = buffer.read(cx).random_byte_range(0, &mut *rng.lock());
                project.code_actions(&buffer, range, cx)
            });
            let code_actions = cx.background().spawn(async move {
                code_actions
                    .await
                    .map_err(|err| anyhow!("code actions request failed: {:?}", err))
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching code actions request", client.username);
                cx.update(|cx| code_actions.detach_and_log_err(cx));
            } else {
                code_actions.await?;
            }
        }
        // Save a dirty buffer and check that the save observed our version.
        30..=39 if buffer.read_with(cx, |buffer, _| buffer.is_dirty()) => {
            let (requested_version, save) = buffer.update(cx, |buffer, cx| {
                log::info!(
                    "{}: saving buffer {} ({:?})",
                    client.username,
                    buffer.remote_id(),
                    buffer.file().unwrap().full_path(cx)
                );
                (buffer.version(), buffer.save(cx))
            });
            let save = cx.background().spawn(async move {
                let (saved_version, _, _) = save
                    .await
                    .map_err(|err| anyhow!("save request failed: {:?}", err))?;
                assert!(saved_version.observed_all(&requested_version));
                Ok::<_, anyhow::Error>(())
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching save request", client.username);
                cx.update(|cx| save.detach_and_log_err(cx));
            } else {
                save.await?;
            }
        }
        // Prepare a rename at a random offset.
        40..=44 => {
            let prepare_rename = project.update(cx, |project, cx| {
                log::info!(
                    "{}: preparing rename for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
                let offset = rng.lock().gen_range(0..=buffer.read(cx).len());
                project.prepare_rename(buffer, offset, cx)
            });
            let prepare_rename = cx.background().spawn(async move {
                prepare_rename
                    .await
                    .map_err(|err| anyhow!("prepare rename request failed: {:?}", err))
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching prepare rename request", client.username);
                cx.update(|cx| prepare_rename.detach_and_log_err(cx));
            } else {
                prepare_rename.await?;
            }
        }
        // Request definitions; any target buffers get tracked like opened ones.
        45..=49 => {
            let definitions = project.update(cx, |project, cx| {
                log::info!(
                    "{}: requesting definitions for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
                let offset = rng.lock().gen_range(0..=buffer.read(cx).len());
                project.definition(&buffer, offset, cx)
            });
            let definitions = cx.background().spawn(async move {
                definitions
                    .await
                    .map_err(|err| anyhow!("definitions request failed: {:?}", err))
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching definitions request", client.username);
                cx.update(|cx| definitions.detach_and_log_err(cx));
            } else {
                buffers.extend(definitions.await?.into_iter().map(|loc| loc.target.buffer));
            }
        }
        // Request document highlights at a random offset.
        50..=54 => {
            let highlights = project.update(cx, |project, cx| {
                log::info!(
                    "{}: requesting highlights for buffer {} ({:?})",
                    client.username,
                    buffer.read(cx).remote_id(),
                    buffer.read(cx).file().unwrap().full_path(cx)
                );
                let offset = rng.lock().gen_range(0..=buffer.read(cx).len());
                project.document_highlights(&buffer, offset, cx)
            });
            let highlights = cx.background().spawn(async move {
                highlights
                    .await
                    .map_err(|err| anyhow!("highlights request failed: {:?}", err))
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching highlights request", client.username);
                cx.update(|cx| highlights.detach_and_log_err(cx));
            } else {
                highlights.await?;
            }
        }
        // Project-wide search for a random letter; resulting buffers are tracked.
        55..=59 => {
            let search = project.update(cx, |project, cx| {
                let query = rng.lock().gen_range('a'..='z');
                log::info!("{}: project-wide search {:?}", client.username, query);
                project.search(SearchQuery::text(query, false, false), cx)
            });
            let search = cx.background().spawn(async move {
                search
                    .await
                    .map_err(|err| anyhow!("search request failed: {:?}", err))
            });
            if rng.lock().gen_bool(0.3) {
                log::info!("{}: detaching search request", client.username);
                cx.update(|cx| search.detach_and_log_err(cx));
            } else {
                buffers.extend(search.await?.into_keys());
            }
        }
        // Edit the buffer, or undo/redo previous edits.
        _ => {
            buffer.update(cx, |buffer, cx| {
                log::info!(
                    "{}: updating buffer {} ({:?})",
                    client.username,
                    buffer.remote_id(),
                    buffer.file().unwrap().full_path(cx)
                );
                if rng.lock().gen_bool(0.7) {
                    buffer.randomly_edit(&mut *rng.lock(), 5, cx);
                } else {
                    buffer.randomly_undo_redo(&mut *rng.lock(), cx);
                }
            });
        }
    }

    Ok(())
}
1073
1074fn choose_random_project(
1075 client: &mut TestClient,
1076 rng: &Mutex<StdRng>,
1077) -> Option<ModelHandle<Project>> {
1078 client
1079 .local_projects
1080 .iter()
1081 .chain(&client.remote_projects)
1082 .choose(&mut *rng.lock())
1083 .cloned()
1084}
1085
1086fn gen_file_name(rng: &Mutex<StdRng>) -> String {
1087 let mut name = String::new();
1088 for _ in 0..10 {
1089 let letter = rng.lock().gen_range('a'..='z');
1090 name.push(letter);
1091 }
1092 name
1093}