1use crate::{
2 db::{self, NewUserParams, UserId},
3 rpc::{CLEANUP_TIMEOUT, RECONNECT_TIMEOUT},
4 tests::{TestClient, TestServer},
5};
6use anyhow::{anyhow, Result};
7use call::ActiveCall;
8use client::RECEIVE_TIMEOUT;
9use collections::{BTreeMap, HashSet};
10use editor::Bias;
11use fs::{FakeFs, Fs as _};
12use futures::StreamExt as _;
13use gpui::{executor::Deterministic, ModelHandle, Task, TestAppContext};
14use language::{range_to_lsp, FakeLspAdapter, Language, LanguageConfig, PointUtf16};
15use lsp::FakeLanguageServer;
16use parking_lot::Mutex;
17use project::{search::SearchQuery, Project, ProjectPath};
18use rand::{
19 distributions::{Alphanumeric, DistString},
20 prelude::*,
21};
22use serde::{Deserialize, Serialize};
23use std::{
24 env,
25 ops::Range,
26 path::{Path, PathBuf},
27 rc::Rc,
28 sync::{
29 atomic::{AtomicBool, Ordering::SeqCst},
30 Arc,
31 },
32};
33use util::ResultExt;
34
35#[gpui::test(iterations = 100)]
36async fn test_random_collaboration(
37 cx: &mut TestAppContext,
38 deterministic: Arc<Deterministic>,
39 rng: StdRng,
40) {
41 deterministic.forbid_parking();
42
43 let max_peers = env::var("MAX_PEERS")
44 .map(|i| i.parse().expect("invalid `MAX_PEERS` variable"))
45 .unwrap_or(3);
46 let max_operations = env::var("OPERATIONS")
47 .map(|i| i.parse().expect("invalid `OPERATIONS` variable"))
48 .unwrap_or(10);
49
50 let plan_load_path = path_env_var("LOAD_PLAN");
51 let plan_save_path = path_env_var("SAVE_PLAN");
52
53 let mut server = TestServer::start(&deterministic).await;
54 let db = server.app_state.db.clone();
55
56 let mut users = Vec::new();
57 for ix in 0..max_peers {
58 let username = format!("user-{}", ix + 1);
59 let user_id = db
60 .create_user(
61 &format!("{username}@example.com"),
62 false,
63 NewUserParams {
64 github_login: username.clone(),
65 github_user_id: (ix + 1) as i32,
66 invite_count: 0,
67 },
68 )
69 .await
70 .unwrap()
71 .user_id;
72 users.push(UserTestPlan {
73 user_id,
74 username,
75 online: false,
76 next_root_id: 0,
77 operation_ix: 0,
78 });
79 }
80
81 for (ix, user_a) in users.iter().enumerate() {
82 for user_b in &users[ix + 1..] {
83 server
84 .app_state
85 .db
86 .send_contact_request(user_a.user_id, user_b.user_id)
87 .await
88 .unwrap();
89 server
90 .app_state
91 .db
92 .respond_to_contact_request(user_b.user_id, user_a.user_id, true)
93 .await
94 .unwrap();
95 }
96 }
97
98 let plan = Arc::new(Mutex::new(TestPlan::new(rng, users, max_operations)));
99
100 if let Some(path) = &plan_load_path {
101 eprintln!("loaded plan from path {:?}", path);
102 plan.lock().load(path);
103 }
104
105 let mut clients = Vec::new();
106 let mut client_tasks = Vec::new();
107 let mut operation_channels = Vec::new();
108
109 loop {
110 let Some((next_operation, skipped)) = plan.lock().next_server_operation(&clients) else { break };
111 let applied = apply_server_operation(
112 deterministic.clone(),
113 &mut server,
114 &mut clients,
115 &mut client_tasks,
116 &mut operation_channels,
117 plan.clone(),
118 next_operation,
119 cx,
120 )
121 .await;
122 if !applied {
123 skipped.store(false, SeqCst);
124 }
125 }
126
127 drop(operation_channels);
128 deterministic.start_waiting();
129 futures::future::join_all(client_tasks).await;
130 deterministic.finish_waiting();
131 deterministic.run_until_parked();
132
133 if let Some(path) = &plan_save_path {
134 eprintln!("saved test plan to path {:?}", path);
135 plan.lock().save(path);
136 }
137
138 for (client, client_cx) in &clients {
139 for guest_project in client.remote_projects().iter() {
140 guest_project.read_with(client_cx, |guest_project, cx| {
141 let host_project = clients.iter().find_map(|(client, cx)| {
142 let project = client
143 .local_projects()
144 .iter()
145 .find(|host_project| {
146 host_project.read_with(cx, |host_project, _| {
147 host_project.remote_id() == guest_project.remote_id()
148 })
149 })?
150 .clone();
151 Some((project, cx))
152 });
153
154 if !guest_project.is_read_only() {
155 if let Some((host_project, host_cx)) = host_project {
156 let host_worktree_snapshots =
157 host_project.read_with(host_cx, |host_project, cx| {
158 host_project
159 .worktrees(cx)
160 .map(|worktree| {
161 let worktree = worktree.read(cx);
162 (worktree.id(), worktree.snapshot())
163 })
164 .collect::<BTreeMap<_, _>>()
165 });
166 let guest_worktree_snapshots = guest_project
167 .worktrees(cx)
168 .map(|worktree| {
169 let worktree = worktree.read(cx);
170 (worktree.id(), worktree.snapshot())
171 })
172 .collect::<BTreeMap<_, _>>();
173
174 assert_eq!(
175 guest_worktree_snapshots.keys().collect::<Vec<_>>(),
176 host_worktree_snapshots.keys().collect::<Vec<_>>(),
177 "{} has different worktrees than the host",
178 client.username
179 );
180
181 for (id, host_snapshot) in &host_worktree_snapshots {
182 let guest_snapshot = &guest_worktree_snapshots[id];
183 assert_eq!(
184 guest_snapshot.root_name(),
185 host_snapshot.root_name(),
186 "{} has different root name than the host for worktree {}",
187 client.username,
188 id
189 );
190 assert_eq!(
191 guest_snapshot.abs_path(),
192 host_snapshot.abs_path(),
193 "{} has different abs path than the host for worktree {}",
194 client.username,
195 id
196 );
197 assert_eq!(
198 guest_snapshot.entries(false).collect::<Vec<_>>(),
199 host_snapshot.entries(false).collect::<Vec<_>>(),
200 "{} has different snapshot than the host for worktree {} ({:?}) and project {:?}",
201 client.username,
202 id,
203 host_snapshot.abs_path(),
204 host_project.read_with(host_cx, |project, _| project.remote_id())
205 );
206 assert_eq!(guest_snapshot.scan_id(), host_snapshot.scan_id());
207 }
208 }
209 }
210
211 guest_project.check_invariants(cx);
212 });
213 }
214
215 let buffers = client.buffers().clone();
216 for (guest_project, guest_buffers) in &buffers {
217 let project_id = if guest_project.read_with(client_cx, |project, _| {
218 project.is_local() || project.is_read_only()
219 }) {
220 continue;
221 } else {
222 guest_project
223 .read_with(client_cx, |project, _| project.remote_id())
224 .unwrap()
225 };
226 let guest_user_id = client.user_id().unwrap();
227
228 let host_project = clients.iter().find_map(|(client, cx)| {
229 let project = client
230 .local_projects()
231 .iter()
232 .find(|host_project| {
233 host_project.read_with(cx, |host_project, _| {
234 host_project.remote_id() == Some(project_id)
235 })
236 })?
237 .clone();
238 Some((client.user_id().unwrap(), project, cx))
239 });
240
241 let (host_user_id, host_project, host_cx) =
242 if let Some((host_user_id, host_project, host_cx)) = host_project {
243 (host_user_id, host_project, host_cx)
244 } else {
245 continue;
246 };
247
248 for guest_buffer in guest_buffers {
249 let buffer_id = guest_buffer.read_with(client_cx, |buffer, _| buffer.remote_id());
250 let host_buffer = host_project.read_with(host_cx, |project, cx| {
251 project.buffer_for_id(buffer_id, cx).unwrap_or_else(|| {
252 panic!(
253 "host does not have buffer for guest:{}, peer:{:?}, id:{}",
254 client.username,
255 client.peer_id(),
256 buffer_id
257 )
258 })
259 });
260 let path = host_buffer
261 .read_with(host_cx, |buffer, cx| buffer.file().unwrap().full_path(cx));
262
263 assert_eq!(
264 guest_buffer.read_with(client_cx, |buffer, _| buffer.deferred_ops_len()),
265 0,
266 "{}, buffer {}, path {:?} has deferred operations",
267 client.username,
268 buffer_id,
269 path,
270 );
271 assert_eq!(
272 guest_buffer.read_with(client_cx, |buffer, _| buffer.text()),
273 host_buffer.read_with(host_cx, |buffer, _| buffer.text()),
274 "{}, buffer {}, path {:?}, differs from the host's buffer",
275 client.username,
276 buffer_id,
277 path
278 );
279
280 let host_file = host_buffer.read_with(host_cx, |b, _| b.file().cloned());
281 let guest_file = guest_buffer.read_with(client_cx, |b, _| b.file().cloned());
282 match (host_file, guest_file) {
283 (Some(host_file), Some(guest_file)) => {
284 assert_eq!(guest_file.path(), host_file.path());
285 assert_eq!(guest_file.is_deleted(), host_file.is_deleted());
286 assert_eq!(
287 guest_file.mtime(),
288 host_file.mtime(),
289 "guest {} mtime does not match host {} for path {:?} in project {}",
290 guest_user_id,
291 host_user_id,
292 guest_file.path(),
293 project_id,
294 );
295 }
296 (None, None) => {}
297 (None, _) => panic!("host's file is None, guest's isn't"),
298 (_, None) => panic!("guest's file is None, hosts's isn't"),
299 }
300
301 let host_diff_base =
302 host_buffer.read_with(host_cx, |b, _| b.diff_base().map(ToString::to_string));
303 let guest_diff_base = guest_buffer
304 .read_with(client_cx, |b, _| b.diff_base().map(ToString::to_string));
305 assert_eq!(guest_diff_base, host_diff_base);
306 }
307 }
308 }
309
310 for (client, mut cx) in clients {
311 cx.update(|cx| {
312 cx.clear_globals();
313 drop(client);
314 });
315 }
316}
317
318async fn apply_server_operation(
319 deterministic: Arc<Deterministic>,
320 server: &mut TestServer,
321 clients: &mut Vec<(Rc<TestClient>, TestAppContext)>,
322 client_tasks: &mut Vec<Task<()>>,
323 operation_channels: &mut Vec<futures::channel::mpsc::UnboundedSender<usize>>,
324 plan: Arc<Mutex<TestPlan>>,
325 operation: Operation,
326 cx: &mut TestAppContext,
327) -> bool {
328 match operation {
329 Operation::AddConnection { user_id } => {
330 let username;
331 {
332 let mut plan = plan.lock();
333 let mut user = plan.user(user_id);
334 if user.online {
335 return false;
336 }
337 user.online = true;
338 username = user.username.clone();
339 };
340 log::info!("Adding new connection for {}", username);
341 let next_entity_id = (user_id.0 * 10_000) as usize;
342 let mut client_cx = TestAppContext::new(
343 cx.foreground_platform(),
344 cx.platform(),
345 deterministic.build_foreground(user_id.0 as usize),
346 deterministic.build_background(),
347 cx.font_cache(),
348 cx.leak_detector(),
349 next_entity_id,
350 cx.function_name.clone(),
351 );
352
353 let (operation_tx, operation_rx) = futures::channel::mpsc::unbounded();
354 let client = Rc::new(server.create_client(&mut client_cx, &username).await);
355 operation_channels.push(operation_tx);
356 clients.push((client.clone(), client_cx.clone()));
357 client_tasks.push(client_cx.foreground().spawn(simulate_client(
358 client,
359 operation_rx,
360 plan.clone(),
361 client_cx,
362 )));
363
364 log::info!("Added connection for {}", username);
365 }
366
367 Operation::RemoveConnection { user_id } => {
368 log::info!("Simulating full disconnection of user {}", user_id);
369 let client_ix = clients
370 .iter()
371 .position(|(client, cx)| client.current_user_id(cx) == user_id);
372 let Some(client_ix) = client_ix else { return false };
373 let user_connection_ids = server
374 .connection_pool
375 .lock()
376 .user_connection_ids(user_id)
377 .collect::<Vec<_>>();
378 assert_eq!(user_connection_ids.len(), 1);
379 let removed_peer_id = user_connection_ids[0].into();
380 let (client, mut client_cx) = clients.remove(client_ix);
381 let client_task = client_tasks.remove(client_ix);
382 operation_channels.remove(client_ix);
383 server.forbid_connections();
384 server.disconnect_client(removed_peer_id);
385 deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
386 deterministic.start_waiting();
387 log::info!("Waiting for user {} to exit...", user_id);
388 client_task.await;
389 deterministic.finish_waiting();
390 server.allow_connections();
391
392 for project in client.remote_projects().iter() {
393 project.read_with(&client_cx, |project, _| {
394 assert!(
395 project.is_read_only(),
396 "project {:?} should be read only",
397 project.remote_id()
398 )
399 });
400 }
401
402 for (client, cx) in clients {
403 let contacts = server
404 .app_state
405 .db
406 .get_contacts(client.current_user_id(cx))
407 .await
408 .unwrap();
409 let pool = server.connection_pool.lock();
410 for contact in contacts {
411 if let db::Contact::Accepted { user_id: id, .. } = contact {
412 if pool.is_user_online(id) {
413 assert_ne!(
414 id, user_id,
415 "removed client is still a contact of another peer"
416 );
417 }
418 }
419 }
420 }
421
422 log::info!("{} removed", client.username);
423 plan.lock().user(user_id).online = false;
424 client_cx.update(|cx| {
425 cx.clear_globals();
426 drop(client);
427 });
428 }
429
430 Operation::BounceConnection { user_id } => {
431 log::info!("Simulating temporary disconnection of user {}", user_id);
432 let user_connection_ids = server
433 .connection_pool
434 .lock()
435 .user_connection_ids(user_id)
436 .collect::<Vec<_>>();
437 if user_connection_ids.is_empty() {
438 return false;
439 }
440 assert_eq!(user_connection_ids.len(), 1);
441 let peer_id = user_connection_ids[0].into();
442 server.disconnect_client(peer_id);
443 deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
444 }
445
446 Operation::RestartServer => {
447 log::info!("Simulating server restart");
448 server.reset().await;
449 deterministic.advance_clock(RECEIVE_TIMEOUT);
450 server.start().await.unwrap();
451 deterministic.advance_clock(CLEANUP_TIMEOUT);
452 let environment = &server.app_state.config.zed_environment;
453 let stale_room_ids = server
454 .app_state
455 .db
456 .stale_room_ids(environment, server.id())
457 .await
458 .unwrap();
459 assert_eq!(stale_room_ids, vec![]);
460 }
461
462 Operation::MutateClients {
463 user_ids,
464 batch_id,
465 quiesce,
466 } => {
467 let mut applied = false;
468 for user_id in user_ids {
469 let client_ix = clients
470 .iter()
471 .position(|(client, cx)| client.current_user_id(cx) == user_id);
472 let Some(client_ix) = client_ix else { continue };
473 applied = true;
474 if let Err(err) = operation_channels[client_ix].unbounded_send(batch_id) {
475 // panic!("error signaling user {}, client {}", user_id, client_ix);
476 }
477 }
478
479 if quiesce && applied {
480 deterministic.run_until_parked();
481 }
482
483 return applied;
484 }
485 }
486 true
487}
488
489async fn apply_client_operation(
490 client: &TestClient,
491 operation: ClientOperation,
492 cx: &mut TestAppContext,
493) -> Result<bool> {
494 match operation {
495 ClientOperation::AcceptIncomingCall => {
496 let active_call = cx.read(ActiveCall::global);
497 if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) {
498 return Ok(false);
499 }
500
501 log::info!("{}: accepting incoming call", client.username);
502 active_call
503 .update(cx, |call, cx| call.accept_incoming(cx))
504 .await?;
505 }
506
507 ClientOperation::RejectIncomingCall => {
508 let active_call = cx.read(ActiveCall::global);
509 if active_call.read_with(cx, |call, _| call.incoming().borrow().is_none()) {
510 return Ok(false);
511 }
512
513 log::info!("{}: declining incoming call", client.username);
514 active_call.update(cx, |call, _| call.decline_incoming())?;
515 }
516
517 ClientOperation::LeaveCall => {
518 let active_call = cx.read(ActiveCall::global);
519 if active_call.read_with(cx, |call, _| call.room().is_none()) {
520 return Ok(false);
521 }
522
523 log::info!("{}: hanging up", client.username);
524 active_call.update(cx, |call, cx| call.hang_up(cx))?;
525 }
526
527 ClientOperation::InviteContactToCall { user_id } => {
528 let active_call = cx.read(ActiveCall::global);
529
530 log::info!("{}: inviting {}", client.username, user_id,);
531 active_call
532 .update(cx, |call, cx| call.invite(user_id.to_proto(), None, cx))
533 .await
534 .log_err();
535 }
536
537 ClientOperation::OpenLocalProject { first_root_name } => {
538 log::info!(
539 "{}: opening local project at {:?}",
540 client.username,
541 first_root_name
542 );
543
544 let root_path = Path::new("/").join(&first_root_name);
545 client.fs.create_dir(&root_path).await.unwrap();
546 client
547 .fs
548 .create_file(&root_path.join("main.rs"), Default::default())
549 .await
550 .unwrap();
551 let project = client.build_local_project(root_path, cx).await.0;
552 ensure_project_shared(&project, client, cx).await;
553 client.local_projects_mut().push(project.clone());
554 }
555
556 ClientOperation::AddWorktreeToProject {
557 project_root_name,
558 new_root_path,
559 } => {
560 let Some(project) = project_for_root_name(client, &project_root_name, cx) else {
561 return Ok(false)
562 };
563
564 log::info!(
565 "{}: finding/creating local worktree at {:?} to project with root path {}",
566 client.username,
567 new_root_path,
568 project_root_name
569 );
570
571 ensure_project_shared(&project, client, cx).await;
572 if !client.fs.paths().await.contains(&new_root_path) {
573 client.fs.create_dir(&new_root_path).await.unwrap();
574 }
575 project
576 .update(cx, |project, cx| {
577 project.find_or_create_local_worktree(&new_root_path, true, cx)
578 })
579 .await
580 .unwrap();
581 }
582
583 ClientOperation::CloseRemoteProject { project_root_name } => {
584 let Some(project) = project_for_root_name(client, &project_root_name, cx) else {
585 return Ok(false)
586 };
587
588 log::info!(
589 "{}: closing remote project with root path {}",
590 client.username,
591 project_root_name,
592 );
593
594 let ix = client
595 .remote_projects()
596 .iter()
597 .position(|p| p == &project)
598 .unwrap();
599 cx.update(|_| {
600 client.remote_projects_mut().remove(ix);
601 client.buffers().retain(|project, _| project != project);
602 drop(project);
603 });
604 }
605
606 ClientOperation::OpenRemoteProject {
607 host_id,
608 first_root_name,
609 } => {
610 let active_call = cx.read(ActiveCall::global);
611 let project = active_call.update(cx, |call, cx| {
612 let room = call.room().cloned()?;
613 let participant = room
614 .read(cx)
615 .remote_participants()
616 .get(&host_id.to_proto())?;
617 let project_id = participant
618 .projects
619 .iter()
620 .find(|project| project.worktree_root_names[0] == first_root_name)?
621 .id;
622 Some(room.update(cx, |room, cx| {
623 room.join_project(
624 project_id,
625 client.language_registry.clone(),
626 FakeFs::new(cx.background().clone()),
627 cx,
628 )
629 }))
630 });
631 let Some(project) = project else {
632 return Ok(false)
633 };
634
635 log::info!(
636 "{}: joining remote project of user {}, root name {}",
637 client.username,
638 host_id,
639 first_root_name,
640 );
641
642 let project = project.await?;
643 client.remote_projects_mut().push(project.clone());
644 }
645
646 ClientOperation::CreateWorktreeEntry {
647 project_root_name,
648 is_local,
649 full_path,
650 is_dir,
651 } => {
652 let Some(project) = project_for_root_name(client, &project_root_name, cx) else {
653 return Ok(false);
654 };
655 let Some(project_path) = project_path_for_full_path(&project, &full_path, cx) else {
656 return Ok(false);
657 };
658
659 log::info!(
660 "{}: creating {} at path {:?} in {} project {}",
661 client.username,
662 if is_dir { "dir" } else { "file" },
663 full_path,
664 if is_local { "local" } else { "remote" },
665 project_root_name,
666 );
667
668 ensure_project_shared(&project, client, cx).await;
669 project
670 .update(cx, |p, cx| p.create_entry(project_path, is_dir, cx))
671 .unwrap()
672 .await?;
673 }
674
675 ClientOperation::OpenBuffer {
676 project_root_name,
677 is_local,
678 full_path,
679 } => {
680 let Some(project) = project_for_root_name(client, &project_root_name, cx) else {
681 return Ok(false);
682 };
683 let Some(project_path) = project_path_for_full_path(&project, &full_path, cx) else {
684 return Ok(false);
685 };
686
687 log::info!(
688 "{}: opening buffer {:?} in {} project {}",
689 client.username,
690 full_path,
691 if is_local { "local" } else { "remote" },
692 project_root_name,
693 );
694
695 ensure_project_shared(&project, client, cx).await;
696 let buffer = project
697 .update(cx, |project, cx| project.open_buffer(project_path, cx))
698 .await?;
699 client.buffers_for_project(&project).insert(buffer);
700 }
701
702 ClientOperation::EditBuffer {
703 project_root_name,
704 is_local,
705 full_path,
706 edits,
707 } => {
708 let Some(project) = project_for_root_name(client, &project_root_name, cx) else {
709 return Ok(false);
710 };
711 let Some(buffer) =
712 buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else {
713 return Ok(false);
714 };
715
716 log::info!(
717 "{}: editing buffer {:?} in {} project {} with {:?}",
718 client.username,
719 full_path,
720 if is_local { "local" } else { "remote" },
721 project_root_name,
722 edits
723 );
724
725 ensure_project_shared(&project, client, cx).await;
726 buffer.update(cx, |buffer, cx| {
727 let snapshot = buffer.snapshot();
728 buffer.edit(
729 edits.into_iter().map(|(range, text)| {
730 let start = snapshot.clip_offset(range.start, Bias::Left);
731 let end = snapshot.clip_offset(range.end, Bias::Right);
732 (start..end, text)
733 }),
734 None,
735 cx,
736 );
737 });
738 }
739
740 ClientOperation::CloseBuffer {
741 project_root_name,
742 is_local,
743 full_path,
744 } => {
745 let Some(project) = project_for_root_name(client, &project_root_name, cx) else {
746 return Ok(false);
747 };
748 let Some(buffer) =
749 buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else {
750 return Ok(false);
751 };
752
753 log::info!(
754 "{}: closing buffer {:?} in {} project {}",
755 client.username,
756 full_path,
757 if is_local { "local" } else { "remote" },
758 project_root_name
759 );
760
761 ensure_project_shared(&project, client, cx).await;
762 cx.update(|_| {
763 client.buffers_for_project(&project).remove(&buffer);
764 drop(buffer);
765 });
766 }
767
768 ClientOperation::SaveBuffer {
769 project_root_name,
770 is_local,
771 full_path,
772 detach,
773 } => {
774 let Some(project) = project_for_root_name(client, &project_root_name, cx) else {
775 return Ok(false);
776 };
777 let Some(buffer) =
778 buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else {
779 return Ok(false);
780 };
781
782 log::info!(
783 "{}: saving buffer {:?} in {} project {}{}",
784 client.username,
785 full_path,
786 if is_local { "local" } else { "remote" },
787 project_root_name,
788 if detach { ", detaching" } else { ", awaiting" }
789 );
790
791 ensure_project_shared(&project, client, cx).await;
792 let (requested_version, save) =
793 buffer.update(cx, |buffer, cx| (buffer.version(), buffer.save(cx)));
794 let save = cx.background().spawn(async move {
795 let (saved_version, _, _) = save
796 .await
797 .map_err(|err| anyhow!("save request failed: {:?}", err))?;
798 assert!(saved_version.observed_all(&requested_version));
799 anyhow::Ok(())
800 });
801 if detach {
802 cx.update(|cx| save.detach_and_log_err(cx));
803 } else {
804 save.await?;
805 }
806 }
807
808 ClientOperation::RequestLspDataInBuffer {
809 project_root_name,
810 is_local,
811 full_path,
812 offset,
813 kind,
814 detach,
815 } => {
816 let Some(project) = project_for_root_name(client, &project_root_name, cx) else {
817 return Ok(false);
818 };
819 let Some(buffer) =
820 buffer_for_full_path(&*client.buffers_for_project(&project), &full_path, cx) else {
821 return Ok(false);
822 };
823
824 log::info!(
825 "{}: request LSP {:?} for buffer {:?} in {} project {}{}",
826 client.username,
827 kind,
828 full_path,
829 if is_local { "local" } else { "remote" },
830 project_root_name,
831 if detach { ", detaching" } else { ", awaiting" }
832 );
833
834 let offset = buffer.read_with(cx, |b, _| b.clip_offset(offset, Bias::Left));
835 let request = match kind {
836 LspRequestKind::Rename => cx.spawn(|mut cx| async move {
837 project
838 .update(&mut cx, |p, cx| p.prepare_rename(buffer, offset, cx))
839 .await?;
840 anyhow::Ok(())
841 }),
842 LspRequestKind::Completion => cx.spawn(|mut cx| async move {
843 project
844 .update(&mut cx, |p, cx| p.completions(&buffer, offset, cx))
845 .await?;
846 Ok(())
847 }),
848 LspRequestKind::CodeAction => cx.spawn(|mut cx| async move {
849 project
850 .update(&mut cx, |p, cx| p.code_actions(&buffer, offset..offset, cx))
851 .await?;
852 Ok(())
853 }),
854 LspRequestKind::Definition => cx.spawn(|mut cx| async move {
855 project
856 .update(&mut cx, |p, cx| p.definition(&buffer, offset, cx))
857 .await?;
858 Ok(())
859 }),
860 LspRequestKind::Highlights => cx.spawn(|mut cx| async move {
861 project
862 .update(&mut cx, |p, cx| p.document_highlights(&buffer, offset, cx))
863 .await?;
864 Ok(())
865 }),
866 };
867 if detach {
868 request.detach();
869 } else {
870 request.await?;
871 }
872 }
873
874 ClientOperation::SearchProject {
875 project_root_name,
876 is_local,
877 query,
878 detach,
879 } => {
880 let Some(project) = project_for_root_name(client, &project_root_name, cx) else {
881 return Ok(false);
882 };
883
884 log::info!(
885 "{}: search {} project {} for {:?}{}",
886 client.username,
887 if is_local { "local" } else { "remote" },
888 project_root_name,
889 query,
890 if detach { ", detaching" } else { ", awaiting" }
891 );
892
893 let search = project.update(cx, |project, cx| {
894 project.search(SearchQuery::text(query, false, false), cx)
895 });
896 let search = cx.background().spawn(async move {
897 search
898 .await
899 .map_err(|err| anyhow!("search request failed: {:?}", err))
900 });
901 if detach {
902 cx.update(|cx| search.detach_and_log_err(cx));
903 } else {
904 search.await?;
905 }
906 }
907
908 ClientOperation::CreateFsEntry { path, is_dir } => {
909 if client.fs.metadata(&path.parent().unwrap()).await?.is_none() {
910 return Ok(false);
911 }
912
913 log::info!(
914 "{}: creating {} at {:?}",
915 client.username,
916 if is_dir { "dir" } else { "file" },
917 path
918 );
919
920 if is_dir {
921 client.fs.create_dir(&path).await.unwrap();
922 } else {
923 client
924 .fs
925 .create_file(&path, Default::default())
926 .await
927 .unwrap();
928 }
929 }
930
931 ClientOperation::WriteGitIndex {
932 repo_path,
933 contents,
934 } => {
935 if !client
936 .fs
937 .metadata(&repo_path)
938 .await?
939 .map_or(false, |m| m.is_dir)
940 {
941 return Ok(false);
942 }
943
944 log::info!(
945 "{}: writing git index for repo {:?}: {:?}",
946 client.username,
947 repo_path,
948 contents
949 );
950
951 let dot_git_dir = repo_path.join(".git");
952 let contents = contents
953 .iter()
954 .map(|(path, contents)| (path.as_path(), contents.clone()))
955 .collect::<Vec<_>>();
956 if client.fs.metadata(&dot_git_dir).await?.is_none() {
957 client.fs.create_dir(&dot_git_dir).await?;
958 }
959 client.fs.set_index_for_repo(&dot_git_dir, &contents).await;
960 }
961 }
962 Ok(true)
963}
964
/// Drives the randomized test: generates (or replays) the sequence of server
/// and client operations, and records them so a failing run can be saved with
/// `save` and replayed with `load`.
struct TestPlan {
    /// Randomness source used when generating a fresh plan.
    rng: StdRng,
    /// True when executing a plan loaded from disk rather than generating one.
    replay: bool,
    /// Every operation produced so far, paired with a flag that marks
    /// operations that could not be applied (omitted when saving the plan).
    stored_operations: Vec<(StoredOperation, Arc<AtomicBool>)>,
    /// Upper bound on the number of operations to generate.
    max_operations: usize,
    /// Index of the next server operation (generation counter / replay cursor).
    operation_ix: usize,
    /// Per-user state, one entry per simulated peer.
    users: Vec<UserTestPlan>,
    /// Monotonic id for grouping client operations into batches.
    next_batch_id: usize,
    // Feature toggles drawn once per run, so some runs exercise
    // restarts/reconnects/disconnects and others don't.
    allow_server_restarts: bool,
    allow_client_reconnection: bool,
    allow_client_disconnection: bool,
}
977
/// Per-user state tracked while generating or replaying a test plan.
struct UserTestPlan {
    /// Database id of this simulated user.
    user_id: UserId,
    /// Display name, e.g. "user-1".
    username: String,
    /// Counter used to mint unique project root names for this user.
    next_root_id: usize,
    /// This user's replay cursor into `TestPlan::stored_operations`.
    operation_ix: usize,
    /// Whether the user currently has a live connection.
    online: bool,
}
985
/// A single recorded operation in a saved test plan. Serialized untagged so
/// the JSON for a server operation and a client operation are distinguished
/// purely by their fields.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(untagged)]
enum StoredOperation {
    /// A server-level operation (connections, restarts, client batches).
    Server(Operation),
    /// An operation performed by one client, tagged with the batch it ran in.
    Client {
        user_id: UserId,
        batch_id: usize,
        operation: ClientOperation,
    },
}
996
997#[derive(Clone, Debug, Serialize, Deserialize)]
998enum Operation {
999 AddConnection {
1000 user_id: UserId,
1001 },
1002 RemoveConnection {
1003 user_id: UserId,
1004 },
1005 BounceConnection {
1006 user_id: UserId,
1007 },
1008 RestartServer,
1009 MutateClients {
1010 batch_id: usize,
1011 #[serde(skip_serializing)]
1012 #[serde(skip_deserializing)]
1013 user_ids: Vec<UserId>,
1014 quiesce: bool,
1015 },
1016}
1017
/// An operation performed by a single simulated client.
///
/// `is_local` fields are informational (used in log messages); projects are
/// looked up by `project_root_name`, and paths by `full_path`.
#[derive(Clone, Debug, Serialize, Deserialize)]
enum ClientOperation {
    /// Accept the pending incoming call, if any.
    AcceptIncomingCall,
    /// Decline the pending incoming call, if any.
    RejectIncomingCall,
    /// Hang up the current call, if any.
    LeaveCall,
    /// Invite a contact into the current call.
    InviteContactToCall {
        user_id: UserId,
    },
    /// Create and share a new local project rooted at `first_root_name`.
    OpenLocalProject {
        first_root_name: String,
    },
    /// Join another participant's shared project, identified by its host and
    /// first worktree root name.
    OpenRemoteProject {
        host_id: UserId,
        first_root_name: String,
    },
    /// Add a worktree at `new_root_path` to an existing project.
    AddWorktreeToProject {
        project_root_name: String,
        new_root_path: PathBuf,
    },
    /// Leave a previously-joined remote project, dropping its buffers.
    CloseRemoteProject {
        project_root_name: String,
    },
    /// Open (and retain) a buffer at `full_path`.
    OpenBuffer {
        project_root_name: String,
        is_local: bool,
        full_path: PathBuf,
    },
    /// Run a project-wide text search; `detach` runs it fire-and-forget.
    SearchProject {
        project_root_name: String,
        is_local: bool,
        query: String,
        detach: bool,
    },
    /// Apply text edits to an open buffer (offsets are clipped when applied).
    EditBuffer {
        project_root_name: String,
        is_local: bool,
        full_path: PathBuf,
        edits: Vec<(Range<usize>, Arc<str>)>,
    },
    /// Release a previously-opened buffer.
    CloseBuffer {
        project_root_name: String,
        is_local: bool,
        full_path: PathBuf,
    },
    /// Save an open buffer; `detach` skips awaiting the result.
    SaveBuffer {
        project_root_name: String,
        is_local: bool,
        full_path: PathBuf,
        detach: bool,
    },
    /// Issue an LSP request at `offset` in an open buffer.
    RequestLspDataInBuffer {
        project_root_name: String,
        is_local: bool,
        full_path: PathBuf,
        offset: usize,
        kind: LspRequestKind,
        detach: bool,
    },
    /// Create a file or directory through the project (worktree) API.
    CreateWorktreeEntry {
        project_root_name: String,
        is_local: bool,
        full_path: PathBuf,
        is_dir: bool,
    },
    /// Create a file or directory directly on the fake filesystem.
    CreateFsEntry {
        path: PathBuf,
        is_dir: bool,
    },
    /// Write `contents` into the git index of the repo at `repo_path`.
    WriteGitIndex {
        repo_path: PathBuf,
        contents: Vec<(PathBuf, String)>,
    },
}
1091
/// The kind of LSP request issued by `ClientOperation::RequestLspDataInBuffer`.
#[derive(Clone, Debug, Serialize, Deserialize)]
enum LspRequestKind {
    Rename,
    Completion,
    CodeAction,
    Definition,
    Highlights,
}
1100
1101impl TestPlan {
1102 fn new(mut rng: StdRng, users: Vec<UserTestPlan>, max_operations: usize) -> Self {
1103 Self {
1104 replay: false,
1105 allow_server_restarts: rng.gen_bool(0.7),
1106 allow_client_reconnection: rng.gen_bool(0.7),
1107 allow_client_disconnection: rng.gen_bool(0.1),
1108 stored_operations: Vec::new(),
1109 operation_ix: 0,
1110 next_batch_id: 0,
1111 max_operations,
1112 users,
1113 rng,
1114 }
1115 }
1116
    /// Loads a previously saved plan from `path` and switches this plan into
    /// replay mode.
    ///
    /// `MutateClients::user_ids` is not serialized, so it is reconstructed
    /// here from the client operations that follow each batch and share its
    /// `batch_id`.
    fn load(&mut self, path: &Path) {
        let json = std::fs::read_to_string(path).unwrap();
        self.replay = true;
        let stored_operations: Vec<StoredOperation> = serde_json::from_str(&json).unwrap();
        self.stored_operations = stored_operations
            .iter()
            .cloned()
            .enumerate()
            .map(|(i, mut operation)| {
                if let StoredOperation::Server(Operation::MutateClients {
                    batch_id: current_batch_id,
                    user_ids,
                    ..
                }) = &mut operation
                {
                    // `user_ids` is skipped during (de)serialization, so it
                    // must be empty here; fill it from the later client
                    // operations belonging to this batch.
                    assert!(user_ids.is_empty());
                    user_ids.extend(stored_operations[i + 1..].iter().filter_map(|operation| {
                        if let StoredOperation::Client {
                            user_id, batch_id, ..
                        } = operation
                        {
                            if batch_id == current_batch_id {
                                return Some(user_id);
                            }
                        }
                        None
                    }));
                    user_ids.sort_unstable();
                }
                // Each replayed operation starts out not-skipped.
                (operation, Arc::new(AtomicBool::new(false)))
            })
            .collect()
    }
1150
1151 fn save(&mut self, path: &Path) {
1152 // Format each operation as one line
1153 let mut json = Vec::new();
1154 json.push(b'[');
1155 for (operation, skipped) in &self.stored_operations {
1156 if skipped.load(SeqCst) {
1157 continue;
1158 }
1159 if json.len() > 1 {
1160 json.push(b',');
1161 }
1162 json.extend_from_slice(b"\n ");
1163 serde_json::to_writer(&mut json, operation).unwrap();
1164 }
1165 json.extend_from_slice(b"\n]\n");
1166 std::fs::write(path, &json).unwrap();
1167 }
1168
1169 fn next_server_operation(
1170 &mut self,
1171 clients: &[(Rc<TestClient>, TestAppContext)],
1172 ) -> Option<(Operation, Arc<AtomicBool>)> {
1173 if self.replay {
1174 while let Some(stored_operation) = self.stored_operations.get(self.operation_ix) {
1175 self.operation_ix += 1;
1176 if let (StoredOperation::Server(operation), skipped) = stored_operation {
1177 return Some((operation.clone(), skipped.clone()));
1178 }
1179 }
1180 None
1181 } else {
1182 let operation = self.generate_server_operation(clients)?;
1183 let skipped = Arc::new(AtomicBool::new(false));
1184 self.stored_operations
1185 .push((StoredOperation::Server(operation.clone()), skipped.clone()));
1186 Some((operation, skipped))
1187 }
1188 }
1189
1190 fn next_client_operation(
1191 &mut self,
1192 client: &TestClient,
1193 current_batch_id: usize,
1194 cx: &TestAppContext,
1195 ) -> Option<(ClientOperation, Arc<AtomicBool>)> {
1196 let current_user_id = client.current_user_id(cx);
1197 let user_ix = self
1198 .users
1199 .iter()
1200 .position(|user| user.user_id == current_user_id)
1201 .unwrap();
1202 let user_plan = &mut self.users[user_ix];
1203
1204 if self.replay {
1205 while let Some(stored_operation) = self.stored_operations.get(user_plan.operation_ix) {
1206 user_plan.operation_ix += 1;
1207 if let (
1208 StoredOperation::Client {
1209 user_id, operation, ..
1210 },
1211 skipped,
1212 ) = stored_operation
1213 {
1214 if user_id == ¤t_user_id {
1215 return Some((operation.clone(), skipped.clone()));
1216 }
1217 }
1218 }
1219 None
1220 } else {
1221 let operation = self.generate_client_operation(current_user_id, client, cx)?;
1222 let skipped = Arc::new(AtomicBool::new(false));
1223 self.stored_operations.push((
1224 StoredOperation::Client {
1225 user_id: current_user_id,
1226 batch_id: current_batch_id,
1227 operation: operation.clone(),
1228 },
1229 skipped.clone(),
1230 ));
1231 Some((operation, skipped))
1232 }
1233 }
1234
    /// Randomly generate the next server-level operation (connect, disconnect,
    /// bounce, restart, or a batch of client mutations). Returns `None` once
    /// the operation budget is exhausted. NOTE(review): the exact order of
    /// `self.rng` draws determines reproducibility — do not reorder them.
    fn generate_server_operation(
        &mut self,
        clients: &[(Rc<TestClient>, TestAppContext)],
    ) -> Option<Operation> {
        if self.operation_ix == self.max_operations {
            return None;
        }

        Some(loop {
            break match self.rng.gen_range(0..100) {
                // Connect a currently-offline user (only if someone is offline).
                0..=29 if clients.len() < self.users.len() => {
                    let user = self
                        .users
                        .iter()
                        .filter(|u| !u.online)
                        .choose(&mut self.rng)
                        .unwrap();
                    self.operation_ix += 1;
                    Operation::AddConnection {
                        user_id: user.user_id,
                    }
                }
                // Drop a random client's connection.
                30..=34 if clients.len() > 1 && self.allow_client_disconnection => {
                    let (client, cx) = &clients[self.rng.gen_range(0..clients.len())];
                    let user_id = client.current_user_id(cx);
                    self.operation_ix += 1;
                    Operation::RemoveConnection { user_id }
                }
                // Disconnect and immediately reconnect a random client.
                35..=39 if clients.len() > 1 && self.allow_client_reconnection => {
                    let (client, cx) = &clients[self.rng.gen_range(0..clients.len())];
                    let user_id = client.current_user_id(cx);
                    self.operation_ix += 1;
                    Operation::BounceConnection { user_id }
                }
                40..=44 if self.allow_server_restarts && clients.len() > 1 => {
                    self.operation_ix += 1;
                    Operation::RestartServer
                }
                // Otherwise: schedule a batch of client operations. The chosen
                // user ids may repeat (a user can get multiple operations).
                _ if !clients.is_empty() => {
                    let count = self
                        .rng
                        .gen_range(1..10)
                        .min(self.max_operations - self.operation_ix);
                    let batch_id = util::post_inc(&mut self.next_batch_id);
                    let mut user_ids = (0..count)
                        .map(|_| {
                            let ix = self.rng.gen_range(0..clients.len());
                            let (client, cx) = &clients[ix];
                            client.current_user_id(cx)
                        })
                        .collect::<Vec<_>>();
                    user_ids.sort_unstable();
                    Operation::MutateClients {
                        user_ids,
                        batch_id,
                        quiesce: self.rng.gen_bool(0.7),
                    }
                }
                // No arm applied (e.g. no clients yet) — redraw.
                _ => continue,
            };
        })
    }
1297
    /// Randomly generate the next operation for one client: call mutations,
    /// project open/close, worktree edits, buffer edits/queries, LSP requests,
    /// git-index writes, or fs entry creation. Loops redrawing until an
    /// applicable operation is found; returns `None` when the budget is spent.
    /// NOTE(review): rng draw order is load-bearing for reproducibility.
    fn generate_client_operation(
        &mut self,
        user_id: UserId,
        client: &TestClient,
        cx: &TestAppContext,
    ) -> Option<ClientOperation> {
        if self.operation_ix == self.max_operations {
            return None;
        }

        let executor = cx.background();
        self.operation_ix += 1;
        let call = cx.read(ActiveCall::global);
        Some(loop {
            match self.rng.gen_range(0..100_u32) {
                // Mutate the call
                0..=29 => {
                    // Respond to an incoming call
                    if call.read_with(cx, |call, _| call.incoming().borrow().is_some()) {
                        break if self.rng.gen_bool(0.7) {
                            ClientOperation::AcceptIncomingCall
                        } else {
                            ClientOperation::RejectIncomingCall
                        };
                    }

                    match self.rng.gen_range(0..100_u32) {
                        // Invite a contact to the current call
                        0..=70 => {
                            let available_contacts =
                                client.user_store.read_with(cx, |user_store, _| {
                                    user_store
                                        .contacts()
                                        .iter()
                                        .filter(|contact| contact.online && !contact.busy)
                                        .cloned()
                                        .collect::<Vec<_>>()
                                });
                            if !available_contacts.is_empty() {
                                let contact = available_contacts.choose(&mut self.rng).unwrap();
                                break ClientOperation::InviteContactToCall {
                                    user_id: UserId(contact.user.id as i32),
                                };
                            }
                        }

                        // Leave the current call
                        71.. => {
                            if self.allow_client_disconnection
                                && call.read_with(cx, |call, _| call.room().is_some())
                            {
                                break ClientOperation::LeaveCall;
                            }
                        }
                    }
                }

                // Mutate projects
                30..=59 => match self.rng.gen_range(0..100_u32) {
                    // Open a new project
                    0..=70 => {
                        // Open a remote project: pick a participant project this
                        // client has not joined yet.
                        if let Some(room) = call.read_with(cx, |call, _| call.room().cloned()) {
                            let existing_remote_project_ids = cx.read(|cx| {
                                client
                                    .remote_projects()
                                    .iter()
                                    .map(|p| p.read(cx).remote_id().unwrap())
                                    .collect::<Vec<_>>()
                            });
                            let new_remote_projects = room.read_with(cx, |room, _| {
                                room.remote_participants()
                                    .values()
                                    .flat_map(|participant| {
                                        participant.projects.iter().filter_map(|project| {
                                            if existing_remote_project_ids.contains(&project.id) {
                                                None
                                            } else {
                                                Some((
                                                    UserId::from_proto(participant.user.id),
                                                    project.worktree_root_names[0].clone(),
                                                ))
                                            }
                                        })
                                    })
                                    .collect::<Vec<_>>()
                            });
                            if !new_remote_projects.is_empty() {
                                let (host_id, first_root_name) =
                                    new_remote_projects.choose(&mut self.rng).unwrap().clone();
                                break ClientOperation::OpenRemoteProject {
                                    host_id,
                                    first_root_name,
                                };
                            }
                        }
                        // Open a local project
                        else {
                            let first_root_name = self.next_root_dir_name(user_id);
                            break ClientOperation::OpenLocalProject { first_root_name };
                        }
                    }

                    // Close a remote project
                    71..=80 => {
                        if !client.remote_projects().is_empty() {
                            let project = client
                                .remote_projects()
                                .choose(&mut self.rng)
                                .unwrap()
                                .clone();
                            let first_root_name = root_name_for_project(&project, cx);
                            break ClientOperation::CloseRemoteProject {
                                project_root_name: first_root_name,
                            };
                        }
                    }

                    // Mutate project worktrees
                    81.. => match self.rng.gen_range(0..100_u32) {
                        // Add a worktree to a local project
                        0..=50 => {
                            let Some(project) = client
                                .local_projects()
                                .choose(&mut self.rng)
                                .cloned() else { continue };
                            let project_root_name = root_name_for_project(&project, cx);
                            let mut paths = executor.block(client.fs.paths());
                            // Skip the first fs path — presumably the root
                            // directory, which shouldn't become a worktree.
                            paths.remove(0);
                            let new_root_path = if paths.is_empty() || self.rng.gen() {
                                Path::new("/").join(&self.next_root_dir_name(user_id))
                            } else {
                                paths.choose(&mut self.rng).unwrap().clone()
                            };
                            break ClientOperation::AddWorktreeToProject {
                                project_root_name,
                                new_root_path,
                            };
                        }

                        // Add an entry to a worktree
                        _ => {
                            let Some(project) = choose_random_project(client, &mut self.rng) else { continue };
                            let project_root_name = root_name_for_project(&project, cx);
                            let is_local = project.read_with(cx, |project, _| project.is_local());
                            let worktree = project.read_with(cx, |project, cx| {
                                project
                                    .worktrees(cx)
                                    .filter(|worktree| {
                                        let worktree = worktree.read(cx);
                                        worktree.is_visible()
                                            && worktree.entries(false).any(|e| e.is_file())
                                            && worktree.root_entry().map_or(false, |e| e.is_dir())
                                    })
                                    .choose(&mut self.rng)
                            });
                            let Some(worktree) = worktree else { continue };
                            let is_dir = self.rng.gen::<bool>();
                            let mut full_path =
                                worktree.read_with(cx, |w, _| PathBuf::from(w.root_name()));
                            full_path.push(gen_file_name(&mut self.rng));
                            if !is_dir {
                                full_path.set_extension("rs");
                            }
                            break ClientOperation::CreateWorktreeEntry {
                                project_root_name,
                                is_local,
                                full_path,
                                is_dir,
                            };
                        }
                    },
                },

                // Query and mutate buffers
                60..=90 => {
                    let Some(project) = choose_random_project(client, &mut self.rng) else { continue };
                    let project_root_name = root_name_for_project(&project, cx);
                    let is_local = project.read_with(cx, |project, _| project.is_local());

                    match self.rng.gen_range(0..100_u32) {
                        // Manipulate an existing buffer
                        0..=70 => {
                            let Some(buffer) = client
                                .buffers_for_project(&project)
                                .iter()
                                .choose(&mut self.rng)
                                .cloned() else { continue };

                            let full_path = buffer
                                .read_with(cx, |buffer, cx| buffer.file().unwrap().full_path(cx));

                            match self.rng.gen_range(0..100_u32) {
                                // Close the buffer
                                0..=15 => {
                                    break ClientOperation::CloseBuffer {
                                        project_root_name,
                                        is_local,
                                        full_path,
                                    };
                                }
                                // Save the buffer (only when dirty)
                                16..=29 if buffer.read_with(cx, |b, _| b.is_dirty()) => {
                                    let detach = self.rng.gen_bool(0.3);
                                    break ClientOperation::SaveBuffer {
                                        project_root_name,
                                        is_local,
                                        full_path,
                                        detach,
                                    };
                                }
                                // Edit the buffer
                                30..=69 => {
                                    let edits = buffer.read_with(cx, |buffer, _| {
                                        buffer.get_random_edits(&mut self.rng, 3)
                                    });
                                    break ClientOperation::EditBuffer {
                                        project_root_name,
                                        is_local,
                                        full_path,
                                        edits,
                                    };
                                }
                                // Make an LSP request at a random valid offset
                                _ => {
                                    let offset = buffer.read_with(cx, |buffer, _| {
                                        buffer.clip_offset(
                                            self.rng.gen_range(0..=buffer.len()),
                                            language::Bias::Left,
                                        )
                                    });
                                    let detach = self.rng.gen();
                                    break ClientOperation::RequestLspDataInBuffer {
                                        project_root_name,
                                        full_path,
                                        offset,
                                        is_local,
                                        kind: match self.rng.gen_range(0..5_u32) {
                                            0 => LspRequestKind::Rename,
                                            1 => LspRequestKind::Highlights,
                                            2 => LspRequestKind::Definition,
                                            3 => LspRequestKind::CodeAction,
                                            4.. => LspRequestKind::Completion,
                                        },
                                        detach,
                                    };
                                }
                            }
                        }

                        // Search the project for a random single-letter query
                        71..=80 => {
                            let query = self.rng.gen_range('a'..='z').to_string();
                            let detach = self.rng.gen_bool(0.3);
                            break ClientOperation::SearchProject {
                                project_root_name,
                                is_local,
                                query,
                                detach,
                            };
                        }

                        // Open a buffer
                        81.. => {
                            let worktree = project.read_with(cx, |project, cx| {
                                project
                                    .worktrees(cx)
                                    .filter(|worktree| {
                                        let worktree = worktree.read(cx);
                                        worktree.is_visible()
                                            && worktree.entries(false).any(|e| e.is_file())
                                    })
                                    .choose(&mut self.rng)
                            });
                            let Some(worktree) = worktree else { continue };
                            let full_path = worktree.read_with(cx, |worktree, _| {
                                let entry = worktree
                                    .entries(false)
                                    .filter(|e| e.is_file())
                                    .choose(&mut self.rng)
                                    .unwrap();
                                if entry.path.as_ref() == Path::new("") {
                                    Path::new(worktree.root_name()).into()
                                } else {
                                    Path::new(worktree.root_name()).join(&entry.path)
                                }
                            });
                            break ClientOperation::OpenBuffer {
                                project_root_name,
                                is_local,
                                full_path,
                            };
                        }
                    }
                }

                // Update a git index with random base text for a subset of files
                91..=95 => {
                    let repo_path = executor
                        .block(client.fs.directories())
                        .choose(&mut self.rng)
                        .unwrap()
                        .clone();

                    let mut file_paths = executor
                        .block(client.fs.files())
                        .into_iter()
                        .filter(|path| path.starts_with(&repo_path))
                        .collect::<Vec<_>>();
                    let count = self.rng.gen_range(0..=file_paths.len());
                    file_paths.shuffle(&mut self.rng);
                    file_paths.truncate(count);

                    let mut contents = Vec::new();
                    for abs_child_file_path in &file_paths {
                        let child_file_path = abs_child_file_path
                            .strip_prefix(&repo_path)
                            .unwrap()
                            .to_path_buf();
                        let new_base = Alphanumeric.sample_string(&mut self.rng, 16);
                        contents.push((child_file_path, new_base));
                    }

                    break ClientOperation::WriteGitIndex {
                        repo_path,
                        contents,
                    };
                }

                // Create a file or directory
                96.. => {
                    let is_dir = self.rng.gen::<bool>();
                    let mut path = cx
                        .background()
                        .block(client.fs.directories())
                        .choose(&mut self.rng)
                        .unwrap()
                        .clone();
                    path.push(gen_file_name(&mut self.rng));
                    if !is_dir {
                        path.set_extension("rs");
                    }
                    break ClientOperation::CreateFsEntry { path, is_dir };
                }
            }
        })
    }
1644
1645 fn next_root_dir_name(&mut self, user_id: UserId) -> String {
1646 let user_ix = self
1647 .users
1648 .iter()
1649 .position(|user| user.user_id == user_id)
1650 .unwrap();
1651 let root_id = util::post_inc(&mut self.users[user_ix].next_root_id);
1652 format!("dir-{user_id}-{root_id}")
1653 }
1654
1655 fn user(&mut self, user_id: UserId) -> &mut UserTestPlan {
1656 let ix = self
1657 .users
1658 .iter()
1659 .position(|user| user.user_id == user_id)
1660 .unwrap();
1661 &mut self.users[ix]
1662 }
1663}
1664
/// Drive one simulated client: install a fake Rust language server whose
/// handlers return randomized-but-deterministic responses (seeded from the
/// shared plan's rng), then apply client operations as batch ids arrive on
/// `operation_rx` until the channel closes or the plan is exhausted.
async fn simulate_client(
    client: Rc<TestClient>,
    mut operation_rx: futures::channel::mpsc::UnboundedReceiver<usize>,
    plan: Arc<Mutex<TestPlan>>,
    mut cx: TestAppContext,
) {
    // Setup language server
    let mut language = Language::new(
        LanguageConfig {
            name: "Rust".into(),
            path_suffixes: vec!["rs".to_string()],
            ..Default::default()
        },
        None,
    );
    let _fake_language_servers = language
        .set_fake_lsp_adapter(Arc::new(FakeLspAdapter {
            name: "the-fake-language-server",
            capabilities: lsp::LanguageServer::full_capabilities(),
            initializer: Some(Box::new({
                let plan = plan.clone();
                let fs = client.fs.clone();
                move |fake_server: &mut FakeLanguageServer| {
                    // Completions: always a single fixed edit at the buffer start.
                    fake_server.handle_request::<lsp::request::Completion, _, _>(
                        |_, _| async move {
                            Ok(Some(lsp::CompletionResponse::Array(vec![
                                lsp::CompletionItem {
                                    text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit {
                                        range: lsp::Range::new(
                                            lsp::Position::new(0, 0),
                                            lsp::Position::new(0, 0),
                                        ),
                                        new_text: "the-new-text".to_string(),
                                    })),
                                    ..Default::default()
                                },
                            ])))
                        },
                    );

                    // Code actions: one fixed, empty action.
                    fake_server.handle_request::<lsp::request::CodeActionRequest, _, _>(
                        |_, _| async move {
                            Ok(Some(vec![lsp::CodeActionOrCommand::CodeAction(
                                lsp::CodeAction {
                                    title: "the-code-action".to_string(),
                                    ..Default::default()
                                },
                            )]))
                        },
                    );

                    // Prepare-rename: echo back a zero-width range at the cursor.
                    fake_server.handle_request::<lsp::request::PrepareRenameRequest, _, _>(
                        |params, _| async move {
                            Ok(Some(lsp::PrepareRenameResponse::Range(lsp::Range::new(
                                params.position,
                                params.position,
                            ))))
                        },
                    );

                    // Go-to-definition: point at 1-2 random files from the fake fs.
                    fake_server.handle_request::<lsp::request::GotoDefinition, _, _>({
                        let fs = fs.clone();
                        let plan = plan.clone();
                        move |_, _| {
                            let fs = fs.clone();
                            let plan = plan.clone();
                            async move {
                                let files = fs.files().await;
                                let count = plan.lock().rng.gen_range::<usize, _>(1..3);
                                let files = (0..count)
                                    .map(|_| files.choose(&mut plan.lock().rng).unwrap())
                                    .collect::<Vec<_>>();
                                log::info!("LSP: Returning definitions in files {:?}", &files);
                                Ok(Some(lsp::GotoDefinitionResponse::Array(
                                    files
                                        .into_iter()
                                        .map(|file| lsp::Location {
                                            uri: lsp::Url::from_file_path(file).unwrap(),
                                            range: Default::default(),
                                        })
                                        .collect(),
                                )))
                            }
                        }
                    });

                    // Document highlights: 1-5 random (normalized, sorted) ranges.
                    fake_server.handle_request::<lsp::request::DocumentHighlightRequest, _, _>({
                        let plan = plan.clone();
                        move |_, _| {
                            let mut highlights = Vec::new();
                            let highlight_count = plan.lock().rng.gen_range(1..=5);
                            for _ in 0..highlight_count {
                                let start_row = plan.lock().rng.gen_range(0..100);
                                let start_column = plan.lock().rng.gen_range(0..100);
                                let start = PointUtf16::new(start_row, start_column);
                                let end_row = plan.lock().rng.gen_range(0..100);
                                let end_column = plan.lock().rng.gen_range(0..100);
                                let end = PointUtf16::new(end_row, end_column);
                                // Ensure start <= end before converting to LSP.
                                let range = if start > end { end..start } else { start..end };
                                highlights.push(lsp::DocumentHighlight {
                                    range: range_to_lsp(range.clone()),
                                    kind: Some(lsp::DocumentHighlightKind::READ),
                                });
                            }
                            highlights.sort_unstable_by_key(|highlight| {
                                (highlight.range.start, highlight.range.end)
                            });
                            async move { Ok(Some(highlights)) }
                        }
                    });
                }
            })),
            ..Default::default()
        }))
        .await;
    client.language_registry.add(Arc::new(language));

    // Main loop: one plan operation per received batch id. Failed operations
    // are logged; operations that didn't apply are marked skipped so they are
    // omitted when the plan is saved.
    while let Some(batch_id) = operation_rx.next().await {
        let Some((operation, skipped)) = plan.lock().next_client_operation(&client, batch_id, &cx) else { break };
        match apply_client_operation(&client, operation, &mut cx).await {
            Err(error) => {
                log::error!("{} error: {}", client.username, error);
            }
            Ok(applied) => {
                if !applied {
                    skipped.store(true, SeqCst);
                }
            }
        }
        cx.background().simulate_random_delay().await;
    }
    log::info!("{}: done", client.username);
}
1798
1799fn buffer_for_full_path(
1800 buffers: &HashSet<ModelHandle<language::Buffer>>,
1801 full_path: &PathBuf,
1802 cx: &TestAppContext,
1803) -> Option<ModelHandle<language::Buffer>> {
1804 buffers
1805 .iter()
1806 .find(|buffer| {
1807 buffer.read_with(cx, |buffer, cx| {
1808 buffer.file().unwrap().full_path(cx) == *full_path
1809 })
1810 })
1811 .cloned()
1812}
1813
1814fn project_for_root_name(
1815 client: &TestClient,
1816 root_name: &str,
1817 cx: &TestAppContext,
1818) -> Option<ModelHandle<Project>> {
1819 if let Some(ix) = project_ix_for_root_name(&*client.local_projects(), root_name, cx) {
1820 return Some(client.local_projects()[ix].clone());
1821 }
1822 if let Some(ix) = project_ix_for_root_name(&*client.remote_projects(), root_name, cx) {
1823 return Some(client.remote_projects()[ix].clone());
1824 }
1825 None
1826}
1827
1828fn project_ix_for_root_name(
1829 projects: &[ModelHandle<Project>],
1830 root_name: &str,
1831 cx: &TestAppContext,
1832) -> Option<usize> {
1833 projects.iter().position(|project| {
1834 project.read_with(cx, |project, cx| {
1835 let worktree = project.visible_worktrees(cx).next().unwrap();
1836 worktree.read(cx).root_name() == root_name
1837 })
1838 })
1839}
1840
1841fn root_name_for_project(project: &ModelHandle<Project>, cx: &TestAppContext) -> String {
1842 project.read_with(cx, |project, cx| {
1843 project
1844 .visible_worktrees(cx)
1845 .next()
1846 .unwrap()
1847 .read(cx)
1848 .root_name()
1849 .to_string()
1850 })
1851}
1852
1853fn project_path_for_full_path(
1854 project: &ModelHandle<Project>,
1855 full_path: &Path,
1856 cx: &TestAppContext,
1857) -> Option<ProjectPath> {
1858 let mut components = full_path.components();
1859 let root_name = components.next().unwrap().as_os_str().to_str().unwrap();
1860 let path = components.as_path().into();
1861 let worktree_id = project.read_with(cx, |project, cx| {
1862 project.worktrees(cx).find_map(|worktree| {
1863 let worktree = worktree.read(cx);
1864 if worktree.root_name() == root_name {
1865 Some(worktree.id())
1866 } else {
1867 None
1868 }
1869 })
1870 })?;
1871 Some(ProjectPath { worktree_id, path })
1872}
1873
1874async fn ensure_project_shared(
1875 project: &ModelHandle<Project>,
1876 client: &TestClient,
1877 cx: &mut TestAppContext,
1878) {
1879 let first_root_name = root_name_for_project(project, cx);
1880 let active_call = cx.read(ActiveCall::global);
1881 if active_call.read_with(cx, |call, _| call.room().is_some())
1882 && project.read_with(cx, |project, _| project.is_local() && !project.is_shared())
1883 {
1884 match active_call
1885 .update(cx, |call, cx| call.share_project(project.clone(), cx))
1886 .await
1887 {
1888 Ok(project_id) => {
1889 log::info!(
1890 "{}: shared project {} with id {}",
1891 client.username,
1892 first_root_name,
1893 project_id
1894 );
1895 }
1896 Err(error) => {
1897 log::error!(
1898 "{}: error sharing project {}: {:?}",
1899 client.username,
1900 first_root_name,
1901 error
1902 );
1903 }
1904 }
1905 }
1906}
1907
1908fn choose_random_project(client: &TestClient, rng: &mut StdRng) -> Option<ModelHandle<Project>> {
1909 client
1910 .local_projects()
1911 .iter()
1912 .chain(client.remote_projects().iter())
1913 .choose(rng)
1914 .cloned()
1915}
1916
1917fn gen_file_name(rng: &mut StdRng) -> String {
1918 let mut name = String::new();
1919 for _ in 0..10 {
1920 let letter = rng.gen_range('a'..='z');
1921 name.push(letter);
1922 }
1923 name
1924}
1925
1926fn path_env_var(name: &str) -> Option<PathBuf> {
1927 let value = env::var(name).ok()?;
1928 let mut path = PathBuf::from(value);
1929 if path.is_relative() {
1930 let mut abs_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
1931 abs_path.pop();
1932 abs_path.pop();
1933 abs_path.push(path);
1934 path = abs_path
1935 }
1936 Some(path)
1937}