//! Perf profiler for Zed tests. Outputs timings of tests marked with the `#[perf]`
//! attribute to stdout in Markdown. See the documentation of `util_macros::perf`
//! for usage details on the actual attribute.
//!
//! # Setup
//! Make sure `hyperfine` is installed and in the shell path.
//!
//! # Usage
//! Calling this tool rebuilds the targeted crate(s) with some cfg flags set for the
//! perf proc macro *and* enables optimisations (`release-fast` profile), so expect
//! it to take a little while.
//!
//! To test an individual crate, run:
//! ```sh
//! cargo perf-test -p $CRATE
//! ```
//!
//! To test everything (which will be **VERY SLOW**), run:
//! ```sh
//! cargo perf-test --workspace
//! ```
//!
//! Some command-line parameters are also recognised by this profiler. To filter
//! out all tests below a certain importance (e.g. `important`), run:
//! ```sh
//! cargo perf-test $WHATEVER -- --important
//! ```
//!
//! Similarly, to skip outputting progress to the command line, pass `-- --quiet`.
//! These flags can be combined.
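//! For instance, to profile only tests marked `important` or above while keeping
//! the terminal quiet:
//! ```sh
//! cargo perf-test -p $CRATE -- --important --quiet
//! ```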
//!
//! ## Comparing runs
//! Passing `--json=ident` will save per-crate run files in `.perf-runs`, e.g.
//! `cargo perf-test -p gpui -- --json=blah` will result in `.perf-runs/blah.gpui.json`
//! being created (unless no tests were run). These results can be automatically
//! compared. To do so, run `cargo perf-compare new-ident old-ident`.
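//!
//! A typical workflow might look like the following (the identifiers `before`
//! and `after` are arbitrary):
//! ```sh
//! cargo perf-test -p gpui -- --json=before
//! # ...make some changes, then...
//! cargo perf-test -p gpui -- --json=after
//! cargo perf-compare after before
//! ```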
//!
//! NB: All files matching `.perf-runs/ident.*.json` will be considered when
//! doing this comparison, so ensure there aren't leftover files in your `.perf-runs`
//! directory that might match that!
//!
//! # Notes
//! This should probably not be called manually unless you're working on the profiler
//! itself; use the `cargo perf-test` alias (after building this crate) instead.

use perf::{FailKind, Importance, Output, TestMdata, Timings, consts};

use std::{
    fs::OpenOptions,
    io::Write,
    num::NonZero,
    path::{Path, PathBuf},
    process::{Command, Stdio},
    sync::atomic::{AtomicBool, Ordering},
    time::{Duration, Instant},
};

/// How many iterations to attempt the first time a test is run.
const DEFAULT_ITER_COUNT: NonZero<usize> = NonZero::new(3).unwrap();
/// Multiplier for the iteration count when a test doesn't pass the noise cutoff.
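/// With the starting count above, triage tries 3, then 12, 48, 192, ...
/// iterations of a test until a run exceeds the noise cutoff.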
const ITER_COUNT_MUL: NonZero<usize> = NonZero::new(4).unwrap();

/// Do we keep stderr empty while running the tests?
static QUIET: AtomicBool = AtomicBool::new(false);

/// Report a failure into the output and skip an iteration.
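///
/// E.g. `fail!(output, t_name_pretty, t_mdata, FailKind::Skipped)` records the
/// failure (with its metadata) in the report and `continue`s the enclosing loop,
/// so this can only be invoked from within a loop.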
macro_rules! fail {
    ($output:ident, $name:expr, $kind:expr) => {{
        $output.failure($name, None, None, $kind);
        continue;
    }};
    ($output:ident, $name:expr, $mdata:expr, $kind:expr) => {{
        $output.failure($name, Some($mdata), None, $kind);
        continue;
    }};
    ($output:ident, $name:expr, $mdata:expr, $count:expr, $kind:expr) => {{
        $output.failure($name, Some($mdata), Some($count), $kind);
        continue;
    }};
}

/// How does this perf run return its output?
enum OutputKind<'a> {
    /// Print markdown to the terminal.
    Markdown,
    /// Save JSON to a file.
    Json(&'a Path),
}

impl OutputKind<'_> {
    /// Logs the output of a run as per the `OutputKind`.
    fn log(&self, output: &Output, t_bin: &str) {
        match self {
            OutputKind::Markdown => print!("{output}"),
            OutputKind::Json(ident) => {
                // We're going to be in tooling/perf/$whatever.
                let wspace_dir = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap())
                    .join("..")
                    .join("..");
                let runs_dir = PathBuf::from(&wspace_dir).join(consts::RUNS_DIR);
                std::fs::create_dir_all(&runs_dir).unwrap();
                assert!(
                    !ident.to_string_lossy().is_empty(),
                    "FATAL: Empty filename specified!"
                );
                // Get the test binary's crate's name; a path like
                // target/release-fast/deps/gpui-061ff76c9b7af5d7
                // would be reduced to just "gpui".
                let test_bin_stripped = Path::new(t_bin)
                    .file_name()
                    .unwrap()
                    .to_str()
                    .unwrap()
                    .rsplit_once('-')
                    .unwrap()
                    .0;
                let mut file_path = runs_dir.join(ident);
                file_path
                    .as_mut_os_string()
                    .push(format!(".{test_bin_stripped}.json"));
                let mut out_file = OpenOptions::new()
                    .write(true)
                    .create(true)
                    .truncate(true)
                    .open(&file_path)
                    .unwrap();
                out_file
                    .write_all(&serde_json::to_vec(&output).unwrap())
                    .unwrap();
                if !QUIET.load(Ordering::Relaxed) {
                    eprintln!("JSON output written to {}", file_path.display());
                }
            }
        }
    }
}

/// Runs a given metadata-returning function from a test handler, parsing its
/// output into a `TestMdata`.
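///
/// Illustratively, the harness output is expected to contain one prefixed line
/// per metadata field, roughly of the shape below (the literal prefix and field
/// names live in `perf::consts`, so the placeholders here are not the exact
/// strings):
/// ```text
/// <MDATA_LINE_PREF><VERSION_LINE_NAME> 1
/// <MDATA_LINE_PREF><IMPORTANCE_LINE_NAME> average
/// ```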
fn parse_mdata(t_bin: &str, mdata_fn: &str) -> Result<TestMdata, FailKind> {
    let mut cmd = Command::new(t_bin);
    cmd.args([mdata_fn, "--exact", "--nocapture"]);
    let out = cmd
        .output()
        .unwrap_or_else(|e| panic!("FATAL: Could not run test binary {t_bin}: {e}"));
    assert!(
        out.status.success(),
        "FATAL: Metadata function {mdata_fn} in test binary {t_bin} returned an error"
    );
    let stdout = String::from_utf8_lossy(&out.stdout);
    let mut version = None;
    let mut iterations = None;
    let mut importance = Importance::default();
    let mut weight = consts::WEIGHT_DEFAULT;
    for line in stdout
        .lines()
        .filter_map(|l| l.strip_prefix(consts::MDATA_LINE_PREF))
    {
        let mut items = line.split_whitespace();
        // For v0, we know the ident always comes first, then one field.
        match items.next().ok_or(FailKind::BadMetadata)? {
            consts::VERSION_LINE_NAME => {
                let v = items
                    .next()
                    .ok_or(FailKind::BadMetadata)?
                    .parse::<u32>()
                    .map_err(|_| FailKind::BadMetadata)?;
                if v > consts::MDATA_VER {
                    return Err(FailKind::VersionMismatch);
                }
                version = Some(v);
            }
            consts::ITER_COUNT_LINE_NAME => {
                // This should never be zero!
                iterations = Some(
                    items
                        .next()
                        .ok_or(FailKind::BadMetadata)?
                        .parse::<usize>()
                        .map_err(|_| FailKind::BadMetadata)?
                        .try_into()
                        .map_err(|_| FailKind::BadMetadata)?,
                );
            }
            consts::IMPORTANCE_LINE_NAME => {
                importance = match items.next().ok_or(FailKind::BadMetadata)? {
                    "critical" => Importance::Critical,
                    "important" => Importance::Important,
                    "average" => Importance::Average,
                    "iffy" => Importance::Iffy,
                    "fluff" => Importance::Fluff,
                    _ => return Err(FailKind::BadMetadata),
                };
            }
            consts::WEIGHT_LINE_NAME => {
                weight = items
                    .next()
                    .ok_or(FailKind::BadMetadata)?
                    .parse::<u8>()
                    .map_err(|_| FailKind::BadMetadata)?;
            }
            _ => unreachable!(),
        }
    }

    Ok(TestMdata {
        version: version.ok_or(FailKind::BadMetadata)?,
        // Iterations may be determined by us and thus left unspecified.
        iterations,
        // In principle this should always be set, but for the sake of stability,
        // tolerate metadata that omits the importance rather than erroring.
        // We may want to revisit this.
        importance,
        // Same with weight.
        weight,
    })
}

/// Compares the perf results of two profiles as per the arguments passed in.
fn compare_profiles(args: &[String]) {
    let ident_new = args.first().expect("FATAL: missing identifier for new run");
    let ident_old = args.get(1).expect("FATAL: missing identifier for old run");
    // As in `OutputKind::log`, CARGO_MANIFEST_DIR is not the workspace root, so
    // hop up two levels before resolving the runs directory.
    let wspace_dir = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap())
        .join("..")
        .join("..");
    let runs_dir = wspace_dir.join(consts::RUNS_DIR);

    // Use the blank outputs initially, so we can merge into these with prefixes.
    let mut outputs_new = Output::blank();
    let mut outputs_old = Output::blank();

    for e in runs_dir.read_dir().unwrap() {
        let Ok(entry) = e else {
            continue;
        };
        let Ok(metadata) = entry.metadata() else {
            continue;
        };
        if metadata.is_file() {
            let Ok(name) = entry.file_name().into_string() else {
                continue;
            };

            // A little helper to avoid code duplication. Reads the `output` from
            // a json file, then merges it into what we have so far.
            let read_into = |output: &mut Output| {
                let mut elems = name.split('.').skip(1);
                let prefix = elems.next().unwrap();
                assert_eq!("json", elems.next().unwrap());
                assert!(elems.next().is_none());
                let handle = OpenOptions::new().read(true).open(entry.path()).unwrap();
                let o_other: Output = serde_json::from_reader(handle).unwrap();
                output.merge(o_other, prefix);
            };

            if name.starts_with(ident_old) {
                read_into(&mut outputs_old);
            } else if name.starts_with(ident_new) {
                read_into(&mut outputs_new);
            }
        }
    }

    let res = outputs_new.compare_perf(outputs_old);
    println!("{res}");
}

/// Runs a test binary, filtering out tests which aren't marked for perf triage
/// and giving back the list of tests we care about.
///
/// The output of this is an iterator over `(test_fn_name, test_mdata_name)`.
fn get_tests(t_bin: &str) -> impl ExactSizeIterator<Item = (String, String)> {
    let mut cmd = Command::new(t_bin);
    // --format=json is nightly-only :(
    cmd.args(["--list", "--format=terse"]);
    let out = cmd
        .output()
        .unwrap_or_else(|e| panic!("FATAL: Could not run test binary {t_bin}: {e}"));
    assert!(
        out.status.success(),
        "FATAL: Cannot do perf check - test binary {t_bin} returned an error"
    );
    if !QUIET.load(Ordering::Relaxed) {
        eprintln!("Test binary ran successfully; starting profile...");
    }
    // Parse the test harness output to look for tests we care about.
    let stdout = String::from_utf8_lossy(&out.stdout);
    let mut test_list: Vec<_> = stdout
        .lines()
        .filter_map(|line| {
            // This should split only in two; e.g.,
            // "app::test::test_arena: test" => "app::test::test_arena:", "test"
            let line: Vec<_> = line.split_whitespace().collect();
            match line[..] {
                // Final byte of t_name is ":", which we need to ignore.
                [t_name, kind] => (kind == "test").then(|| &t_name[..t_name.len() - 1]),
                _ => None,
            }
        })
        // Exclude tests that aren't marked for perf triage based on suffix.
        .filter(|t_name| {
            t_name.ends_with(consts::SUF_NORMAL) || t_name.ends_with(consts::SUF_MDATA)
        })
        .collect();

    // Pulling itertools just for .dedup() would be quite a big dependency that's
    // not used elsewhere, so do this on a vec instead.
    test_list.sort_unstable();
    test_list.dedup();

    // Tests should come in pairs with their mdata fn!
    assert!(
        test_list.len().is_multiple_of(2),
        "Malformed tests in test binary {t_bin}"
    );

    let out = test_list
        .chunks_exact(2)
        .map(|pair| {
            // Be resilient against changes to these constants.
            if consts::SUF_NORMAL < consts::SUF_MDATA {
                (pair[0].to_owned(), pair[1].to_owned())
            } else {
                (pair[1].to_owned(), pair[0].to_owned())
            }
        })
        .collect::<Vec<_>>();
    out.into_iter()
}

/// Runs the specified test `count` times, returning the time taken if the test
/// succeeded.
#[inline]
fn spawn_and_iterate(t_bin: &str, t_name: &str, count: NonZero<usize>) -> Option<Duration> {
    let mut cmd = Command::new(t_bin);
    cmd.args([t_name, "--exact"]);
    cmd.env(consts::ITER_ENV_VAR, format!("{count}"));
    // Don't let the child muck up our stdin/out/err.
    cmd.stdin(Stdio::null());
    cmd.stdout(Stdio::null());
    cmd.stderr(Stdio::null());
    let pre = Instant::now();
    // Discard the output beyond ensuring success.
    let out = cmd.spawn().unwrap().wait();
    let post = Instant::now();
    out.ok().and_then(|s| s.success().then_some(post - pre))
}

/// Triage a test to determine the correct number of iterations it should run for.
/// Specifically, repeatedly runs the given test until its execution time exceeds
/// `thresh`, calling `step(iterations)` after every run that falls short of the
/// threshold to determine the new iteration count. Returns `None` if the test
/// errored or `step` returned `None`, else `Some(iterations)`.
///
/// # Panics
/// This will panic if `step` is not strictly increasing, or if the test
/// binary is invalid.
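///
/// The caller in `main` uses this roughly as
/// `triage_test(t_bin, t_name, consts::NOISE_CUTOFF, |c| c.checked_mul(ITER_COUNT_MUL))`
/// (modulo logging), quadrupling the iteration count until the run is long
/// enough to measure.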
fn triage_test(
    t_bin: &str,
    t_name: &str,
    thresh: Duration,
    mut step: impl FnMut(NonZero<usize>) -> Option<NonZero<usize>>,
) -> Option<NonZero<usize>> {
    let mut iter_count = DEFAULT_ITER_COUNT;
    // The first run of a test might be an outlier (e.g. it's doing some caching),
    // so measure a single iteration up front and discount that baseline from the
    // timings below.
    let duration_once = spawn_and_iterate(t_bin, t_name, NonZero::new(1).unwrap())?;
    loop {
        let duration = spawn_and_iterate(t_bin, t_name, iter_count)?;
        if duration.saturating_sub(duration_once) > thresh {
            break Some(iter_count);
        }
        let new = step(iter_count)?;
        assert!(
            new > iter_count,
            "FATAL: step must be strictly increasing"
        );
        iter_count = new;
    }
}

/// Profiles a given test with hyperfine, returning the mean and standard deviation
/// for its runtime. If the test errors, returns `None` instead.
fn hyp_profile(t_bin: &str, t_name: &str, iterations: NonZero<usize>) -> Option<Timings> {
    let mut perf_cmd = Command::new("hyperfine");
    // Warm up the cache and print markdown output to stdout, which we parse.
    perf_cmd.args([
        "--style",
        "none",
        "--warmup",
        "1",
        "--export-markdown",
        "-",
        // Parse json instead...
        "--time-unit",
        "millisecond",
        &format!("{t_bin} --exact {t_name}"),
    ]);
    perf_cmd.env(consts::ITER_ENV_VAR, format!("{iterations}"));
    let p_out = perf_cmd.output().unwrap();
    if !p_out.status.success() {
        return None;
    }

    let cmd_output = String::from_utf8_lossy(&p_out.stdout);
    // Can't use .last() since we have a trailing newline. Sigh.
    let results_line = cmd_output.lines().nth(3).unwrap();
    // Grab the values out of the pretty-print.
    // TODO: Parse json instead.
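    // The results row looks roughly like
    // `| `bin --exact name` | 12.3 ± 0.2 | 11.9 | 12.8 | 1.00 |`
    // (column layout assumed from hyperfine's markdown export). Since the
    // benchmarked command string contains exactly two spaces, splitting the row
    // on whitespace puts the mean at index 5 and the stddev two tokens later.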
    let mut res_iter = results_line.split_whitespace();
    // Durations are given in milliseconds, so account for that.
    let mean = Duration::from_secs_f64(res_iter.nth(5).unwrap().parse::<f64>().unwrap() / 1000.);
    let stddev = Duration::from_secs_f64(res_iter.nth(1).unwrap().parse::<f64>().unwrap() / 1000.);

    Some(Timings { mean, stddev })
}

fn main() {
    let args = std::env::args().collect::<Vec<_>>();
    // We get passed the test binary to run (or a subcommand) as the first
    // argument after our own name.
    let t_bin = args
        .get(1)
        .expect("FATAL: No test binary or command; this shouldn't be manually invoked!");

    // We're being asked to compare two results, not run the profiler.
    if t_bin == "compare" {
        compare_profiles(&args[2..]);
        return;
    }

    // Minimum test importance we care about this run.
    let mut thresh = Importance::Iffy;
    // Where to print the output of this run.
    let mut out_kind = OutputKind::Markdown;

    for arg in args.iter().skip(2) {
        match arg.as_str() {
            "--critical" => thresh = Importance::Critical,
            "--important" => thresh = Importance::Important,
            "--average" => thresh = Importance::Average,
            "--iffy" => thresh = Importance::Iffy,
            "--fluff" => thresh = Importance::Fluff,
            "--quiet" => QUIET.store(true, Ordering::Relaxed),
            s if s.starts_with("--json") => {
                out_kind = OutputKind::Json(Path::new(
                    s.strip_prefix("--json=")
                        .expect("FATAL: Invalid json parameter; pass --json=ident"),
                ));
            }
            _ => (),
        }
    }
    if !QUIET.load(Ordering::Relaxed) {
        eprintln!("Starting perf check");
    }

    let mut output = Output::default();

    // Spawn and profile an instance of each perf-sensitive test, via hyperfine.
    // Each test is a pair of (test, metadata-returning-fn), so grab both. We also
    // know the list is sorted.
    let i = get_tests(t_bin);
    let len = i.len();
    for (idx, (ref t_name, ref t_mdata)) in i.enumerate() {
        if !QUIET.load(Ordering::Relaxed) {
            eprint!("\rProfiling test {}/{}", idx + 1, len);
        }
        // Pretty-printable stripped name for the test.
        let t_name_pretty = t_name.replace(consts::SUF_NORMAL, "");

        // Get the metadata this test reports for us.
        let t_mdata = match parse_mdata(t_bin, t_mdata) {
            Ok(mdata) => mdata,
            Err(err) => fail!(output, t_name_pretty, err),
        };

        if t_mdata.importance < thresh {
            fail!(output, t_name_pretty, t_mdata, FailKind::Skipped);
        }

        // Time test execution to see how many iterations we need to do in order
        // to account for random noise. This is skipped for tests with fixed
        // iteration counts.
        let final_iter_count = t_mdata.iterations.or_else(|| {
            triage_test(t_bin, t_name, consts::NOISE_CUTOFF, |c| {
                if let Some(c) = c.checked_mul(ITER_COUNT_MUL) {
                    Some(c)
                } else {
                    // This should almost never happen, but maybe..?
                    eprintln!(
                        "WARNING: Ran nearly usize::MAX iterations of test {t_name_pretty}; skipping"
                    );
                    None
                }
            })
        });

        // Don't profile failing tests.
        let Some(final_iter_count) = final_iter_count else {
            fail!(output, t_name_pretty, t_mdata, FailKind::Triage);
        };

        // Now profile!
        if let Some(timings) = hyp_profile(t_bin, t_name, final_iter_count) {
            output.success(t_name_pretty, t_mdata, final_iter_count, timings);
        } else {
            fail!(
                output,
                t_name_pretty,
                t_mdata,
                final_iter_count,
                FailKind::Profile
            );
        }
    }
    if !QUIET.load(Ordering::Relaxed) {
        if output.is_empty() {
            eprintln!("Nothing to do.");
        } else {
            // If stdout and stderr share a terminal, finish the progress line
            // printed above so later output starts on a fresh line.
            eprintln!();
        }
    }

    // No need to make an empty JSON file for every test binary that ran nothing.
    if output.is_empty() {
        return;
    }

    out_kind.log(&output, t_bin);
}