//! Perf profiler for Zed tests. Outputs timings of tests marked with the `#[perf]`
//! attribute to stdout in Markdown. See the documentation of `util_macros::perf`
//! for usage details on the actual attribute.
//!
//! # Setup
//! Make sure `hyperfine` is installed and in the shell path.
//!
//! # Usage
//! Calling this tool rebuilds the targeted crate(s) with some cfg flags set for the
//! perf proc macro *and* enables optimisations (`release-fast` profile), so expect
//! it to take a little while.
//!
//! To test an individual crate, run:
//! ```sh
//! cargo perf-test -p $CRATE
//! ```
//!
//! To test everything (which will be **VERY SLOW**), run:
//! ```sh
//! cargo perf-test --workspace
//! ```
//!
//! Some command-line parameters are also recognised by this profiler. To filter
//! out all tests below a certain importance (e.g. `important`), run:
//! ```sh
//! cargo perf-test $WHATEVER -- --important
//! ```
//!
//! Similarly, to skip outputting progress to the command line, pass `-- --quiet`.
//! These flags can be combined, as shown below.
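//!
//! For instance, to profile only tests of importance `important` or above while
//! keeping stderr quiet:
//! ```sh
//! cargo perf-test -p gpui -- --important --quiet
//! ```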
//!
//! ## Comparing runs
//! Passing `--json=ident` will save per-crate run files in `.perf-runs`, e.g.
//! `cargo perf-test -p gpui -- --json=blah` will result in `.perf-runs/blah.gpui.json`
//! being created (unless no tests were run). These results can be automatically
//! compared. To do so, run `cargo perf-compare new-ident old-ident`.
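//!
//! A typical comparison workflow (identifiers are arbitrary):
//! ```sh
//! cargo perf-test -p gpui -- --json=old   # on the baseline commit
//! cargo perf-test -p gpui -- --json=new   # after your changes
//! cargo perf-compare new old
//! ```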
//!
//! NB: All files matching `.perf-runs/ident.*.json` will be considered when
//! doing this comparison, so ensure there aren't leftover files in your `.perf-runs`
//! directory that might match that!
//!
//! # Notes
//! This should probably not be called manually unless you're working on the profiler
//! itself; use the `cargo perf-test` alias (after building this crate) instead.

use perf::{FailKind, Importance, Output, TestMdata, Timings, consts};

use std::{
    fs::OpenOptions,
    io::Write,
    num::NonZero,
    path::{Path, PathBuf},
    process::{Command, Stdio},
    sync::atomic::{AtomicBool, Ordering},
    time::{Duration, Instant},
};

/// How many iterations to attempt the first time a test is run.
const DEFAULT_ITER_COUNT: NonZero<usize> = NonZero::new(3).unwrap();
/// Multiplier for the iteration count when a test doesn't pass the noise cutoff.
const ITER_COUNT_MUL: NonZero<usize> = NonZero::new(4).unwrap();
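// With the defaults above, triage attempts 3, 12, 48, 192, ... iterations of a
// test until a single run exceeds `consts::NOISE_CUTOFF` (see `triage_test` below).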

/// Do we keep stderr empty while running the tests?
static QUIET: AtomicBool = AtomicBool::new(false);

/// Report a failure into the output and skip an iteration. Only valid inside a
/// loop, as this expands to a `continue`.
macro_rules! fail {
    ($output:ident, $name:expr, $kind:expr) => {{
        $output.failure($name, None, None, $kind);
        continue;
    }};
    ($output:ident, $name:expr, $mdata:expr, $kind:expr) => {{
        $output.failure($name, Some($mdata), None, $kind);
        continue;
    }};
    ($output:ident, $name:expr, $mdata:expr, $count:expr, $kind:expr) => {{
        $output.failure($name, Some($mdata), Some($count), $kind);
        continue;
    }};
}

/// How does this perf run return its output?
enum OutputKind<'a> {
    /// Print markdown to the terminal.
    Markdown,
    /// Save JSON to a file.
    Json(&'a Path),
}

impl OutputKind<'_> {
    /// Logs the output of a run as per the `OutputKind`.
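    ///
    /// For example, a run with `--json=blah` against the test binary
    /// `target/release-fast/deps/gpui-061ff76c9b7af5d7` is written to
    /// `.perf-runs/blah.gpui.json`.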
    fn log(&self, output: &Output, t_bin: &str) {
        match self {
            OutputKind::Markdown => print!("{output}"),
            OutputKind::Json(ident) => {
                let wspace_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
                let runs_dir = PathBuf::from(&wspace_dir).join(consts::RUNS_DIR);
                std::fs::create_dir_all(&runs_dir).unwrap();
                assert!(
                    !ident.to_string_lossy().is_empty(),
                    "FATAL: Empty filename specified!"
                );
                // Get the test binary's crate's name; a path like
                // target/release-fast/deps/gpui-061ff76c9b7af5d7
                // would be reduced to just "gpui".
                let test_bin_stripped = Path::new(t_bin)
                    .file_name()
                    .unwrap()
                    .to_str()
                    .unwrap()
                    .rsplit_once('-')
                    .unwrap()
                    .0;
                let mut file_path = runs_dir.join(ident);
                file_path
                    .as_mut_os_string()
                    .push(format!(".{test_bin_stripped}.json"));
                let mut out_file = OpenOptions::new()
                    .write(true)
                    .create(true)
                    .truncate(true)
                    .open(&file_path)
                    .unwrap();
                out_file
                    .write_all(&serde_json::to_vec(&output).unwrap())
                    .unwrap();
                if !QUIET.load(Ordering::Relaxed) {
                    eprintln!("JSON output written to {}", file_path.display());
                }
            }
        }
    }
}

/// Runs a given metadata-returning function from a test handler, parsing its
/// output into a `TestMdata`.
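///
/// The expected stdout format is a sequence of prefixed metadata lines, one per
/// field. A sketch, with illustrative stand-ins for the prefix and field-name
/// constants (the real strings live in `perf::consts`):
/// ```text
/// <MDATA_LINE_PREF> <VERSION_LINE_NAME> 0
/// <MDATA_LINE_PREF> <ITER_COUNT_LINE_NAME> 100
/// <MDATA_LINE_PREF> <IMPORTANCE_LINE_NAME> critical
/// <MDATA_LINE_PREF> <WEIGHT_LINE_NAME> 5
/// ```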
fn parse_mdata(t_bin: &str, mdata_fn: &str) -> Result<TestMdata, FailKind> {
    let mut cmd = Command::new(t_bin);
    cmd.args([mdata_fn, "--exact", "--nocapture"]);
    let out = cmd
        .output()
        .unwrap_or_else(|e| panic!("FATAL: Could not run test binary {t_bin}: {e}"));
    assert!(
        out.status.success(),
        "FATAL: Metadata fn {mdata_fn} in test binary {t_bin} returned an error"
    );
    let stdout = String::from_utf8_lossy(&out.stdout);
    let mut version = None;
    let mut iterations = None;
    let mut importance = Importance::default();
    let mut weight = consts::WEIGHT_DEFAULT;
    for line in stdout
        .lines()
        .filter_map(|l| l.strip_prefix(consts::MDATA_LINE_PREF))
    {
        let mut items = line.split_whitespace();
        // For v0, we know the ident always comes first, then one field.
        match items.next().ok_or(FailKind::BadMetadata)? {
            consts::VERSION_LINE_NAME => {
                let v = items
                    .next()
                    .ok_or(FailKind::BadMetadata)?
                    .parse::<u32>()
                    .map_err(|_| FailKind::BadMetadata)?;
                if v > consts::MDATA_VER {
                    return Err(FailKind::VersionMismatch);
                }
                version = Some(v);
            }
            consts::ITER_COUNT_LINE_NAME => {
                // This should never be zero!
                iterations = Some(
                    items
                        .next()
                        .ok_or(FailKind::BadMetadata)?
                        .parse::<usize>()
                        .map_err(|_| FailKind::BadMetadata)?
                        .try_into()
                        .map_err(|_| FailKind::BadMetadata)?,
                );
            }
            consts::IMPORTANCE_LINE_NAME => {
                importance = match items.next().ok_or(FailKind::BadMetadata)? {
                    "critical" => Importance::Critical,
                    "important" => Importance::Important,
                    "average" => Importance::Average,
                    "iffy" => Importance::Iffy,
                    "fluff" => Importance::Fluff,
                    _ => return Err(FailKind::BadMetadata),
                };
            }
            consts::WEIGHT_LINE_NAME => {
                weight = items
                    .next()
                    .ok_or(FailKind::BadMetadata)?
                    .parse::<u8>()
                    .map_err(|_| FailKind::BadMetadata)?;
            }
            _ => unreachable!(),
        }
    }

    Ok(TestMdata {
        version: version.ok_or(FailKind::BadMetadata)?,
        // Iterations may be determined by us and thus left unspecified.
        iterations,
        // In principle this should always be set, but just for the sake of
        // stability allow the potentially-breaking change of not reporting the
        // importance without erroring. Maybe we want to change this.
        importance,
        // Same with weight.
        weight,
    })
}

/// Compares the perf results of two profiles as per the arguments passed in.
fn compare_profiles(args: &[String]) {
    let ident_new = args.first().expect("FATAL: missing identifier for new run");
    let ident_old = args.get(1).expect("FATAL: missing identifier for old run");
    // TODO: move this to a constant also tbh
    let wspace_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
    let runs_dir = PathBuf::from(&wspace_dir).join(consts::RUNS_DIR);

    // Use the blank outputs initially, so we can merge into these with prefixes.
    let mut outputs_new = Output::blank();
    let mut outputs_old = Output::blank();

    for e in runs_dir.read_dir().unwrap() {
        let Ok(entry) = e else {
            continue;
        };
        let Ok(metadata) = entry.metadata() else {
            continue;
        };
        if metadata.is_file() {
            let Ok(name) = entry.file_name().into_string() else {
                continue;
            };

            // A little helper to avoid code duplication. Reads the `output` from
            // a json file, then merges it into what we have so far.
            let read_into = |output: &mut Output| {
                let mut elems = name.split('.').skip(1);
                let prefix = elems.next().unwrap();
                assert_eq!("json", elems.next().unwrap());
                assert!(elems.next().is_none());
                let handle = OpenOptions::new().read(true).open(entry.path()).unwrap();
                let o_other: Output = serde_json::from_reader(handle).unwrap();
                output.merge(o_other, prefix);
            };

            if name.starts_with(ident_old) {
                read_into(&mut outputs_old);
            } else if name.starts_with(ident_new) {
                read_into(&mut outputs_new);
            }
        }
    }

    let res = outputs_new.compare_perf(outputs_old);
    println!("{res}");
}

/// Runs a test binary, filtering out tests which aren't marked for perf triage
/// and giving back the list of tests we care about.
///
/// The output of this is an iterator over `test_fn_name, test_mdata_name`.
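///
/// For reference, the terse `--list` output being parsed here looks like this
/// (names illustrative; the real suffixes are `consts::SUF_NORMAL` and
/// `consts::SUF_MDATA`):
/// ```text
/// app::test::test_arena<SUF_NORMAL>: test
/// app::test::test_arena<SUF_MDATA>: test
/// ```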
fn get_tests(t_bin: &str) -> impl ExactSizeIterator<Item = (String, String)> {
    let mut cmd = Command::new(t_bin);
    // --format=json is nightly-only :(
    cmd.args(["--list", "--format=terse"]);
    let out = cmd
        .output()
        .unwrap_or_else(|e| panic!("FATAL: Could not run test binary {t_bin}: {e}"));
    assert!(
        out.status.success(),
        "FATAL: Cannot do perf check - test binary {t_bin} returned an error"
    );
    if !QUIET.load(Ordering::Relaxed) {
        eprintln!("Test binary ran successfully; starting profile...");
    }
    // Parse the test harness output to look for tests we care about.
    let stdout = String::from_utf8_lossy(&out.stdout);
    let mut test_list: Vec<_> = stdout
        .lines()
        .filter_map(|line| {
            // This should split only in two; e.g.,
            // "app::test::test_arena: test" => "app::test::test_arena:", "test"
            let line: Vec<_> = line.split_whitespace().collect();
            match line[..] {
                // Final byte of t_name is ":", which we need to ignore.
                [t_name, kind] => (kind == "test").then(|| &t_name[..t_name.len() - 1]),
                _ => None,
            }
        })
        // Exclude tests that aren't marked for perf triage based on suffix.
        .filter(|t_name| {
            t_name.ends_with(consts::SUF_NORMAL) || t_name.ends_with(consts::SUF_MDATA)
        })
        .collect();

    // Pulling in itertools just for .dedup() would be quite a big dependency that's
    // not used elsewhere, so do this on a vec instead.
    test_list.sort_unstable();
    test_list.dedup();

    // Tests should come in pairs with their mdata fn!
    assert!(
        test_list.len().is_multiple_of(2),
        "Malformed tests in test binary {t_bin}"
    );

    let out = test_list
        .chunks_exact_mut(2)
        .map(|pair| {
            // Be resilient against changes to these constants.
            if consts::SUF_NORMAL < consts::SUF_MDATA {
                (pair[0].to_owned(), pair[1].to_owned())
            } else {
                (pair[1].to_owned(), pair[0].to_owned())
            }
        })
        .collect::<Vec<_>>();
    out.into_iter()
}

/// Triage a test to determine the correct number of iterations it should run for.
/// Specifically, repeatedly runs the given test until its execution time exceeds
/// `thresh`, calling `step(iterations)` after every failed run to determine the new
/// iteration count. Returns `None` if the test errored or `step` returned `None`,
/// else `Some(iterations)`.
///
/// # Panics
/// This will panic if `step` does not return monotonically increasing values.
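///
/// # Example
/// A sketch mirroring how `main` drives this: quadruple the iteration count
/// until a run exceeds the noise cutoff, giving up on overflow.
/// ```ignore
/// let iters = triage_test(t_bin, t_name, consts::NOISE_CUTOFF, |c| {
///     c.checked_mul(ITER_COUNT_MUL)
/// });
/// ```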
fn triage_test(
    t_bin: &str,
    t_name: &str,
    thresh: Duration,
    mut step: impl FnMut(NonZero<usize>) -> Option<NonZero<usize>>,
) -> Option<NonZero<usize>> {
    let mut iter_count = DEFAULT_ITER_COUNT;
    loop {
        let mut cmd = Command::new(t_bin);
        cmd.args([t_name, "--exact"]);
        cmd.env(consts::ITER_ENV_VAR, format!("{iter_count}"));
        // Don't let the child muck up our stdin/out/err.
        cmd.stdin(Stdio::null());
        cmd.stdout(Stdio::null());
        cmd.stderr(Stdio::null());
        let pre = Instant::now();
        // Discard the output beyond ensuring success.
        let out = cmd.spawn().unwrap().wait();
        let post = Instant::now();
        if !out.unwrap().success() {
            break None;
        }
        if post - pre > thresh {
            break Some(iter_count);
        }
        let new = step(iter_count)?;
        assert!(
            new > iter_count,
            "FATAL: step must be monotonically increasing"
        );
        iter_count = new;
    }
}

/// Profiles a given test with hyperfine, returning the mean and standard deviation
/// for its runtime. If the test errors, returns `None` instead.
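///
/// The markdown table hyperfine exports looks roughly like this (values
/// illustrative; this parser assumes timings are reported in milliseconds):
/// ```text
/// | Command | Mean [ms] | Min [ms] | Max [ms] | Relative |
/// |:---|---:|---:|---:|---:|
/// | `target/release-fast/deps/gpui-xxxx test_name` | 12.3 ± 0.4 | 11.9 | 13.2 | 1.00 |
/// ```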
fn hyp_profile(t_bin: &str, t_name: &str, iterations: NonZero<usize>) -> Option<Timings> {
    let mut perf_cmd = Command::new("hyperfine");
    // Warm up the cache and print markdown output to stdout, which we parse.
    perf_cmd.args([
        "--style",
        "none",
        "--warmup",
        "1",
        "--export-markdown",
        "-",
        &format!("{t_bin} {t_name}"),
    ]);
    perf_cmd.env(consts::ITER_ENV_VAR, format!("{iterations}"));
    let p_out = perf_cmd.output().unwrap();
    if !p_out.status.success() {
        return None;
    }

    let cmd_output = String::from_utf8_lossy(&p_out.stdout);
    // Can't use .last() since we have a trailing newline. Sigh.
    let results_line = cmd_output.lines().nth(3).unwrap();
    // Grab the values out of the pretty-print.
    // TODO: Parse json instead.
    let mut res_iter = results_line.split_whitespace();
    // Durations are given in milliseconds, so account for that.
    let mean = Duration::from_secs_f64(res_iter.nth(4).unwrap().parse::<f64>().unwrap() / 1000.);
    let stddev = Duration::from_secs_f64(res_iter.nth(1).unwrap().parse::<f64>().unwrap() / 1000.);

    Some(Timings { mean, stddev })
}

fn main() {
    let args = std::env::args().collect::<Vec<_>>();
    // We get passed the test we need to run as the 1st argument after our own name.
    let t_bin = args
        .get(1)
        .expect("FATAL: No test binary or command; this shouldn't be manually invoked!");

    // We're being asked to compare two results, not run the profiler.
    if t_bin == "compare" {
        compare_profiles(&args[2..]);
        return;
    }

    // Minimum test importance we care about this run.
    let mut thresh = Importance::Iffy;
    // Where to print the output of this run.
    let mut out_kind = OutputKind::Markdown;

    for arg in args.iter().skip(2) {
        match arg.as_str() {
            "--critical" => thresh = Importance::Critical,
            "--important" => thresh = Importance::Important,
            "--average" => thresh = Importance::Average,
            "--iffy" => thresh = Importance::Iffy,
            "--fluff" => thresh = Importance::Fluff,
            "--quiet" => QUIET.store(true, Ordering::Relaxed),
            s if s.starts_with("--json") => {
                out_kind = OutputKind::Json(Path::new(
                    s.strip_prefix("--json=")
                        .expect("FATAL: Invalid json parameter; pass --json=ident"),
                ));
            }
            _ => (),
        }
    }
    if !QUIET.load(Ordering::Relaxed) {
        eprintln!("Starting perf check");
    }

    let mut output = Output::default();

    // Spawn and profile an instance of each perf-sensitive test, via hyperfine.
    // Each test is a pair of (test, metadata-returning-fn), so grab both. We also
    // know the list is sorted.
    let i = get_tests(t_bin);
    let len = i.len();
    for (idx, (ref t_name, ref t_mdata)) in i.enumerate() {
        if !QUIET.load(Ordering::Relaxed) {
            eprint!("\rProfiling test {}/{}", idx + 1, len);
        }
        // Pretty-printable stripped name for the test.
        let t_name_pretty = t_name.replace(consts::SUF_NORMAL, "");

        // Get the metadata this test reports for us.
        let t_mdata = match parse_mdata(t_bin, t_mdata) {
            Ok(mdata) => mdata,
            Err(err) => fail!(output, t_name_pretty, err),
        };

        if t_mdata.importance < thresh {
            fail!(output, t_name_pretty, t_mdata, FailKind::Skipped);
        }

        // Time test execution to see how many iterations we need to do in order
        // to account for random noise. This is skipped for tests with fixed
        // iteration counts.
        let final_iter_count = t_mdata.iterations.or_else(|| {
            triage_test(t_bin, t_name, consts::NOISE_CUTOFF, |c| {
                if let Some(c) = c.checked_mul(ITER_COUNT_MUL) {
                    Some(c)
                } else {
                    // This should almost never happen, but maybe..?
                    eprintln!(
                        "WARNING: Ran nearly usize::MAX iterations of test {t_name_pretty}; skipping"
                    );
                    None
                }
            })
        });

        // Don't profile failing tests.
        let Some(final_iter_count) = final_iter_count else {
            fail!(output, t_name_pretty, t_mdata, FailKind::Triage);
        };

        // Now profile!
        if let Some(timings) = hyp_profile(t_bin, t_name, final_iter_count) {
            output.success(t_name_pretty, t_mdata, final_iter_count, timings);
        } else {
            fail!(
                output,
                t_name_pretty,
                t_mdata,
                final_iter_count,
                FailKind::Profile
            );
        }
    }
    if !QUIET.load(Ordering::Relaxed) {
        if output.is_empty() {
            eprintln!("Nothing to do.");
        } else {
            // If stdout and stderr are on the same terminal, move us after the
            // output from above.
            eprintln!();
        }
    }

    // No need to make an empty JSON file for every empty test bin.
    if output.is_empty() {
        return;
    }

    out_kind.log(&output, t_bin);
}