//! Perf profiler for Zed tests. Outputs timings of tests marked with the `#[perf]`
//! attribute to stdout in Markdown. See the documentation of `util_macros::perf`
//! for usage details on the actual attribute.
//!
//! # Setup
//! Make sure `hyperfine` is installed and in the shell path, then run
//! `cargo build -p perf --release` to build the profiler.
//!
//! # Usage
//! Calling this tool rebuilds the targeted crate(s) with some cfg flags set for the
//! perf proc macro *and* enables optimisations (`release-fast` profile), so expect
//! it to take a little while.
//!
//! To test an individual crate, run:
//! ```sh
//! cargo perf-test -p $CRATE
//! ```
//!
//! To test everything (which will be **VERY SLOW**), run:
//! ```sh
//! cargo perf-test --workspace
//! ```
//!
//! Some command-line parameters are also recognised by this profiler. To filter
//! out all tests below a certain importance (e.g. `important`), run:
//! ```sh
//! cargo perf-test $WHATEVER -- --important
//! ```
//!
//! Similarly, to skip outputting progress to the command line, pass `-- --quiet`.
//! These flags can be combined.
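//! For example (an illustrative combination), to profile only tests of at least
//! `important` importance while suppressing progress output:
//! ```sh
//! cargo perf-test -p gpui -- --important --quiet
//! ```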
//!
//! ## Comparing runs
//! Passing `--json=ident` will save per-crate run files in `.perf-runs`, e.g.
//! `cargo perf-test -p gpui -- --json=blah` will result in `.perf-runs/blah.gpui.json`
//! being created (unless no tests were run). These results can be automatically
//! compared. To do so, run `cargo perf-compare new-ident old-ident`.
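//!
//! A hypothetical before/after workflow (the identifiers are arbitrary) might look like:
//! ```sh
//! cargo perf-test -p gpui -- --json=before   # baseline
//! cargo perf-test -p gpui -- --json=after    # re-run after making changes
//! cargo perf-compare after before
//! ```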
//!
//! NB: All files matching `.perf-runs/ident.*.json` are considered when doing
//! this comparison, so make sure your `.perf-runs` directory doesn't contain
//! leftover files that match that pattern!
//!
//! # Notes
//! This should probably not be called manually unless you're working on the profiler
//! itself; use the `cargo perf-test` alias (after building this crate) instead.

#[allow(clippy::wildcard_imports, reason = "Our crate")]
use perf::*;

use std::{
    fs::OpenOptions,
    io::Write,
    path::{Path, PathBuf},
    process::{Command, Stdio},
    time::{Duration, Instant},
};

/// How many iterations to attempt the first time a test is run.
const DEFAULT_ITER_COUNT: usize = 3;
/// Multiplier for the iteration count when a test doesn't pass the noise cutoff.
const ITER_COUNT_MUL: usize = 4;
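// With these defaults, a test that never clears `consts::NOISE_CUTOFF` is retried
// with 3, 12, 48, ... iterations until a single run exceeds the cutoff (or the
// count would overflow).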

/// Report a failure into the output and move on to the next test.
macro_rules! fail {
    ($output:ident, $name:expr, $kind:expr) => {{
        $output.failure($name, None, None, $kind);
        continue;
    }};
    ($output:ident, $name:expr, $mdata:expr, $kind:expr) => {{
        $output.failure($name, Some($mdata), None, $kind);
        continue;
    }};
    ($output:ident, $name:expr, $mdata:expr, $count:expr, $kind:expr) => {{
        $output.failure($name, Some($mdata), Some($count), $kind);
        continue;
    }};
}

/// How does this perf run return its output?
enum OutputKind<'a> {
    /// Print markdown to the terminal.
    Markdown,
    /// Save JSON to a file.
    Json(&'a Path),
}

/// Runs a given metadata-returning function from a test handler, parsing its
/// output into a `TestMdata`.
fn parse_mdata(test_bin: &str, mdata_fn: &str) -> Result<TestMdata, FailKind> {
    let mut cmd = Command::new(test_bin);
    cmd.args([mdata_fn, "--exact", "--nocapture"]);
    let out = cmd
        .output()
        .unwrap_or_else(|e| panic!("FATAL: Could not run test binary {test_bin}: {e}"));
    assert!(out.status.success());
    let stdout = String::from_utf8_lossy(&out.stdout);
    let mut version = None;
    let mut iterations = None;
    let mut importance = Importance::default();
    let mut weight = consts::WEIGHT_DEFAULT;
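    // Each metadata line is emitted as `<MDATA_LINE_PREF><field-name> <value>`, so
    // strip the prefix, then split on whitespace to recover the field and its value.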
    for line in stdout
        .lines()
        .filter_map(|l| l.strip_prefix(consts::MDATA_LINE_PREF))
    {
        let mut items = line.split_whitespace();
        // For v0, we know the ident always comes first, then one field.
        match items.next().unwrap() {
            consts::VERSION_LINE_NAME => {
                let v = items.next().unwrap().parse::<u32>().unwrap();
                if v > consts::MDATA_VER {
                    return Err(FailKind::VersionMismatch);
                }
                version = Some(v);
            }
            consts::ITER_COUNT_LINE_NAME => {
                iterations = Some(items.next().unwrap().parse::<usize>().unwrap());
            }
            consts::IMPORTANCE_LINE_NAME => {
                importance = match items.next().unwrap() {
                    "critical" => Importance::Critical,
                    "important" => Importance::Important,
                    "average" => Importance::Average,
                    "iffy" => Importance::Iffy,
                    "fluff" => Importance::Fluff,
                    _ => unreachable!(),
                };
            }
            consts::WEIGHT_LINE_NAME => {
                weight = items.next().unwrap().parse::<u8>().unwrap();
            }
            _ => unreachable!(),
        }
    }

    Ok(TestMdata {
        version: version.unwrap(),
        // Iterations may be determined by us and thus left unspecified.
        iterations,
        // In principle this should always be set, but for the sake of stability,
        // tolerate metadata that doesn't report an importance rather than erroring.
        // Maybe we want to change this.
        importance,
        // Same with weight.
        weight,
    })
}

/// Compares the perf results of two profiles as per the arguments passed in.
fn compare_profiles(args: &[String]) {
    let ident_new = args.first().expect("FATAL: missing identifier for new run");
    let ident_old = args.get(1).expect("FATAL: missing identifier for old run");
    // TODO: move this to a constant also tbh
    let wspace_dir = std::env::var("CARGO_WORKSPACE_DIR").unwrap();
    let runs_dir = PathBuf::from(&wspace_dir).join(consts::RUNS_DIR);

    // Use the blank outputs initially, so we can merge into these with prefixes.
    let mut outputs_new = Output::blank();
    let mut outputs_old = Output::blank();

    for e in runs_dir.read_dir().unwrap() {
        let Ok(entry) = e else {
            continue;
        };
        let Ok(metadata) = entry.metadata() else {
            continue;
        };
        if metadata.is_file() {
            let Ok(name) = entry.file_name().into_string() else {
                continue;
            };

            // A little helper to avoid code duplication. Reads the `output` from
            // a json file, then merges it into what we have so far.
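            // Run files are named `<ident>.<crate>.json` (see the module docs), so
            // skipping the first dot-separated element leaves the crate name, which
            // becomes the prefix used when merging.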
            let read_into = |output: &mut Output| {
                let mut elems = name.split('.').skip(1);
                let prefix = elems.next().unwrap();
                assert_eq!("json", elems.next().unwrap());
                assert!(elems.next().is_none());
                let handle = OpenOptions::new().read(true).open(entry.path()).unwrap();
                let o_other: Output = serde_json::from_reader(handle).unwrap();
                output.merge(o_other, prefix);
            };

            if name.starts_with(ident_old) {
                read_into(&mut outputs_old);
            } else if name.starts_with(ident_new) {
                read_into(&mut outputs_new);
            }
        }
    }

    let res = outputs_new.compare_perf(outputs_old);
    println!("{res}");
}

#[expect(clippy::too_many_lines, reason = "This will be split up soon!")]
fn main() {
    let args = std::env::args().collect::<Vec<_>>();
    // We get passed the test we need to run as the 1st argument after our own name.
    let test_bin = args
        .get(1)
        .expect("FATAL: No test binary or command; this shouldn't be manually invoked!");

    // We're being asked to compare two results, not run the profiler.
    if test_bin == "compare" {
        compare_profiles(&args[2..]);
        return;
    }

    // Whether to skip printing some information to stderr.
    let mut quiet = false;
    // Minimum test importance we care about this run.
    let mut thresh = Importance::Iffy;
    // Where to print the output of this run.
    let mut out_kind = OutputKind::Markdown;

    for arg in args.iter().skip(2) {
        match arg.as_str() {
            "--critical" => thresh = Importance::Critical,
            "--important" => thresh = Importance::Important,
            "--average" => thresh = Importance::Average,
            "--iffy" => thresh = Importance::Iffy,
            "--fluff" => thresh = Importance::Fluff,
            "--quiet" => quiet = true,
            s if s.starts_with("--json") => {
                out_kind = OutputKind::Json(Path::new(
                    s.strip_prefix("--json=")
                        .expect("FATAL: Invalid json parameter; pass --json=filename"),
                ));
            }
            _ => (),
        }
    }
    if !quiet {
        eprintln!("Starting perf check");
    }

    let mut cmd = Command::new(test_bin);
    // --format=json is nightly-only :(
    cmd.args(["--list", "--format=terse"]);
    let out = cmd
        .output()
        .unwrap_or_else(|e| panic!("FATAL: Could not run test binary {test_bin}: {e}"));
    assert!(
        out.status.success(),
        "FATAL: Cannot do perf check - test binary {test_bin} returned an error"
    );
    if !quiet {
        eprintln!("Test binary ran successfully; starting profile...");
    }
    // Parse the test harness output to look for tests we care about.
    let stdout = String::from_utf8_lossy(&out.stdout);
    let mut test_list: Vec<_> = stdout
        .lines()
        .filter_map(|line| {
            // This should split into exactly two parts; e.g.,
            // "app::test::test_arena: test" => "app::test::test_arena:", "test"
            let line: Vec<_> = line.split_whitespace().collect();
            match line[..] {
                // Final byte of t_name is ":", which we need to ignore.
                [t_name, kind] => (kind == "test").then(|| &t_name[..t_name.len() - 1]),
                _ => None,
            }
        })
        // Exclude tests that aren't marked for perf triage based on suffix.
        .filter(|t_name| {
            t_name.ends_with(consts::SUF_NORMAL) || t_name.ends_with(consts::SUF_MDATA)
        })
        .collect();

    // Pulling in itertools just for `.dedup()` would be quite a big dependency that's
    // not used elsewhere, so do this on a vec instead.
    test_list.sort_unstable();
    test_list.dedup();

    let len = test_list.len();

    // Tests should come in pairs with their mdata fn!
    assert!(
        len.is_multiple_of(2),
        "Malformed tests in test binary {test_bin}"
    );

    let mut output = Output::default();

    // Spawn and profile an instance of each perf-sensitive test, via hyperfine.
    // Each test is a pair of (test, metadata-returning-fn), so grab both. We also
    // know the list is sorted.
    for (idx, t_pair) in test_list.chunks_exact(2).enumerate() {
        if !quiet {
            eprint!("\rProfiling test {}/{}", idx + 1, len / 2);
        }
        // Be resilient against changes to these constants.
        let (t_name, t_mdata) = if consts::SUF_NORMAL < consts::SUF_MDATA {
            (t_pair[0], t_pair[1])
        } else {
            (t_pair[1], t_pair[0])
        };
        // Pretty-printable stripped name for the test.
        let t_name_pretty = t_name.replace(consts::SUF_NORMAL, "");

        // Get the metadata this test reports for us.
        let t_mdata = match parse_mdata(test_bin, t_mdata) {
            Ok(mdata) => mdata,
            Err(err) => fail!(output, t_name_pretty, err),
        };

        if t_mdata.importance < thresh {
            fail!(output, t_name_pretty, t_mdata, FailKind::Skipped);
        }

        // Time test execution to see how many iterations we need to do in order
        // to account for random noise. This is skipped for tests with fixed
        // iteration counts.
        let mut errored = false;
        let final_iter_count = t_mdata.iterations.unwrap_or_else(|| {
            let mut iter_count = DEFAULT_ITER_COUNT;
            loop {
                let mut cmd = Command::new(test_bin);
                cmd.args([t_name, "--exact"]);
                cmd.env(consts::ITER_ENV_VAR, format!("{iter_count}"));
                // Don't let the child muck up our stdin/out/err.
                cmd.stdin(Stdio::null());
                cmd.stdout(Stdio::null());
                cmd.stderr(Stdio::null());
                let pre = Instant::now();
                // Discard the output beyond ensuring success.
                let out = cmd.spawn().unwrap().wait();
                let post = Instant::now();
                if !out.unwrap().success() {
                    errored = true;
                    break iter_count;
                }
                if post - pre > consts::NOISE_CUTOFF {
                    break iter_count;
                } else if let Some(c) = iter_count.checked_mul(ITER_COUNT_MUL) {
                    iter_count = c;
                } else {
                    // This should almost never happen, but maybe..?
                    eprintln!(
                        "WARNING: Running nearly usize::MAX iterations of test {t_name_pretty}"
                    );
                    break iter_count;
                }
            }
        });

        // Don't profile failing tests.
        if errored {
            fail!(output, t_name_pretty, t_mdata, FailKind::Triage);
        }

        // Now profile!
        let mut perf_cmd = Command::new("hyperfine");
        // Warm up the cache and print markdown output to stdout.
        // TODO: json
        perf_cmd.args([
            "--style",
            "none",
            "--warmup",
            "1",
            "--export-markdown",
            "-",
            &format!("{test_bin} {t_name}"),
        ]);
        perf_cmd.env(consts::ITER_ENV_VAR, format!("{final_iter_count}"));
        let p_out = perf_cmd.output().unwrap();
        if p_out.status.success() {
            let cmd_output = String::from_utf8_lossy(&p_out.stdout);
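            // hyperfine's `--export-markdown -` output is a small markdown table
            // (header, separator, then a row per benchmarked command); the code
            // below indexes straight to the row holding the timings.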
            // Can't use .last() since we have a trailing newline. Sigh.
            let results_line = cmd_output.lines().nth(3).unwrap();
            // Grab the values out of the pretty-print.
            // TODO: Parse json instead.
            let mut res_iter = results_line.split_whitespace();
            // Durations are given in milliseconds, so account for that.
            let mean =
                Duration::from_secs_f64(res_iter.nth(4).unwrap().parse::<f64>().unwrap() / 1000.);
            let stddev =
                Duration::from_secs_f64(res_iter.nth(1).unwrap().parse::<f64>().unwrap() / 1000.);

            output.success(
                t_name_pretty,
                t_mdata,
                final_iter_count,
                Timings { mean, stddev },
            );
        } else {
            fail!(
                output,
                t_name_pretty,
                t_mdata,
                final_iter_count,
                FailKind::Profile
            );
        }
    }
    if !quiet {
        if output.is_empty() {
            eprintln!("Nothing to do.");
        } else {
            // If stdout and stderr are on the same terminal, move us after the
            // output from above.
            eprintln!();
        }
    }

    // No need to make an empty json file for every empty test bin.
    if output.is_empty() {
        return;
    }

    match out_kind {
        OutputKind::Markdown => print!("{output}"),
        OutputKind::Json(user_path) => {
            let wspace_dir = std::env::var("CARGO_WORKSPACE_DIR").unwrap();
            let runs_dir = PathBuf::from(&wspace_dir).join(consts::RUNS_DIR);
            std::fs::create_dir_all(&runs_dir).unwrap();
            assert!(
                !user_path.to_string_lossy().is_empty(),
                "FATAL: Empty filename specified!"
            );
            // Get the test binary's crate's name; a path like
            // target/release-fast/deps/gpui-061ff76c9b7af5d7
            // would be reduced to just "gpui".
            let test_bin_stripped = Path::new(test_bin)
                .file_name()
                .unwrap()
                .to_str()
                .unwrap()
                .rsplit_once('-')
                .unwrap()
                .0;
            let mut file_path = runs_dir.join(user_path);
            file_path
                .as_mut_os_string()
                .push(format!(".{test_bin_stripped}.json"));
            let mut out_file = OpenOptions::new()
                .write(true)
                .create(true)
                .truncate(true)
                .open(&file_path)
                .unwrap();
            out_file
                .write_all(&serde_json::to_vec(&output).unwrap())
                .unwrap();
            if !quiet {
                eprintln!("JSON output written to {}", file_path.display());
            }
        }
    }
}