From 782058647db4f1b25d6c04c85728faac557e59cf Mon Sep 17 00:00:00 2001
From: Nia
Date: Sat, 20 Sep 2025 09:04:32 +0200
Subject: [PATCH] tests: Add an automatic perf profiler (#38543)

Add an auto-profiler for our tests, to hopefully allow better triage of
performance impacts resulting from code changes. Comprehensive usage docs
are in the code.

Currently, it uses hyperfine under the hood and prints markdown to the
command line for all crates with relevant tests enabled. We may want to
expand this to support JSON output in the future, e.g. to allow
automatically comparing two runs on different commits; in general, a lot
of functionality could still be added (maybe measuring memory usage?).

It's enabled (mostly as an example) on two tests inside `gpui` and a
bunch of those inside `vim`. I'd have happily used `cargo bench`, but
that's nightly-only.

Release Notes:

- N/A
---
 .cargo/config.toml                    |   1 +
 Cargo.lock                            |   9 ++
 Cargo.toml                            |   2 +-
 crates/gpui/Cargo.toml                |   1 +
 crates/gpui/src/style.rs              |   6 +-
 crates/util_macros/Cargo.toml         |   3 +
 crates/util_macros/src/util_macros.rs | 147 +++++++++++++++++++-
 crates/vim/Cargo.toml                 |   1 +
 crates/vim/src/test.rs                |  25 ++++
 tooling/perf/Cargo.toml               |  11 ++
 tooling/perf/LICENSE-APACHE           |   1 +
 tooling/perf/src/main.rs              | 191 ++++++++++++++++++++++++++
 12 files changed, 393 insertions(+), 5 deletions(-)
 create mode 100644 tooling/perf/Cargo.toml
 create mode 120000 tooling/perf/LICENSE-APACHE
 create mode 100644 tooling/perf/src/main.rs

diff --git a/.cargo/config.toml b/.cargo/config.toml
index 717c5e18c8d294bacf65207bc6b8ecb7dba1b152..74d34226af09c11b56faa6722e00afa218c924f5 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -4,6 +4,7 @@ rustflags = ["-C", "symbol-mangling-version=v0", "--cfg", "tokio_unstable"]
 
 [alias]
 xtask = "run --package xtask --"
+perf-test = ["test", "--profile", "release-fast", "--lib", "--bins", "--tests", "--config", "target.'cfg(true)'.runner='target/release/perf'", "--config", "target.'cfg(true)'.rustflags=[\"--cfg\", \"perf_enabled\"]"]
 
 [target.x86_64-unknown-linux-gnu]
 linker = "clang"
diff --git a/Cargo.lock b/Cargo.lock
index 548ff152066745344b65c75b0be80db71c6f7f5e..84ae8e613365ef3976970e61dfc7b03aaf969062 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7924,6 +7924,7 @@ dependencies = [
  "unicode-segmentation",
  "usvg",
  "util",
+ "util_macros",
  "uuid",
  "waker-fn",
  "wayland-backend",
@@ -12163,6 +12164,13 @@ version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
 
+[[package]]
+name = "perf"
+version = "0.1.0"
+dependencies = [
+ "workspace-hack",
+]
+
 [[package]]
 name = "pest"
 version = "2.8.0"
@@ -18727,6 +18735,7 @@ dependencies = [
  "tokio",
  "ui",
  "util",
+ "util_macros",
  "vim_mode_setting",
  "workspace",
  "workspace-hack",
diff --git a/Cargo.toml b/Cargo.toml
index aa95b1f4a78fe2599bcccd3036c2ebb65761ada3..ad07429243817b27b4c09fc651b50de820183a9d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -220,7 +220,7 @@ members = [
     #
     "tooling/workspace-hack",
 
-    "tooling/xtask",
+    "tooling/xtask", "tooling/perf",
 ]
 
 default-members = ["crates/zed"]
diff --git a/crates/gpui/Cargo.toml b/crates/gpui/Cargo.toml
index ac1bdf85cb478064db42b3dccde8e44adee72fdd..2919fecabf050a011109b2abfe69394a0ead2e67 100644
--- a/crates/gpui/Cargo.toml
+++ b/crates/gpui/Cargo.toml
@@ -110,6 +110,7 @@ resvg = { version = "0.45.0", default-features = false, features = [
     "memmap-fonts",
 ] }
 usvg = { version = "0.45.0", default-features = false }
+util_macros.workspace = true
 schemars.workspace = true
 seahash = "4.1"
 semantic_version.workspace = true
diff --git a/crates/gpui/src/style.rs b/crates/gpui/src/style.rs
index 78bca5a4993271883c555fe05366a7c9a0c472ac..8afb4e4eb8af70a78c1cd4fc0176a7fe3baf3c3e 100644
--- a/crates/gpui/src/style.rs
+++ b/crates/gpui/src/style.rs
@@ -1300,7 +1300,9 @@ mod tests {
     use super::*;
 
-    #[test]
+    use util_macros::perf;
+
+    #[perf]
     fn test_basic_highlight_style_combination() {
         let style_a = HighlightStyle::default();
         let style_b = HighlightStyle::default();
@@ -1385,7 +1387,7 @@ mod tests {
         );
     }
 
-    #[test]
+    #[perf]
     fn test_combine_highlights() {
         assert_eq!(
             combine_highlights(
diff --git a/crates/util_macros/Cargo.toml b/crates/util_macros/Cargo.toml
index 996eefcb303ee5959f0e7fa920f1a91a509407eb..45145a68f6a7d54d759d932c3dc851d14f4939d9 100644
--- a/crates/util_macros/Cargo.toml
+++ b/crates/util_macros/Cargo.toml
@@ -17,3 +17,6 @@ doctest = false
 quote.workspace = true
 syn.workspace = true
 workspace-hack.workspace = true
+
+[features]
+perf-enabled = []
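For context on the attribute-argument parsing in the `util_macros` diff below: `syn` hands the macro the tokens between the parentheses, and `iterations = 1` arrives as a `Meta::NameValue` item. A minimal standalone sketch of that shape (illustrative only, assuming `syn` 2.x with default features; not part of the patch):

```rust
use syn::{Meta, Token, parse::Parser, punctuated::Punctuated};

fn main() {
    // `#[perf(iterations = 1)]` hands the macro the tokens `iterations = 1`,
    // which parse as a comma-separated list of `Meta` items; here, a single
    // name-value pair whose value is a `syn::Expr`.
    let args = Punctuated::<Meta, Token![,]>::parse_terminated
        .parse_str("iterations = 1")
        .unwrap();
    match args.first() {
        Some(Meta::NameValue(nv)) => assert!(nv.path.is_ident("iterations")),
        _ => panic!("expected a name-value argument"),
    }
}
```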
diff --git a/crates/util_macros/src/util_macros.rs b/crates/util_macros/src/util_macros.rs
index 9d0b06ab10a7454d6c0d19fd54722fd98db4ac25..d3f05afdecbca8cb3b4c8685054d3828e6c702fd 100644
--- a/crates/util_macros/src/util_macros.rs
+++ b/crates/util_macros/src/util_macros.rs
@@ -1,8 +1,9 @@
 #![cfg_attr(not(target_os = "windows"), allow(unused))]
+#![allow(clippy::test_attr_in_doctest)]
 
 use proc_macro::TokenStream;
-use quote::quote;
-use syn::{LitStr, parse_macro_input};
+use quote::{ToTokens, quote};
+use syn::{ItemFn, LitStr, parse_macro_input, parse_quote};
 
 /// A macro used in tests for cross-platform path string literals in tests. On Windows it replaces
 /// `/` with `\\` and adds `C:` to the beginning of absolute paths. On other platforms, the path is
@@ -87,3 +88,145 @@ pub fn line_endings(input: TokenStream) -> TokenStream {
         #text
     })
 }
+
+/// Inner data for the perf macro.
+struct PerfArgs {
+    /// How many times to loop a test before rerunning the test binary.
+    /// If left empty, the test harness will auto-determine this value.
+    iterations: Option<syn::Expr>,
+}
+
+impl syn::parse::Parse for PerfArgs {
+    fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
+        if input.is_empty() {
+            return Ok(PerfArgs { iterations: None });
+        }
+
+        let mut iterations = None;
+        // In principle we only have one possible argument, but leave this as
+        // a loop in case we expand this in the future.
+        for meta in
+            syn::punctuated::Punctuated::<syn::Meta, syn::Token![,]>::parse_terminated(input)?
+        {
+            match &meta {
+                syn::Meta::NameValue(meta_name_value) => {
+                    if meta_name_value.path.is_ident("iterations") {
+                        iterations = Some(meta_name_value.value.clone());
+                    } else {
+                        return Err(syn::Error::new_spanned(
+                            &meta_name_value.path,
+                            "unexpected argument, expected 'iterations'",
+                        ));
+                    }
+                }
+                _ => {
+                    return Err(syn::Error::new_spanned(
+                        meta,
+                        "expected name-value argument like 'iterations = 1'",
+                    ));
+                }
+            }
+        }
+
+        Ok(PerfArgs { iterations })
+    }
+}
+
+/// Marks a test as perf-sensitive, to be triaged when checking the performance
+/// of a build. This also automatically applies `#[test]`.
+///
+/// By default, the number of iterations when profiling this test is auto-determined.
+/// If this needs to be overridden, pass the desired iteration count to the macro
+/// as a parameter (`#[perf(iterations = n)]`). Note that the actual profiler may still
+/// run the test an arbitrary number of times; this parameter just sets the number of
+/// executions before the process is restarted and global state is reset.
+///
+/// # Usage notes
+/// This should probably not be applied to tests that do any significant fs IO, as
+/// locks on files may not be released in time when repeating a test many times. This
+/// might lead to spurious failures.
+///
+/// # Examples
+/// ```rust
+/// use util_macros::perf;
+///
+/// #[perf]
+/// fn expensive_computation_test() {
+///     // Test goes here.
+/// }
+/// ```
+///
+/// This also works with `#[gpui::test]`s, though in most cases it shouldn't
+/// be used with automatic iterations.
+/// ```rust,ignore
+/// use util_macros::perf;
+///
+/// #[perf(iterations = 1)]
+/// #[gpui::test]
+/// fn oneshot_test(_cx: &mut gpui::TestAppContext) {
+///     // Test goes here.
+/// }
+/// ```
+#[proc_macro_attribute]
+pub fn perf(our_attr: TokenStream, input: TokenStream) -> TokenStream {
+    // If any of the below constants are changed, make sure to also update the perf
+    // profiler to match!
+
+    /// The suffix on tests marked with `#[perf]`.
+    const SUF_NORMAL: &str = "__ZED_PERF";
+    /// The suffix on tests marked with `#[perf(iterations = n)]`.
+    const SUF_FIXED: &str = "__ZED_PERF_FIXEDITER";
+    /// The env var in which we pass the iteration count to our tests.
+    const ITER_ENV_VAR: &str = "ZED_PERF_ITER";
+
+    let iter_count = parse_macro_input!(our_attr as PerfArgs).iterations;
+
+    let ItemFn {
+        mut attrs,
+        vis,
+        mut sig,
+        block,
+    } = parse_macro_input!(input as ItemFn);
+    attrs.push(parse_quote!(#[test]));
+    attrs.push(parse_quote!(#[allow(non_snake_case)]));
+
+    let block: Box<syn::Block> = if cfg!(perf_enabled) {
+        // Make the ident obvious when calling, for the test parser.
+        let mut new_ident = sig.ident.to_string();
+        if iter_count.is_some() {
+            new_ident.push_str(SUF_FIXED);
+        } else {
+            new_ident.push_str(SUF_NORMAL);
+        }
+
+        let new_ident = syn::Ident::new(&new_ident, sig.ident.span());
+        sig.ident = new_ident;
+        // If we have a preset iteration count, just use that.
+        if let Some(iter_count) = iter_count {
+            parse_quote!({
+                for _ in 0..#iter_count {
+                    #block
+                }
+            })
+        } else {
+            // Otherwise, the perf harness will pass us the value in an env var.
+            parse_quote!({
+                let iter_count = std::env::var(#ITER_ENV_VAR).unwrap().parse::<usize>().unwrap();
+                for _ in 0..iter_count {
+                    #block
+                }
+            })
+        }
+    } else {
+        block
+    };
+
+    ItemFn {
+        attrs,
+        vis,
+        sig,
+        block,
+    }
+    .into_token_stream()
+    .into()
+}
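To make the macro's contract with the profiler concrete: under `--cfg perf_enabled`, a test marked `#[perf]` is renamed with the suffix above and wrapped in an iteration loop driven by the env var. A rough sketch of the expansion (equivalent to, not literally, what the macro emits):

```rust
// Approximate expansion of `#[perf]` on `fn my_test()` when built with
// `--cfg perf_enabled` (a sketch; the macro emits equivalent tokens).
#[test]
#[allow(non_snake_case)]
fn my_test__ZED_PERF() {
    // The perf harness supplies the per-process iteration count via this env var.
    let iter_count = std::env::var("ZED_PERF_ITER")
        .unwrap()
        .parse::<usize>()
        .unwrap();
    for _ in 0..iter_count {
        // ... original test body ...
    }
}
```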
diff --git a/crates/vim/Cargo.toml b/crates/vim/Cargo.toml
index abe92dd58594f05d8cf71dbde4fb129aafa26a03..a76d1f7ddc7b619ac231cd163a0721439255889a 100644
--- a/crates/vim/Cargo.toml
+++ b/crates/vim/Cargo.toml
@@ -46,6 +46,7 @@ theme.workspace = true
 tokio = { version = "1.15", features = ["full"], optional = true }
 ui.workspace = true
 util.workspace = true
+util_macros.workspace = true
 vim_mode_setting.workspace = true
 workspace.workspace = true
 zed_actions.workspace = true
diff --git a/crates/vim/src/test.rs b/crates/vim/src/test.rs
index 2256c2577ecd282f690ee7b3afe9e2b21b6e8788..03adfc8af15cf92f7ee6c4c857c0f154e2c969f3 100644
--- a/crates/vim/src/test.rs
+++ b/crates/vim/src/test.rs
@@ -25,6 +25,9 @@ use search::BufferSearchBar;
 
 use crate::{PushSneak, PushSneakBackward, insert::NormalBefore, motion, state::Mode};
 
+use util_macros::perf;
+
+#[perf]
 #[gpui::test]
 async fn test_initially_disabled(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, false).await;
@@ -44,6 +47,7 @@ async fn test_neovim(cx: &mut gpui::TestAppContext) {
     cx.assert_editor_state("ˇtest");
 }
 
+#[perf]
 #[gpui::test]
 async fn test_toggle_through_settings(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -80,6 +84,7 @@ async fn test_toggle_through_settings(cx: &mut gpui::TestAppContext) {
     assert_eq!(cx.mode(), Mode::Normal);
 }
 
+#[perf]
 #[gpui::test]
 async fn test_cancel_selection(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -104,6 +109,7 @@ async fn test_cancel_selection(cx: &mut gpui::TestAppContext) {
     cx.assert_editor_state("The quick brown fox juˇmps over the lazy dog");
 }
 
+#[perf]
 #[gpui::test]
 async fn test_buffer_search(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -132,6 +138,7 @@ async fn test_buffer_search(cx: &mut gpui::TestAppContext) {
     })
 }
 
+#[perf]
 #[gpui::test]
 async fn test_count_down(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -143,6 +150,7 @@ async fn test_count_down(cx: &mut gpui::TestAppContext) {
     cx.assert_editor_state("aa\nbb\ncc\ndd\neˇe");
 }
 
+#[perf]
 #[gpui::test]
 async fn test_end_of_document_710(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -157,6 +165,7 @@ async fn test_end_of_document_710(cx: &mut gpui::TestAppContext) {
     cx.assert_editor_state("aˇa\nbb\ncc");
 }
 
+#[perf]
 #[gpui::test]
 async fn test_end_of_line_with_times(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -175,6 +184,7 @@ async fn test_end_of_line_with_times(cx: &mut gpui::TestAppContext) {
     cx.assert_editor_state("aa\nbb\ncˇc");
 }
 
+#[perf]
 #[gpui::test]
 async fn test_indent_outdent(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -241,6 +251,7 @@ async fn test_escape_command_palette(cx: &mut gpui::TestAppContext) {
     cx.assert_state("aˇbc\n", Mode::Insert);
 }
 
+#[perf]
 #[gpui::test]
 async fn test_escape_cancels(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -251,6 +262,7 @@ async fn test_escape_cancels(cx: &mut gpui::TestAppContext) {
     cx.assert_state("aˇbc", Mode::Normal);
 }
 
+#[perf]
 #[gpui::test]
 async fn test_selection_on_search(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -289,6 +301,7 @@ async fn test_selection_on_search(cx: &mut gpui::TestAppContext) {
     cx.assert_state(indoc! {"aa\nbb\nˇcc\ncc\ncc\n"}, Mode::Normal);
 }
 
+#[perf]
 #[gpui::test]
 async fn test_word_characters(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new_typescript(cx).await;
@@ -315,6 +328,7 @@ async fn test_word_characters(cx: &mut gpui::TestAppContext) {
     )
 }
 
+#[perf]
 #[gpui::test]
 async fn test_kebab_case(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new_html(cx).await;
@@ -821,6 +835,7 @@ async fn test_paragraphs_dont_wrap(cx: &mut gpui::TestAppContext) {
         two"});
 }
 
+#[perf]
 #[gpui::test]
 async fn test_select_all_issue_2170(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -881,6 +896,7 @@ fn assert_pending_input(cx: &mut VimTestContext, expected: &str) {
     });
 }
 
+#[perf]
 #[gpui::test]
 async fn test_jk_multi(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -972,6 +988,7 @@ async fn test_comma_w(cx: &mut gpui::TestAppContext) {
         .assert_eq("hellˇo hello\nhello hello");
 }
 
+#[perf]
 #[gpui::test]
 async fn test_completion_menu_scroll_aside(cx: &mut TestAppContext) {
     let mut cx = VimTestContext::new_typescript(cx).await;
@@ -1053,6 +1070,7 @@ async fn test_completion_menu_scroll_aside(cx: &mut TestAppContext) {
     });
 }
 
+#[perf]
 #[gpui::test]
 async fn test_rename(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new_typescript(cx).await;
@@ -1088,6 +1106,7 @@ async fn test_rename(cx: &mut gpui::TestAppContext) {
     cx.assert_state("const afterˇ = 2; console.log(after)", Mode::Normal)
 }
 
+#[perf(iterations = 1)]
 #[gpui::test]
 async fn test_remap(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -1210,6 +1229,7 @@ async fn test_undo(cx: &mut gpui::TestAppContext) {
         3"});
 }
 
+#[perf]
 #[gpui::test]
 async fn test_mouse_selection(cx: &mut TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -1385,6 +1405,7 @@ async fn test_dw_eol(cx: &mut gpui::TestAppContext) {
         .assert_eq("twelve ˇtwelve char\ntwelve char");
 }
 
+#[perf]
 #[gpui::test]
 async fn test_toggle_comments(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -1476,6 +1497,7 @@ async fn test_find_multibyte(cx: &mut gpui::TestAppContext) {
         .assert_eq(r#""#);
 }
 
+#[perf]
 #[gpui::test]
 async fn test_sneak(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -1695,6 +1717,7 @@ async fn test_ctrl_w_override(cx: &mut gpui::TestAppContext) {
     cx.shared_state().await.assert_eq("ˇ");
 }
 
+#[perf]
 #[gpui::test]
 async fn test_visual_indent_count(cx: &mut gpui::TestAppContext) {
     let mut cx = VimTestContext::new(cx, true).await;
@@ -1850,6 +1873,7 @@ async fn test_ctrl_o_dot(cx: &mut gpui::TestAppContext) {
     cx.shared_state().await.assert_eq("hellˇllo world.");
 }
 
+#[perf(iterations = 1)]
 #[gpui::test]
 async fn test_folded_multibuffer_excerpts(cx: &mut gpui::TestAppContext) {
     VimTestContext::init(cx);
@@ -2150,6 +2174,7 @@ async fn test_paragraph_multi_delete(cx: &mut gpui::TestAppContext) {
     cx.shared_state().await.assert_eq(indoc! {"ˇ"});
 }
{"ˇ"}); } +#[perf] #[gpui::test] async fn test_multi_cursor_replay(cx: &mut gpui::TestAppContext) { let mut cx = VimTestContext::new(cx, true).await; diff --git a/tooling/perf/Cargo.toml b/tooling/perf/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..f5013a82836b9888d94fb39fa18f0efa00e1b0ce --- /dev/null +++ b/tooling/perf/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "perf" +version = "0.1.0" +publish.workspace = true +edition.workspace = true + +[lints] +workspace = true + +[dependencies] +workspace-hack.workspace = true diff --git a/tooling/perf/LICENSE-APACHE b/tooling/perf/LICENSE-APACHE new file mode 120000 index 0000000000000000000000000000000000000000..1cd601d0a3affae83854be02a0afdec3b7a9ec4d --- /dev/null +++ b/tooling/perf/LICENSE-APACHE @@ -0,0 +1 @@ +../../LICENSE-APACHE \ No newline at end of file diff --git a/tooling/perf/src/main.rs b/tooling/perf/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..a119811aba76afccc16dbef48e4dbee576b46fdc --- /dev/null +++ b/tooling/perf/src/main.rs @@ -0,0 +1,191 @@ +#![warn(clippy::all, clippy::pedantic, clippy::undocumented_unsafe_blocks)] +#![cfg_attr(release, deny(warnings))] + +//! Perf profiler for Zed tests. Outputs timings of tests marked with the `#[perf]` +//! attribute to stdout in Markdown. See the documentation of `util_macros::perf` +//! for usage details on the actual attribute. +//! +//! # Setup +//! Make sure `hyperfine` is installed and in the shell path, then run +//! `cargo build --bin perf --workspace --release` to build the profiler. +//! +//! # Usage +//! Calling this tool rebuilds everything with some cfg flags set for the perf +//! proc macro *and* enables optimisations (`release-fast` profile), so expect it +//! to take a little while. +//! +//! To test an individual crate, run: +//! ```sh +//! cargo perf-test -p $CRATE +//! ``` +//! +//! To test everything (which will be **VERY SLOW**), run: +//! ```sh +//! cargo perf-test --workspace +//! ``` +//! +//! # Notes +//! This should probably not be called manually unless you're working on the profiler +//! itself; use the `cargo perf-test` alias (after building this crate) instead. + +use std::{ + process::{Command, Stdio}, + time::{Duration, Instant}, +}; + +/// How many iterations to attempt the first time a test is run. +const DEFAULT_ITER_COUNT: usize = 12; +/// Multiplier for the iteration count when a test doesn't pass the noise cutoff. +const ITER_COUNT_MUL: usize = 4; +/// How long a test must have run to be assumed to be reliable-ish. +const NOISE_CUTOFF: Duration = Duration::from_millis(250); + +// If any of the below constants are changed, make sure to also update the perf +// proc macro to match! + +/// The suffix on tests marked with `#[perf]`. +const SUF_NORMAL: &str = "__ZED_PERF"; +/// The suffix on tests marked with `#[perf(iterations = n)]`. +const SUF_FIXED: &str = "__ZED_PERF_FIXEDITER"; +/// The env var in which we pass the iteration count to our tests. +const ITER_ENV_VAR: &str = "ZED_PERF_ITER"; + +#[allow(clippy::too_many_lines)] +fn main() { + // We get passed the test we need to run as the 1st argument after our own name. 
diff --git a/tooling/perf/src/main.rs b/tooling/perf/src/main.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a119811aba76afccc16dbef48e4dbee576b46fdc
--- /dev/null
+++ b/tooling/perf/src/main.rs
@@ -0,0 +1,191 @@
+#![warn(clippy::all, clippy::pedantic, clippy::undocumented_unsafe_blocks)]
+#![cfg_attr(release, deny(warnings))]
+
+//! Perf profiler for Zed tests. Outputs timings of tests marked with the `#[perf]`
+//! attribute to stdout in Markdown. See the documentation of `util_macros::perf`
+//! for usage details on the actual attribute.
+//!
+//! # Setup
+//! Make sure `hyperfine` is installed and in the shell path, then run
+//! `cargo build --bin perf --workspace --release` to build the profiler.
+//!
+//! # Usage
+//! Calling this tool rebuilds everything with some cfg flags set for the perf
+//! proc macro *and* enables optimisations (`release-fast` profile), so expect it
+//! to take a little while.
+//!
+//! To test an individual crate, run:
+//! ```sh
+//! cargo perf-test -p $CRATE
+//! ```
+//!
+//! To test everything (which will be **VERY SLOW**), run:
+//! ```sh
+//! cargo perf-test --workspace
+//! ```
+//!
+//! # Notes
+//! This should probably not be called manually unless you're working on the profiler
+//! itself; use the `cargo perf-test` alias (after building this crate) instead.
+
+use std::{
+    process::{Command, Stdio},
+    time::{Duration, Instant},
+};
+
+/// How many iterations to attempt the first time a test is run.
+const DEFAULT_ITER_COUNT: usize = 12;
+/// Multiplier for the iteration count when a test doesn't pass the noise cutoff.
+const ITER_COUNT_MUL: usize = 4;
+/// How long a test must have run to be assumed to be reliable-ish.
+const NOISE_CUTOFF: Duration = Duration::from_millis(250);
+
+// If any of the below constants are changed, make sure to also update the perf
+// proc macro to match!
+
+/// The suffix on tests marked with `#[perf]`.
+const SUF_NORMAL: &str = "__ZED_PERF";
+/// The suffix on tests marked with `#[perf(iterations = n)]`.
+const SUF_FIXED: &str = "__ZED_PERF_FIXEDITER";
+/// The env var in which we pass the iteration count to our tests.
+const ITER_ENV_VAR: &str = "ZED_PERF_ITER";
+
+#[allow(clippy::too_many_lines)]
+fn main() {
+    // We get passed the test we need to run as the 1st argument after our own name.
+    let test_bin = std::env::args().nth(1).unwrap();
+    let mut cmd = Command::new(&test_bin);
+    // --format=json is nightly-only :(
+    cmd.args(["--list", "--format=terse"]);
+    let out = cmd
+        .output()
+        .unwrap_or_else(|_| panic!("FATAL: Could not run test binary {test_bin}"));
+    assert!(
+        out.status.success(),
+        "FATAL: Cannot do perf check - test binary {test_bin} returned an error"
+    );
+    // Parse the test harness output to look for tests we care about.
+    let stdout = String::from_utf8_lossy(&out.stdout);
+    let mut test_list: Vec<_> = stdout
+        .lines()
+        .filter_map(|line| {
+            // This should split only in two; e.g.,
+            // "app::test::test_arena: test" => "app::test::test_arena:", "test"
+            let line: Vec<_> = line.split_whitespace().collect();
+            match line[..] {
+                // Final byte of t_name is ":", which we need to ignore.
+                [t_name, kind] => (kind == "test").then(|| &t_name[..t_name.len() - 1]),
+                _ => None,
+            }
+        })
+        // Exclude tests that aren't marked for perf triage based on suffix.
+        .filter(|t_name| t_name.ends_with(SUF_NORMAL) || t_name.ends_with(SUF_FIXED))
+        .collect();
+
+    // Pulling in itertools just for .dedup() would add quite a big dependency that's
+    // not used elsewhere, so do this on the vec instead.
+    test_list.sort_unstable();
+    test_list.dedup();
+
+    if !test_list.is_empty() {
+        // Print the markdown header which matches hyperfine's result.
+        // TODO: Support exporting JSON also.
+        println!(
+            "| Command | Mean [ms] | Min [ms] | Max [ms] | Iterations | Iter/sec |\n|:---|---:|---:|---:|---:|---:|"
+        );
+    }
+
+    // Spawn and profile an instance of each perf-sensitive test, via hyperfine.
+    for t_name in test_list {
+        // Pretty-print the stripped name for the test.
+        let t_name_normal = t_name.replace(SUF_FIXED, "").replace(SUF_NORMAL, "");
+        // Time test execution to see how many iterations we need to do in order
+        // to account for random noise. This is skipped for tests with fixed
+        // iteration counts.
+        let final_iter_count = if t_name.ends_with(SUF_FIXED) {
+            None
+        } else {
+            let mut iter_count = DEFAULT_ITER_COUNT;
+            loop {
+                let mut cmd = Command::new(&test_bin);
+                cmd.args([t_name, "--exact"]);
+                cmd.env(ITER_ENV_VAR, format!("{iter_count}"));
+                // Don't let the child muck up our stdin/out/err.
+                cmd.stdin(Stdio::null());
+                cmd.stdout(Stdio::null());
+                cmd.stderr(Stdio::null());
+                let pre = Instant::now();
+                // Discard the output beyond ensuring success.
+                let out = cmd.spawn().unwrap().wait();
+                let post = Instant::now();
+                if !out.unwrap().success() {
+                    println!(
+                        "| {t_name_normal} (ERRORED IN TRIAGE) | N/A | N/A | N/A | {iter_count} | N/A |"
+                    );
+                    return;
+                }
+                if post - pre > NOISE_CUTOFF {
+                    break Some(iter_count);
+                } else if let Some(c) = iter_count.checked_mul(ITER_COUNT_MUL) {
+                    iter_count = c;
+                } else {
+                    // This should almost never happen, but maybe..?
+                    eprintln!(
+                        "WARNING: Running nearly usize::MAX iterations of test {t_name_normal}"
+                    );
+                    break Some(iter_count);
+                }
+            }
+        };
+
+        // Now profile!
+        let mut perf_cmd = Command::new("hyperfine");
+        // Warm up the cache and print markdown output to stdout.
+        perf_cmd.args([
+            "--style",
+            "none",
+            "--warmup",
+            "1",
+            "--export-markdown",
+            "-",
+            &format!("{test_bin} {t_name}"),
+        ]);
+        if let Some(final_iter_count) = final_iter_count {
+            perf_cmd.env(ITER_ENV_VAR, format!("{final_iter_count}"));
+        }
+        let p_out = perf_cmd.output().unwrap();
+        let fin_iter = match final_iter_count {
+            Some(i) => &format!("{i}"),
+            None => "(preset)",
+        };
+        if p_out.status.success() {
+            let output = String::from_utf8_lossy(&p_out.stdout);
+            // Strip the name of the test binary from the table (and the space after it!)
+            // + our extraneous test bits + the "Relative" column (which is always at the
+            // end and "1.00").
+            let output = output
+                .replace(&format!("{test_bin} "), "")
+                .replace(SUF_FIXED, "")
+                .replace(SUF_NORMAL, "")
+                .replace(" 1.00 |", "");
+            // Can't use .last() since we have a trailing newline. Sigh.
+            let fin = output.lines().nth(3).unwrap();
+
+            // Calculate how many iterations this does per second, for easy comparison.
+            let ms = fin
+                .split_whitespace()
+                .nth(3)
+                .unwrap()
+                .parse::<f64>()
+                .unwrap();
+            let mul_fac = 1000.0 / ms;
+            let iter_sec = match final_iter_count {
+                #[allow(clippy::cast_precision_loss)]
+                Some(c) => &format!("{:.1}", mul_fac * c as f64),
+                None => "(unknown)",
+            };
+            println!("{fin} {fin_iter} | {iter_sec} |");
+        } else {
+            println!("{t_name_normal} (ERRORED) | N/A | N/A | N/A | {fin_iter} | N/A |");
+        }
+    }
+}
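Finally, a self-contained sketch of the `--list --format=terse` filtering performed in `fn main` above, assuming the stable harness's `name: test` line format (illustrative, not part of the patch):

```rust
/// Sketch of the name-extraction step above: keep `name: test` lines whose
/// name carries one of the perf suffixes, dropping the trailing colon.
fn perf_tests(terse_listing: &str) -> Vec<&str> {
    terse_listing
        .lines()
        .filter_map(|line| {
            let parts: Vec<_> = line.split_whitespace().collect();
            match parts[..] {
                [t_name, "test"] => Some(&t_name[..t_name.len() - 1]),
                _ => None,
            }
        })
        .filter(|name| name.ends_with("__ZED_PERF") || name.ends_with("__ZED_PERF_FIXEDITER"))
        .collect()
}

fn main() {
    let listing = "vim::test::test_sneak__ZED_PERF: test\nvim::test::test_neovim: test";
    assert_eq!(perf_tests(listing), ["vim::test::test_sneak__ZED_PERF"]);
}
```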