@@ -4,7 +4,7 @@ rustflags = ["-C", "symbol-mangling-version=v0", "--cfg", "tokio_unstable"]
[alias]
xtask = "run --package xtask --"
-perf-test = ["test", "--profile", "release-fast", "--lib", "--bins", "--tests", "--config", "target.'cfg(true)'.runner='target/release/perf'", "--config", "target.'cfg(true)'.rustflags=[\"--cfg\", \"perf_enabled\"]"]
+perf-test = ["test", "--profile", "release-fast", "--lib", "--bins", "--tests", "--config", "target.'cfg(true)'.runner='cargo run -p perf --release'", "--config", "target.'cfg(true)'.rustflags=[\"--cfg\", \"perf_enabled\"]"]
perf-compare = ["run", "--release", "-p", "perf", "--", "compare"]
[target.x86_64-unknown-linux-gnu]
@@ -25,4 +25,3 @@ rustflags = [
[env]
MACOSX_DEPLOYMENT_TARGET = "10.15.7"
-CARGO_WORKSPACE_DIR = { value = "", relative = true }
@@ -3,7 +3,7 @@
use collections::HashMap;
use serde::{Deserialize, Serialize};
-use std::time::Duration;
+use std::{num::NonZero, time::Duration};
pub mod consts {
//! Preset identifiers and constants so that the profiler and proc macro agree
@@ -80,6 +80,8 @@ pub enum FailKind {
Profile,
/// Failed due to an incompatible version for the test.
VersionMismatch,
+ /// Could not parse metadata for a test.
+ BadMetadata,
/// Skipped due to filters applied on the perf run.
Skipped,
}
@@ -87,9 +89,10 @@ pub enum FailKind {
impl std::fmt::Display for FailKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
- FailKind::Triage => f.write_str("failed in triage"),
- FailKind::Profile => f.write_str("failed while profiling"),
+ FailKind::Triage => f.write_str("errored in triage"),
+ FailKind::Profile => f.write_str("errored while profiling"),
FailKind::VersionMismatch => f.write_str("test version mismatch"),
+ FailKind::BadMetadata => f.write_str("bad test metadata"),
FailKind::Skipped => f.write_str("skipped"),
}
}
@@ -108,8 +111,9 @@ pub struct TestMdata {
/// INVARIANT: If `version` <= `MDATA_VER`, this tool *must* be able to
/// correctly parse the output of this test.
pub version: u32,
- /// How many iterations to pass this test, if this is preset.
- pub iterations: Option<usize>,
+ /// How many iterations to run this test for, if preset; otherwise, how many
+ /// iterations the test ended up running, as determined at runtime.
+ pub iterations: Option<NonZero<usize>>,
/// The importance of this particular test. See the docs on `Importance` for
/// details.
pub importance: Importance,
@@ -134,16 +138,20 @@ impl Timings {
reason = "We only care about a couple sig figs anyways"
)]
#[must_use]
- pub fn iters_per_sec(&self, total_iters: usize) -> f64 {
- (1000. / self.mean.as_millis() as f64) * total_iters as f64
+ pub fn iters_per_sec(&self, total_iters: NonZero<usize>) -> f64 {
+ (1000. / self.mean.as_millis() as f64) * total_iters.get() as f64
}
}
+/// Aggregate results, meant to be used for a given importance category. Each
+/// test name corresponds to its benchmark results, iteration count, and weight.
+type CategoryInfo = HashMap<String, (Timings, NonZero<usize>, u8)>;
+
/// Aggregate output of all tests run by this handler.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Output {
- /// A list of test outputs. Format is `(test_name, iter_count, timings)`.
- /// The latter being set indicates the test succeeded.
+ /// A list of test outputs. Format is `(test_name, mdata, timings)`.
+ /// The last field being `Ok(_)` indicates the test succeeded.
///
/// INVARIANT: If the test succeeded, the second field is `Some(mdata)` and
/// `mdata.iterations` is `Some(_)`.
@@ -162,7 +170,7 @@ impl Output {
&mut self,
name: impl AsRef<str>,
mut mdata: TestMdata,
- iters: usize,
+ iters: NonZero<usize>,
timings: Timings,
) {
mdata.iterations = Some(iters);
@@ -179,7 +187,7 @@ impl Output {
&mut self,
name: impl AsRef<str>,
mut mdata: Option<TestMdata>,
- attempted_iters: Option<usize>,
+ attempted_iters: Option<NonZero<usize>>,
kind: FailKind,
) {
if let Some(ref mut mdata) = mdata {
@@ -195,7 +203,7 @@ impl Output {
self.tests.is_empty()
}
- /// Sorts the runs in the output in the order that we want it printed.
+ /// Sorts the runs in the output in the order that we want them printed.
pub fn sort(&mut self) {
self.tests.sort_unstable_by(|a, b| match (a, b) {
// Tests where we got no metadata go at the end.
@@ -218,16 +226,20 @@ impl Output {
/// Merges the output of two runs, appending a prefix to the results of the new run.
/// To be used in conjunction with `Output::blank()`, or else only some tests will have
/// a prefix set.
- pub fn merge(&mut self, other: Self, pref_other: impl AsRef<str>) {
+ pub fn merge<'a>(&mut self, other: Self, pref_other: impl Into<Option<&'a str>>) {
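+ // With a prefix, tests from the new run are renamed to "crates/<prefix>::<name>"; with `None`, names are left unchanged.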
+ let pref = if let Some(pref) = pref_other.into() {
+ "crates/".to_string() + pref + "::"
+ } else {
+ String::new()
+ };
self.tests = std::mem::take(&mut self.tests)
.into_iter()
- .chain(other.tests.into_iter().map(|(name, md, tm)| {
- let mut new_name = "crates/".to_string();
- new_name.push_str(pref_other.as_ref());
- new_name.push_str("::");
- new_name.push_str(&name);
- (new_name, md, tm)
- }))
+ .chain(
+ other
+ .tests
+ .into_iter()
+ .map(|(name, md, tm)| (pref.clone() + &name, md, tm)),
+ )
.collect();
}
@@ -273,8 +285,8 @@ impl Output {
r_total_denominator += u32::from(weight);
}
let mean = r_total_numerator / f64::from(r_total_denominator);
- // TODO: also aggregate standard deviation? that's harder to keep
- // meaningful, though, since we dk which tests are correlated
+ // TODO: also aggregate standard deviation? That's harder to keep
+ // meaningful, though, since we don't know which tests are correlated.
Some((cat, PerfDelta { max, mean, min }))
})
.collect();
@@ -282,9 +294,9 @@ impl Output {
PerfReport { deltas }
}
- /// Collapses the `PerfReport` into a `HashMap` of `Importance` <-> tests
- /// each represented as a map of `name, (Timings, iterations, weight)`.
- fn collapse(self) -> HashMap<Importance, HashMap<String, (Timings, usize, u8)>> {
+ /// Collapses the `PerfReport` into a `HashMap` keyed by `Importance`, with
+ /// each importance category mapping to the info for its tests.
+ fn collapse(self) -> HashMap<Importance, CategoryInfo> {
let mut categories = HashMap::<Importance, HashMap<String, _>>::default();
for entry in self.tests {
if let Some(mdata) = entry.1
@@ -402,10 +414,28 @@ impl std::fmt::Display for PerfReport {
// a little jankily like this.
write!(f, "|:---|---:|---:|---:|")?;
for (cat, delta) in sorted.into_iter().rev() {
+ const SIGN_POS: &str = "↑";
+ const SIGN_NEG: &str = "↓";
+ const SIGN_NEUTRAL: &str = "±";
+
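+ // Treat deltas within ±5% of the baseline as noise and mark them neutral.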
+ let prettify = |time: f64| {
+ let sign = if time > 0.05 {
+ SIGN_POS
+ } else if time < -0.05 {
+ SIGN_NEG
+ } else {
+ SIGN_NEUTRAL
+ };
+ format!("{} {:.1}%", sign, time.abs() * 100.)
+ };
+
+ // Pretty-print these instead of just using the float display impl.
write!(
f,
- "\n| {cat} | {:.3} | {:.3} | {:.3} |",
- delta.max, delta.mean, delta.min
+ "\n| {cat} | {} | {} | {} |",
+ prettify(delta.max),
+ prettify(delta.mean),
+ prettify(delta.min)
)?;
}
Ok(())
@@ -3,8 +3,7 @@
//! for usage details on the actual attribute.
//!
//! # Setup
-//! Make sure `hyperfine` is installed and in the shell path, then run
-//! `cargo build -p perf --release` to build the profiler.
+//! Make sure `hyperfine` is installed and in the shell path.
//!
//! # Usage
//! Calling this tool rebuilds the targeted crate(s) with some cfg flags set for the
@@ -44,21 +43,25 @@
//! This should probably not be called manually unless you're working on the profiler
//! itself; use the `cargo perf-test` alias (after building this crate) instead.
-#[allow(clippy::wildcard_imports, reason = "Our crate")]
-use perf::*;
+use perf::{FailKind, Importance, Output, TestMdata, Timings, consts};
use std::{
fs::OpenOptions,
io::Write,
+ num::NonZero,
path::{Path, PathBuf},
process::{Command, Stdio},
+ sync::atomic::{AtomicBool, Ordering},
time::{Duration, Instant},
};
/// How many iterations to attempt the first time a test is run.
-const DEFAULT_ITER_COUNT: usize = 3;
+const DEFAULT_ITER_COUNT: NonZero<usize> = NonZero::new(3).unwrap();
/// Multiplier for the iteration count when a test doesn't pass the noise cutoff.
-const ITER_COUNT_MUL: usize = 4;
+const ITER_COUNT_MUL: NonZero<usize> = NonZero::new(4).unwrap();
+
+/// Whether to suppress status output on stderr while running the tests.
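+/// Set once while parsing CLI flags and read with relaxed loads everywhere else.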
+static QUIET: AtomicBool = AtomicBool::new(false);
/// Report a failure into the output and skip an iteration.
macro_rules! fail {
@@ -84,14 +87,59 @@ enum OutputKind<'a> {
Json(&'a Path),
}
+impl OutputKind<'_> {
+ /// Logs the output of a run as per the `OutputKind`.
+ fn log(&self, output: &Output, t_bin: &str) {
+ match self {
+ OutputKind::Markdown => print!("{output}"),
+ OutputKind::Json(ident) => {
+ let wspace_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
+ let runs_dir = PathBuf::from(&wspace_dir).join(consts::RUNS_DIR);
+ std::fs::create_dir_all(&runs_dir).unwrap();
+ assert!(
+ !ident.to_string_lossy().is_empty(),
+ "FATAL: Empty filename specified!"
+ );
+ // Get the test binary's crate's name; a path like
+ // target/release-fast/deps/gpui-061ff76c9b7af5d7
+ // would be reduced to just "gpui".
+ let test_bin_stripped = Path::new(t_bin)
+ .file_name()
+ .unwrap()
+ .to_str()
+ .unwrap()
+ .rsplit_once('-')
+ .unwrap()
+ .0;
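+ // Output files are written as "<ident>.<crate>.json" inside the runs directory.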
+ let mut file_path = runs_dir.join(ident);
+ file_path
+ .as_mut_os_string()
+ .push(format!(".{test_bin_stripped}.json"));
+ let mut out_file = OpenOptions::new()
+ .write(true)
+ .create(true)
+ .truncate(true)
+ .open(&file_path)
+ .unwrap();
+ out_file
+ .write_all(&serde_json::to_vec(&output).unwrap())
+ .unwrap();
+ if !QUIET.load(Ordering::Relaxed) {
+ eprintln!("JSON output written to {}", file_path.display());
+ }
+ }
+ }
+ }
+}
+
/// Runs a given metadata-returning function from a test handler, parsing its
/// output into a `TestMdata`.
-fn parse_mdata(test_bin: &str, mdata_fn: &str) -> Result<TestMdata, FailKind> {
- let mut cmd = Command::new(test_bin);
+fn parse_mdata(t_bin: &str, mdata_fn: &str) -> Result<TestMdata, FailKind> {
+ let mut cmd = Command::new(t_bin);
cmd.args([mdata_fn, "--exact", "--nocapture"]);
let out = cmd
.output()
- .expect("FATAL: Could not run test binary {test_bin}");
+ .expect("FATAL: Could not run test binary {t_bin}");
assert!(out.status.success());
let stdout = String::from_utf8_lossy(&out.stdout);
let mut version = None;
@@ -104,36 +152,53 @@ fn parse_mdata(test_bin: &str, mdata_fn: &str) -> Result<TestMdata, FailKind> {
{
let mut items = line.split_whitespace();
// For v0, we know the ident always comes first, then one field.
- match items.next().unwrap() {
+ match items.next().ok_or(FailKind::BadMetadata)? {
consts::VERSION_LINE_NAME => {
- let v = items.next().unwrap().parse::<u32>().unwrap();
+ let v = items
+ .next()
+ .ok_or(FailKind::BadMetadata)?
+ .parse::<u32>()
+ .map_err(|_| FailKind::BadMetadata)?;
if v > consts::MDATA_VER {
return Err(FailKind::VersionMismatch);
}
version = Some(v);
}
consts::ITER_COUNT_LINE_NAME => {
- iterations = Some(items.next().unwrap().parse::<usize>().unwrap());
+ // This should never be zero!
+ iterations = Some(
+ items
+ .next()
+ .ok_or(FailKind::BadMetadata)?
+ .parse::<usize>()
+ .map_err(|_| FailKind::BadMetadata)?
+ .try_into()
+ .map_err(|_| FailKind::BadMetadata)?,
+ );
}
consts::IMPORTANCE_LINE_NAME => {
- importance = match items.next().unwrap() {
+ importance = match items.next().ok_or(FailKind::BadMetadata)? {
"critical" => Importance::Critical,
"important" => Importance::Important,
"average" => Importance::Average,
"iffy" => Importance::Iffy,
"fluff" => Importance::Fluff,
- _ => unreachable!(),
+ _ => return Err(FailKind::BadMetadata),
};
}
consts::WEIGHT_LINE_NAME => {
- weight = items.next().unwrap().parse::<u8>().unwrap();
+ weight = items
+ .next()
+ .ok_or(FailKind::BadMetadata)?
+ .parse::<u8>()
+ .map_err(|_| FailKind::BadMetadata)?;
}
_ => unreachable!(),
}
}
Ok(TestMdata {
- version: version.unwrap(),
+ version: version.ok_or(FailKind::BadMetadata)?,
// Iterations may be determined by us and thus left unspecified.
iterations,
// In principle this should always be set, but just for the sake of
@@ -150,7 +215,7 @@ fn compare_profiles(args: &[String]) {
let ident_new = args.first().expect("FATAL: missing identifier for new run");
let ident_old = args.get(1).expect("FATAL: missing identifier for old run");
// TODO: move this to a constant also tbh
- let wspace_dir = std::env::var("CARGO_WORKSPACE_DIR").unwrap();
+ let wspace_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
let runs_dir = PathBuf::from(&wspace_dir).join(consts::RUNS_DIR);
// Use the blank outputs initially, so we can merge into these with prefixes.
@@ -193,59 +258,22 @@ fn compare_profiles(args: &[String]) {
println!("{res}");
}
-#[expect(clippy::too_many_lines, reason = "This will be split up soon!")]
-fn main() {
- let args = std::env::args().collect::<Vec<_>>();
- // We get passed the test we need to run as the 1st argument after our own name.
- let test_bin = args
- .get(1)
- .expect("FATAL: No test binary or command; this shouldn't be manually invoked!");
-
- // We're being asked to compare two results, not run the profiler.
- if test_bin == "compare" {
- compare_profiles(&args[2..]);
- return;
- }
-
- // Whether to skip printing some information to stderr.
- let mut quiet = false;
- // Minimum test importance we care about this run.
- let mut thresh = Importance::Iffy;
- // Where to print the output of this run.
- let mut out_kind = OutputKind::Markdown;
-
- for arg in args.iter().skip(2) {
- match arg.as_str() {
- "--critical" => thresh = Importance::Critical,
- "--important" => thresh = Importance::Important,
- "--average" => thresh = Importance::Average,
- "--iffy" => thresh = Importance::Iffy,
- "--fluff" => thresh = Importance::Fluff,
- "--quiet" => quiet = true,
- s if s.starts_with("--json") => {
- out_kind = OutputKind::Json(Path::new(
- s.strip_prefix("--json=")
- .expect("FATAL: Invalid json parameter; pass --json=filename"),
- ));
- }
- _ => (),
- }
- }
- if !quiet {
- eprintln!("Starting perf check");
- }
-
- let mut cmd = Command::new(test_bin);
+/// Runs a test binary, filtering out tests which aren't marked for perf triage
+/// and giving back the list of tests we care about.
+///
+/// The output is an iterator over `(test_fn_name, test_mdata_name)` pairs.
+fn get_tests(t_bin: &str) -> impl ExactSizeIterator<Item = (String, String)> {
+ let mut cmd = Command::new(t_bin);
// --format=json is nightly-only :(
cmd.args(["--list", "--format=terse"]);
let out = cmd
.output()
- .expect("FATAL: Could not run test binary {test_bin}");
+ .expect("FATAL: Could not run test binary {t_bin}");
assert!(
out.status.success(),
- "FATAL: Cannot do perf check - test binary {test_bin} returned an error"
+ "FATAL: Cannot do perf check - test binary {t_bin} returned an error"
);
- if !quiet {
+ if !QUIET.load(Ordering::Relaxed) {
eprintln!("Test binary ran successfully; starting profile...");
}
// Parse the test harness output to look for tests we care about.
@@ -273,34 +301,156 @@ fn main() {
test_list.sort_unstable();
test_list.dedup();
- let len = test_list.len();
-
// Tests should come in pairs with their mdata fn!
assert!(
- len.is_multiple_of(2),
- "Malformed tests in test binary {test_bin}"
+ test_list.len().is_multiple_of(2),
+ "Malformed tests in test binary {t_bin}"
);
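+ // Collect into an owned Vec so the returned iterator doesn't borrow from this function.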
+ let out = test_list
+ .chunks_exact(2)
+ .map(|pair| {
+ // Be resilient against changes to these constants.
+ if consts::SUF_NORMAL < consts::SUF_MDATA {
+ (pair[0].to_owned(), pair[1].to_owned())
+ } else {
+ (pair[1].to_owned(), pair[0].to_owned())
+ }
+ })
+ .collect::<Vec<_>>();
+ out.into_iter()
+}
+
+/// Triages a test to determine the correct number of iterations for it to run.
+/// Specifically, repeatedly runs the given test until its execution time exceeds
+/// `thresh`, calling `step(iterations)` after every run that falls short to pick
+/// the new iteration count. Returns `None` if the test errored or `step` returned
+/// `None`, else `Some(iterations)`.
+///
+/// # Panics
+/// This will panic if `step` is not monotonically increasing.
+fn triage_test(
+ t_bin: &str,
+ t_name: &str,
+ thresh: Duration,
+ mut step: impl FnMut(NonZero<usize>) -> Option<NonZero<usize>>,
+) -> Option<NonZero<usize>> {
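+ // Start from the default count and let `step` grow it until a run exceeds `thresh`.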
+ let mut iter_count = DEFAULT_ITER_COUNT;
+ loop {
+ let mut cmd = Command::new(t_bin);
+ cmd.args([t_name, "--exact"]);
+ cmd.env(consts::ITER_ENV_VAR, format!("{iter_count}"));
+ // Don't let the child muck up our stdin/out/err.
+ cmd.stdin(Stdio::null());
+ cmd.stdout(Stdio::null());
+ cmd.stderr(Stdio::null());
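+ // Time the whole child process; coarse wall-clock timing is enough for triage.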
+ let pre = Instant::now();
+ // Discard the output beyond ensuring success.
+ let out = cmd.spawn().unwrap().wait();
+ let post = Instant::now();
+ if !out.unwrap().success() {
+ break None;
+ }
+ if post - pre > thresh {
+ break Some(iter_count);
+ }
+ let new = step(iter_count)?;
+ assert!(
+ new > iter_count,
+ "FATAL: step must be monotonically increasing"
+ );
+ iter_count = new;
+ }
+}
+
+/// Profiles a given test with hyperfine, returning the mean and standard deviation
+/// for its runtime. If the test errors, returns `None` instead.
+fn hyp_profile(t_bin: &str, t_name: &str, iterations: NonZero<usize>) -> Option<Timings> {
+ let mut perf_cmd = Command::new("hyperfine");
+ // Warm up the cache and print markdown output to stdout, which we parse.
+ perf_cmd.args([
+ "--style",
+ "none",
+ "--warmup",
+ "1",
+ "--export-markdown",
+ "-",
+ &format!("{t_bin} {t_name}"),
+ ]);
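+ // The iteration count is handed to the test through the shared env var.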
+ perf_cmd.env(consts::ITER_ENV_VAR, format!("{iterations}"));
+ let p_out = perf_cmd.output().unwrap();
+ if !p_out.status.success() {
+ return None;
+ }
+
+ let cmd_output = String::from_utf8_lossy(&p_out.stdout);
+ // Can't use .last() since we have a trailing newline. Sigh.
+ let results_line = cmd_output.lines().nth(3).unwrap();
+ // Grab the values out of the pretty-print.
+ // TODO: Parse json instead.
+ let mut res_iter = results_line.split_whitespace();
+ // Durations are given in milliseconds, so account for that.
+ let mean = Duration::from_secs_f64(res_iter.nth(4).unwrap().parse::<f64>().unwrap() / 1000.);
+ let stddev = Duration::from_secs_f64(res_iter.nth(1).unwrap().parse::<f64>().unwrap() / 1000.);
+
+ Some(Timings { mean, stddev })
+}
+
+fn main() {
+ let args = std::env::args().collect::<Vec<_>>();
+ // We get passed the test we need to run as the 1st argument after our own name.
+ let t_bin = args
+ .get(1)
+ .expect("FATAL: No test binary or command; this shouldn't be manually invoked!");
+
+ // We're being asked to compare two results, not run the profiler.
+ if t_bin == "compare" {
+ compare_profiles(&args[2..]);
+ return;
+ }
+
+ // Minimum test importance we care about this run.
+ let mut thresh = Importance::Iffy;
+ // Where to print the output of this run.
+ let mut out_kind = OutputKind::Markdown;
+
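+ // Remaining arguments select the importance threshold, quiet mode, and output format.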
+ for arg in args.iter().skip(2) {
+ match arg.as_str() {
+ "--critical" => thresh = Importance::Critical,
+ "--important" => thresh = Importance::Important,
+ "--average" => thresh = Importance::Average,
+ "--iffy" => thresh = Importance::Iffy,
+ "--fluff" => thresh = Importance::Fluff,
+ "--quiet" => QUIET.store(true, Ordering::Relaxed),
+ s if s.starts_with("--json") => {
+ out_kind = OutputKind::Json(Path::new(
+ s.strip_prefix("--json=")
+ .expect("FATAL: Invalid json parameter; pass --json=ident"),
+ ));
+ }
+ _ => (),
+ }
+ }
+ if !QUIET.load(Ordering::Relaxed) {
+ eprintln!("Starting perf check");
+ }
+
let mut output = Output::default();
// Spawn and profile an instance of each perf-sensitive test, via hyperfine.
// Each test is a pair of (test, metadata-returning-fn), so grab both. We also
// know the list is sorted.
- for (idx, t_pair) in test_list.chunks_exact(2).enumerate() {
- if !quiet {
- eprint!("\rProfiling test {}/{}", idx + 1, len / 2);
+ let tests = get_tests(t_bin);
+ let len = tests.len();
+ for (idx, (ref t_name, ref t_mdata)) in tests.enumerate() {
+ if !QUIET.load(Ordering::Relaxed) {
+ eprint!("\rProfiling test {}/{}", idx + 1, len);
}
- // Be resilient against changes to these constants.
- let (t_name, t_mdata) = if consts::SUF_NORMAL < consts::SUF_MDATA {
- (t_pair[0], t_pair[1])
- } else {
- (t_pair[1], t_pair[0])
- };
// Pretty-printable stripped name for the test.
let t_name_pretty = t_name.replace(consts::SUF_NORMAL, "");
// Get the metadata this test reports for us.
- let t_mdata = match parse_mdata(test_bin, t_mdata) {
+ let t_mdata = match parse_mdata(t_bin, t_mdata) {
Ok(mdata) => mdata,
Err(err) => fail!(output, t_name_pretty, err),
};
@@ -312,78 +462,28 @@ fn main() {
// Time test execution to see how many iterations we need to do in order
// to account for random noise. This is skipped for tests with fixed
// iteration counts.
- let mut errored = false;
- let final_iter_count = t_mdata.iterations.unwrap_or_else(|| {
- let mut iter_count = DEFAULT_ITER_COUNT;
- loop {
- let mut cmd = Command::new(test_bin);
- cmd.args([t_name, "--exact"]);
- cmd.env(consts::ITER_ENV_VAR, format!("{iter_count}"));
- // Don't let the child muck up our stdin/out/err.
- cmd.stdin(Stdio::null());
- cmd.stdout(Stdio::null());
- cmd.stderr(Stdio::null());
- let pre = Instant::now();
- // Discard the output beyond ensuring success.
- let out = cmd.spawn().unwrap().wait();
- let post = Instant::now();
- if !out.unwrap().success() {
- errored = true;
- break iter_count;
- }
- if post - pre > consts::NOISE_CUTOFF {
- break iter_count;
- } else if let Some(c) = iter_count.checked_mul(ITER_COUNT_MUL) {
- iter_count = c;
+ let final_iter_count = t_mdata.iterations.or_else(|| {
+ triage_test(t_bin, t_name, consts::NOISE_CUTOFF, |c| {
+ if let Some(c) = c.checked_mul(ITER_COUNT_MUL) {
+ Some(c)
} else {
// This should almost never happen, but maybe..?
eprintln!(
- "WARNING: Running nearly usize::MAX iterations of test {t_name_pretty}"
+ "WARNING: Ran nearly usize::MAX iterations of test {t_name_pretty}; skipping"
);
- break iter_count;
+ None
}
- }
+ })
});
// Don't profile failing tests.
- if errored {
+ let Some(final_iter_count) = final_iter_count else {
fail!(output, t_name_pretty, t_mdata, FailKind::Triage);
- }
+ };
// Now profile!
- let mut perf_cmd = Command::new("hyperfine");
- // Warm up the cache and print markdown output to stdout.
- // TODO: json
- perf_cmd.args([
- "--style",
- "none",
- "--warmup",
- "1",
- "--export-markdown",
- "-",
- &format!("{test_bin} {t_name}"),
- ]);
- perf_cmd.env(consts::ITER_ENV_VAR, format!("{final_iter_count}"));
- let p_out = perf_cmd.output().unwrap();
- if p_out.status.success() {
- let cmd_output = String::from_utf8_lossy(&p_out.stdout);
- // Can't use .last() since we have a trailing newline. Sigh.
- let results_line = cmd_output.lines().nth(3).unwrap();
- // Grab the values out of the pretty-print.
- // TODO: Parse json instead.
- let mut res_iter = results_line.split_whitespace();
- // Durations are given in milliseconds, so account for that.
- let mean =
- Duration::from_secs_f64(res_iter.nth(4).unwrap().parse::<f64>().unwrap() / 1000.);
- let stddev =
- Duration::from_secs_f64(res_iter.nth(1).unwrap().parse::<f64>().unwrap() / 1000.);
-
- output.success(
- t_name_pretty,
- t_mdata,
- final_iter_count,
- Timings { mean, stddev },
- );
+ if let Some(timings) = hyp_profile(t_bin, t_name, final_iter_count) {
+ output.success(t_name_pretty, t_mdata, final_iter_count, timings);
} else {
fail!(
output,
@@ -394,7 +494,7 @@ fn main() {
);
}
}
- if !quiet {
+ if !QUIET.load(Ordering::Relaxed) {
if output.is_empty() {
eprintln!("Nothing to do.");
} else {
@@ -409,43 +509,5 @@ fn main() {
return;
}
- match out_kind {
- OutputKind::Markdown => print!("{output}"),
- OutputKind::Json(user_path) => {
- let wspace_dir = std::env::var("CARGO_WORKSPACE_DIR").unwrap();
- let runs_dir = PathBuf::from(&wspace_dir).join(consts::RUNS_DIR);
- std::fs::create_dir_all(&runs_dir).unwrap();
- assert!(
- !user_path.to_string_lossy().is_empty(),
- "FATAL: Empty filename specified!"
- );
- // Get the test binary's crate's name; a path like
- // target/release-fast/deps/gpui-061ff76c9b7af5d7
- // would be reduced to just "gpui".
- let test_bin_stripped = Path::new(test_bin)
- .file_name()
- .unwrap()
- .to_str()
- .unwrap()
- .rsplit_once('-')
- .unwrap()
- .0;
- let mut file_path = runs_dir.join(user_path);
- file_path
- .as_mut_os_string()
- .push(format!(".{test_bin_stripped}.json"));
- let mut out_file = OpenOptions::new()
- .write(true)
- .create(true)
- .truncate(true)
- .open(&file_path)
- .unwrap();
- out_file
- .write_all(&serde_json::to_vec(&output).unwrap())
- .unwrap();
- if !quiet {
- eprintln!("JSON output written to {}", file_path.display());
- }
- }
- }
+ out_kind.log(&output, t_bin);
}