Detailed changes
@@ -4,6 +4,7 @@ rustflags = ["-C", "symbol-mangling-version=v0", "--cfg", "tokio_unstable"]
[alias]
xtask = "run --package xtask --"
+perf-test = ["test", "--profile", "release-fast", "--lib", "--bins", "--tests", "--config", "target.'cfg(true)'.runner='target/release/perf'", "--config", "target.'cfg(true)'.rustflags=[\"--cfg\", \"perf_enabled\"]"]
[target.x86_64-unknown-linux-gnu]
linker = "clang"
@@ -7924,6 +7924,7 @@ dependencies = [
"unicode-segmentation",
"usvg",
"util",
+ "util_macros",
"uuid",
"waker-fn",
"wayland-backend",
@@ -12163,6 +12164,13 @@ version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
+[[package]]
+name = "perf"
+version = "0.1.0"
+dependencies = [
+ "workspace-hack",
+]
+
[[package]]
name = "pest"
version = "2.8.0"
@@ -18727,6 +18735,7 @@ dependencies = [
"tokio",
"ui",
"util",
+ "util_macros",
"vim_mode_setting",
"workspace",
"workspace-hack",
@@ -220,7 +220,7 @@ members = [
#
"tooling/workspace-hack",
- "tooling/xtask",
+ "tooling/xtask", "tooling/perf",
]
default-members = ["crates/zed"]
@@ -110,6 +110,7 @@ resvg = { version = "0.45.0", default-features = false, features = [
"memmap-fonts",
] }
usvg = { version = "0.45.0", default-features = false }
+util_macros.workspace = true
schemars.workspace = true
seahash = "4.1"
semantic_version.workspace = true
@@ -1300,7 +1300,9 @@ mod tests {
use super::*;
- #[test]
+ use util_macros::perf;
+
+ #[perf]
fn test_basic_highlight_style_combination() {
let style_a = HighlightStyle::default();
let style_b = HighlightStyle::default();
@@ -1385,7 +1387,7 @@ mod tests {
);
}
- #[test]
+ #[perf]
fn test_combine_highlights() {
assert_eq!(
combine_highlights(
@@ -17,3 +17,6 @@ doctest = false
quote.workspace = true
syn.workspace = true
workspace-hack.workspace = true
+
+[features]
+perf-enabled = []
@@ -1,8 +1,9 @@
#![cfg_attr(not(target_os = "windows"), allow(unused))]
+#![allow(clippy::test_attr_in_doctest)]
use proc_macro::TokenStream;
-use quote::quote;
-use syn::{LitStr, parse_macro_input};
+use quote::{ToTokens, quote};
+use syn::{ItemFn, LitStr, parse_macro_input, parse_quote};
/// A macro for cross-platform path string literals in tests. On Windows it replaces
/// `/` with `\\` and adds `C:` to the beginning of absolute paths. On other platforms, the path is
@@ -87,3 +88,145 @@ pub fn line_endings(input: TokenStream) -> TokenStream {
#text
})
}
+
+/// Inner data for the perf macro.
+struct PerfArgs {
+ /// How many times to loop a test before rerunning the test binary.
+    /// If omitted, the perf harness auto-determines this value.
+ iterations: Option<syn::Expr>,
+}
+
+impl syn::parse::Parse for PerfArgs {
+ fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
+ if input.is_empty() {
+ return Ok(PerfArgs { iterations: None });
+ }
+
+ let mut iterations = None;
+ // In principle we only have one possible argument, but leave this as
+ // a loop in case we expand this in the future.
+ for meta in
+ syn::punctuated::Punctuated::<syn::Meta, syn::Token![,]>::parse_terminated(input)?
+ {
+ match &meta {
+ syn::Meta::NameValue(meta_name_value) => {
+ if meta_name_value.path.is_ident("iterations") {
+ iterations = Some(meta_name_value.value.clone());
+ } else {
+ return Err(syn::Error::new_spanned(
+ &meta_name_value.path,
+ "unexpected argument, expected 'iterations'",
+ ));
+ }
+ }
+ _ => {
+ return Err(syn::Error::new_spanned(
+ meta,
+ "expected name-value argument like 'iterations = 1'",
+ ));
+ }
+ }
+ }
+
+ Ok(PerfArgs { iterations })
+ }
+}
+
+/// Marks a test as perf-sensitive, to be triaged when checking the performance
+/// of a build. This also automatically applies `#[test]`.
+///
+/// By default, the number of iterations when profiling this test is auto-determined.
+/// If this needs to be overridden, pass the desired iteration count to the macro
+/// as a parameter (`#[perf(iterations = n)]`). Note that the actual profiler may still
+/// run the test an arbitrary number of times; this flag just sets the number of executions
+/// before the process is restarted and global state is reset.
+///
+/// # Usage notes
+/// This should probably not be applied to tests that do any significant fs IO, as
+/// locks on files may not be released in time when repeating a test many times. This
+/// might lead to spurious failures.
+///
+/// # Examples
+/// ```rust
+/// use util_macros::perf;
+///
+/// #[perf]
+/// fn expensive_computation_test() {
+/// // Test goes here.
+/// }
+/// ```
+///
+/// This also works with `#[gpui::test]`s, though in most cases it shouldn't
+/// be used with automatic iterations.
+/// ```rust,ignore
+/// use util_macros::perf;
+///
+/// #[perf(iterations = 1)]
+/// #[gpui::test]
+/// fn oneshot_test(_cx: &mut gpui::TestAppContext) {
+/// // Test goes here.
+/// }
+/// ```
+#[proc_macro_attribute]
+pub fn perf(our_attr: TokenStream, input: TokenStream) -> TokenStream {
+ // If any of the below constants are changed, make sure to also update the perf
+ // profiler to match!
+
+ /// The suffix on tests marked with `#[perf]`.
+ const SUF_NORMAL: &str = "__ZED_PERF";
+ /// The suffix on tests marked with `#[perf(iterations = n)]`.
+ const SUF_FIXED: &str = "__ZED_PERF_FIXEDITER";
+ /// The env var in which we pass the iteration count to our tests.
+ const ITER_ENV_VAR: &str = "ZED_PERF_ITER";
+
+ let iter_count = parse_macro_input!(our_attr as PerfArgs).iterations;
+
+ let ItemFn {
+ mut attrs,
+ vis,
+ mut sig,
+ block,
+ } = parse_macro_input!(input as ItemFn);
+ attrs.push(parse_quote!(#[test]));
+ attrs.push(parse_quote!(#[allow(non_snake_case)]));
+
+ let block: Box<syn::Block> = if cfg!(perf_enabled) {
+ // Make the ident obvious when calling, for the test parser.
+ let mut new_ident = sig.ident.to_string();
+ if iter_count.is_some() {
+ new_ident.push_str(SUF_FIXED);
+ } else {
+ new_ident.push_str(SUF_NORMAL);
+ }
+
+ let new_ident = syn::Ident::new(&new_ident, sig.ident.span());
+ sig.ident = new_ident;
+ // If we have a preset iteration count, just use that.
+ if let Some(iter_count) = iter_count {
+ parse_quote!({
+ for _ in 0..#iter_count {
+ #block
+ }
+ })
+ } else {
+ // Otherwise, the perf harness will pass us the value in an env var.
+ parse_quote!({
+ let iter_count = std::env::var(#ITER_ENV_VAR).unwrap().parse::<usize>().unwrap();
+ for _ in 0..iter_count {
+ #block
+ }
+ })
+ }
+ } else {
+ block
+ };
+
+ ItemFn {
+ attrs,
+ vis,
+ sig,
+ block,
+ }
+ .into_token_stream()
+ .into()
+}
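For orientation, here is a rough sketch of what the attribute produces when `util_macros` is built with `--cfg perf_enabled` and no fixed iteration count is given (the suffix and env var names are the constants above; the test body is a placeholder):

```rust
// Input:
//     #[perf]
//     fn my_test() { /* body */ }
//
// Approximate expansion (illustrative, not verbatim macro output):
#[test]
#[allow(non_snake_case)]
fn my_test__ZED_PERF() {
    // The perf harness supplies the iteration count via ZED_PERF_ITER.
    let iter_count = std::env::var("ZED_PERF_ITER").unwrap().parse::<usize>().unwrap();
    for _ in 0..iter_count {
        /* body */
    }
}
```

With `#[perf(iterations = n)]`, the suffix becomes `__ZED_PERF_FIXEDITER` and the loop bound is the literal `n` instead of the env var. Without `--cfg perf_enabled`, only `#[test]` and `#[allow(non_snake_case)]` are added and the body is left untouched.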
@@ -46,6 +46,7 @@ theme.workspace = true
tokio = { version = "1.15", features = ["full"], optional = true }
ui.workspace = true
util.workspace = true
+util_macros.workspace = true
vim_mode_setting.workspace = true
workspace.workspace = true
zed_actions.workspace = true
@@ -25,6 +25,9 @@ use search::BufferSearchBar;
use crate::{PushSneak, PushSneakBackward, insert::NormalBefore, motion, state::Mode};
+use util_macros::perf;
+
+#[perf]
#[gpui::test]
async fn test_initially_disabled(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, false).await;
@@ -44,6 +47,7 @@ async fn test_neovim(cx: &mut gpui::TestAppContext) {
cx.assert_editor_state("ˇtest");
}
+#[perf]
#[gpui::test]
async fn test_toggle_through_settings(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -80,6 +84,7 @@ async fn test_toggle_through_settings(cx: &mut gpui::TestAppContext) {
assert_eq!(cx.mode(), Mode::Normal);
}
+#[perf]
#[gpui::test]
async fn test_cancel_selection(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -104,6 +109,7 @@ async fn test_cancel_selection(cx: &mut gpui::TestAppContext) {
cx.assert_editor_state("The quick brown fox juˇmps over the lazy dog");
}
+#[perf]
#[gpui::test]
async fn test_buffer_search(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -132,6 +138,7 @@ async fn test_buffer_search(cx: &mut gpui::TestAppContext) {
})
}
+#[perf]
#[gpui::test]
async fn test_count_down(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -143,6 +150,7 @@ async fn test_count_down(cx: &mut gpui::TestAppContext) {
cx.assert_editor_state("aa\nbb\ncc\ndd\neˇe");
}
+#[perf]
#[gpui::test]
async fn test_end_of_document_710(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -157,6 +165,7 @@ async fn test_end_of_document_710(cx: &mut gpui::TestAppContext) {
cx.assert_editor_state("aˇa\nbb\ncc");
}
+#[perf]
#[gpui::test]
async fn test_end_of_line_with_times(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -175,6 +184,7 @@ async fn test_end_of_line_with_times(cx: &mut gpui::TestAppContext) {
cx.assert_editor_state("aa\nbb\ncˇc");
}
+#[perf]
#[gpui::test]
async fn test_indent_outdent(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -241,6 +251,7 @@ async fn test_escape_command_palette(cx: &mut gpui::TestAppContext) {
cx.assert_state("aˇbc\n", Mode::Insert);
}
+#[perf]
#[gpui::test]
async fn test_escape_cancels(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -251,6 +262,7 @@ async fn test_escape_cancels(cx: &mut gpui::TestAppContext) {
cx.assert_state("aˇbc", Mode::Normal);
}
+#[perf]
#[gpui::test]
async fn test_selection_on_search(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -289,6 +301,7 @@ async fn test_selection_on_search(cx: &mut gpui::TestAppContext) {
cx.assert_state(indoc! {"aa\nbb\nˇcc\ncc\ncc\n"}, Mode::Normal);
}
+#[perf]
#[gpui::test]
async fn test_word_characters(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new_typescript(cx).await;
@@ -315,6 +328,7 @@ async fn test_word_characters(cx: &mut gpui::TestAppContext) {
)
}
+#[perf]
#[gpui::test]
async fn test_kebab_case(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new_html(cx).await;
@@ -821,6 +835,7 @@ async fn test_paragraphs_dont_wrap(cx: &mut gpui::TestAppContext) {
two"});
}
+#[perf]
#[gpui::test]
async fn test_select_all_issue_2170(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -881,6 +896,7 @@ fn assert_pending_input(cx: &mut VimTestContext, expected: &str) {
});
}
+#[perf]
#[gpui::test]
async fn test_jk_multi(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -972,6 +988,7 @@ async fn test_comma_w(cx: &mut gpui::TestAppContext) {
.assert_eq("hellˇo hello\nhello hello");
}
+#[perf]
#[gpui::test]
async fn test_completion_menu_scroll_aside(cx: &mut TestAppContext) {
let mut cx = VimTestContext::new_typescript(cx).await;
@@ -1053,6 +1070,7 @@ async fn test_completion_menu_scroll_aside(cx: &mut TestAppContext) {
});
}
+#[perf]
#[gpui::test]
async fn test_rename(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new_typescript(cx).await;
@@ -1088,6 +1106,7 @@ async fn test_rename(cx: &mut gpui::TestAppContext) {
cx.assert_state("const afterˇ = 2; console.log(after)", Mode::Normal)
}
+#[perf(iterations = 1)]
#[gpui::test]
async fn test_remap(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -1210,6 +1229,7 @@ async fn test_undo(cx: &mut gpui::TestAppContext) {
3"});
}
+#[perf]
#[gpui::test]
async fn test_mouse_selection(cx: &mut TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -1385,6 +1405,7 @@ async fn test_dw_eol(cx: &mut gpui::TestAppContext) {
.assert_eq("twelve ˇtwelve char\ntwelve char");
}
+#[perf]
#[gpui::test]
async fn test_toggle_comments(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -1476,6 +1497,7 @@ async fn test_find_multibyte(cx: &mut gpui::TestAppContext) {
.assert_eq(r#"<label for="guests">ˇo</label>"#);
}
+#[perf]
#[gpui::test]
async fn test_sneak(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -1695,6 +1717,7 @@ async fn test_ctrl_w_override(cx: &mut gpui::TestAppContext) {
cx.shared_state().await.assert_eq("ˇ");
}
+#[perf]
#[gpui::test]
async fn test_visual_indent_count(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -1850,6 +1873,7 @@ async fn test_ctrl_o_dot(cx: &mut gpui::TestAppContext) {
cx.shared_state().await.assert_eq("hellˇllo world.");
}
+#[perf(iterations = 1)]
#[gpui::test]
async fn test_folded_multibuffer_excerpts(cx: &mut gpui::TestAppContext) {
VimTestContext::init(cx);
@@ -2150,6 +2174,7 @@ async fn test_paragraph_multi_delete(cx: &mut gpui::TestAppContext) {
cx.shared_state().await.assert_eq(indoc! {"ˇ"});
}
+#[perf]
#[gpui::test]
async fn test_multi_cursor_replay(cx: &mut gpui::TestAppContext) {
let mut cx = VimTestContext::new(cx, true).await;
@@ -0,0 +1,11 @@
+[package]
+name = "perf"
+version = "0.1.0"
+publish.workspace = true
+edition.workspace = true
+
+[lints]
+workspace = true
+
+[dependencies]
+workspace-hack.workspace = true
@@ -0,0 +1 @@
+../../LICENSE-APACHE
@@ -0,0 +1,191 @@
+#![warn(clippy::all, clippy::pedantic, clippy::undocumented_unsafe_blocks)]
+#![cfg_attr(release, deny(warnings))]
+
+//! Perf profiler for Zed tests. Outputs timings of tests marked with the `#[perf]`
+//! attribute to stdout in Markdown. See the documentation of `util_macros::perf`
+//! for usage details on the actual attribute.
+//!
+//! # Setup
+//! Make sure `hyperfine` is installed and in the shell path, then run
+//! `cargo build --bin perf --workspace --release` to build the profiler.
+//!
+//! # Usage
+//! Calling this tool rebuilds everything with some cfg flags set for the perf
+//! proc macro *and* enables optimisations (`release-fast` profile), so expect it
+//! to take a little while.
+//!
+//! To test an individual crate, run:
+//! ```sh
+//! cargo perf-test -p $CRATE
+//! ```
+//!
+//! To test everything (which will be **VERY SLOW**), run:
+//! ```sh
+//! cargo perf-test --workspace
+//! ```
+//!
+//! # Notes
+//! This should probably not be called manually unless you're working on the profiler
+//! itself; use the `cargo perf-test` alias (after building this crate) instead.
+
+use std::{
+ process::{Command, Stdio},
+ time::{Duration, Instant},
+};
+
+/// How many iterations to attempt the first time a test is run.
+const DEFAULT_ITER_COUNT: usize = 12;
+/// Multiplier for the iteration count when a test doesn't pass the noise cutoff.
+const ITER_COUNT_MUL: usize = 4;
+/// How long a test must have run to be assumed to be reliable-ish.
+const NOISE_CUTOFF: Duration = Duration::from_millis(250);
+
+// If any of the below constants are changed, make sure to also update the perf
+// proc macro to match!
+
+/// The suffix on tests marked with `#[perf]`.
+const SUF_NORMAL: &str = "__ZED_PERF";
+/// The suffix on tests marked with `#[perf(iterations = n)]`.
+const SUF_FIXED: &str = "__ZED_PERF_FIXEDITER";
+/// The env var in which we pass the iteration count to our tests.
+const ITER_ENV_VAR: &str = "ZED_PERF_ITER";
+
+#[allow(clippy::too_many_lines)]
+fn main() {
+ // We get passed the test we need to run as the 1st argument after our own name.
+ let test_bin = std::env::args().nth(1).unwrap();
+ let mut cmd = Command::new(&test_bin);
+ // --format=json is nightly-only :(
+ cmd.args(["--list", "--format=terse"]);
+    let out = cmd
+        .output()
+        .unwrap_or_else(|e| panic!("FATAL: Could not run test binary {test_bin}: {e}"));
+ assert!(
+ out.status.success(),
+ "FATAL: Cannot do perf check - test binary {test_bin} returned an error"
+ );
+ // Parse the test harness output to look for tests we care about.
+ let stdout = String::from_utf8_lossy(&out.stdout);
+ let mut test_list: Vec<_> = stdout
+ .lines()
+ .filter_map(|line| {
+ // This should split only in two; e.g.,
+ // "app::test::test_arena: test" => "app::test::test_arena:", "test"
+ let line: Vec<_> = line.split_whitespace().collect();
+ match line[..] {
+ // Final byte of t_name is ":", which we need to ignore.
+ [t_name, kind] => (kind == "test").then(|| &t_name[..t_name.len() - 1]),
+ _ => None,
+ }
+ })
+ // Exclude tests that aren't marked for perf triage based on suffix.
+ .filter(|t_name| t_name.ends_with(SUF_NORMAL) || t_name.ends_with(SUF_FIXED))
+ .collect();
+
+    // Pulling in itertools just for .dedup() would add a fairly large dependency
+    // that's not used elsewhere, so do this on the sorted vec instead.
+ test_list.sort_unstable();
+ test_list.dedup();
+
+ if !test_list.is_empty() {
+ // Print the markdown header which matches hyperfine's result.
+ // TODO: Support exporting JSON also.
+ println!(
+ "| Command | Mean [ms] | Min [ms] | Max [ms] | Iterations | Iter/sec |\n|:---|---:|---:|---:|---:|---:|"
+ );
+ }
+
+ // Spawn and profile an instance of each perf-sensitive test, via hyperfine.
+    'tests: for t_name in test_list {
+ // Pretty-print the stripped name for the test.
+ let t_name_normal = t_name.replace(SUF_FIXED, "").replace(SUF_NORMAL, "");
+ // Time test execution to see how many iterations we need to do in order
+ // to account for random noise. This is skipped for tests with fixed
+ // iteration counts.
+ let final_iter_count = if t_name.ends_with(SUF_FIXED) {
+ None
+ } else {
+ let mut iter_count = DEFAULT_ITER_COUNT;
+ loop {
+ let mut cmd = Command::new(&test_bin);
+ cmd.args([t_name, "--exact"]);
+ cmd.env(ITER_ENV_VAR, format!("{iter_count}"));
+ // Don't let the child muck up our stdin/out/err.
+ cmd.stdin(Stdio::null());
+ cmd.stdout(Stdio::null());
+ cmd.stderr(Stdio::null());
+ let pre = Instant::now();
+ // Discard the output beyond ensuring success.
+ let out = cmd.spawn().unwrap().wait();
+ let post = Instant::now();
+ if !out.unwrap().success() {
+ println!(
+ "| {t_name_normal} (ERRORED IN TRIAGE) | N/A | N/A | N/A | {iter_count} | N/A |"
+ );
+                continue 'tests;
+ }
+ if post - pre > NOISE_CUTOFF {
+ break Some(iter_count);
+ } else if let Some(c) = iter_count.checked_mul(ITER_COUNT_MUL) {
+ iter_count = c;
+ } else {
+ // This should almost never happen, but maybe..?
+ eprintln!(
+ "WARNING: Running nearly usize::MAX iterations of test {t_name_normal}"
+ );
+ break Some(iter_count);
+ }
+ }
+ };
+
+ // Now profile!
+ let mut perf_cmd = Command::new("hyperfine");
+ // Warm up the cache and print markdown output to stdout.
+ perf_cmd.args([
+ "--style",
+ "none",
+ "--warmup",
+ "1",
+ "--export-markdown",
+ "-",
+ &format!("{test_bin} {t_name}"),
+ ]);
+ if let Some(final_iter_count) = final_iter_count {
+ perf_cmd.env(ITER_ENV_VAR, format!("{final_iter_count}"));
+ }
+ let p_out = perf_cmd.output().unwrap();
+ let fin_iter = match final_iter_count {
+ Some(i) => &format!("{i}"),
+ None => "(preset)",
+ };
+ if p_out.status.success() {
+ let output = String::from_utf8_lossy(&p_out.stdout);
+ // Strip the name of the test binary from the table (and the space after it!)
+ // + our extraneous test bits + the "Relative" column (which is always at the end and "1.00").
+ let output = output
+ .replace(&format!("{test_bin} "), "")
+ .replace(SUF_FIXED, "")
+ .replace(SUF_NORMAL, "")
+ .replace(" 1.00 |", "");
+ // Can't use .last() since we have a trailing newline. Sigh.
+ let fin = output.lines().nth(3).unwrap();
+
+ // Calculate how many iterations this does per second, for easy comparison.
+ let ms = fin
+ .split_whitespace()
+ .nth(3)
+ .unwrap()
+ .parse::<f64>()
+ .unwrap();
+ let mul_fac = 1000.0 / ms;
+ let iter_sec = match final_iter_count {
+ #[allow(clippy::cast_precision_loss)]
+ Some(c) => &format!("{:.1}", mul_fac * c as f64),
+ None => "(unknown)",
+ };
+ println!("{fin} {fin_iter} | {iter_sec} |");
+ } else {
+            println!("| {t_name_normal} (ERRORED) | N/A | N/A | N/A | {fin_iter} | N/A |");
+ }
+ }
+}
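To tie the constants together, a worked example with invented timings: a test's first triage run does `DEFAULT_ITER_COUNT` = 12 iterations in, say, 40 ms; that is below `NOISE_CUTOFF`, so the count scales by `ITER_COUNT_MUL` to 48 and then 192. If 192 iterations take ~310 ms, calibration stops and 192 is passed via `ZED_PERF_ITER` to the hyperfine runs. The resulting row then lands under the header printed earlier, schematically:

```
| Command | Mean [ms] | Min [ms] | Max [ms] | Iterations | Iter/sec |
|:---|---:|---:|---:|---:|---:|
| tests::test_combine_highlights | 310.2 | 302.1 | 325.4 | 192 | 619.0 |
```

Here 1000 / 310.2 × 192 ≈ 619 iterations per second, matching the `mul_fac` computation above (all numbers illustrative).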