@@ -11,6 +11,7 @@ dependencies = [
  "agent_settings",
  "anyhow",
  "buffer_diff",
+ "collections",
  "editor",
  "env_logger 0.11.8",
  "file_icons",
@@ -35,11 +36,10 @@ dependencies = [
  "terminal",
  "ui",
  "url",
+ "util",
  "uuid",
  "watch",
  "workspace-hack",
- "zed-collections",
- "zed-util",
 ]
 
 [[package]]
@@ -47,6 +47,7 @@ name = "acp_tools"
 version = "0.1.0"
 dependencies = [
  "agent-client-protocol",
+ "collections",
  "gpui",
  "language",
  "markdown",
@@ -56,10 +57,9 @@ dependencies = [
  "settings",
  "theme",
  "ui",
+ "util",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-util",
 ]
 
 [[package]]
@@ -69,6 +69,7 @@ dependencies = [
  "anyhow",
  "buffer_diff",
  "clock",
+ "collections",
  "ctor",
  "futures 0.3.31",
  "gpui",
@@ -81,10 +82,9 @@ dependencies = [
  "serde_json",
  "settings",
  "text",
+ "util",
  "watch",
  "workspace-hack",
- "zed-collections",
- "zed-util",
  "zlog",
 ]
 
@@ -104,9 +104,9 @@ dependencies = [
  "release_channel",
  "smallvec",
  "ui",
+ "util",
  "workspace",
  "workspace-hack",
- "zed-util",
 ]
 
 [[package]]
@@ -149,6 +149,7 @@ dependencies = [
  "chrono",
  "client",
  "cloud_llm_client",
+ "collections",
  "component",
  "context_server",
  "convert_case 0.8.0",
@@ -157,6 +158,7 @@ dependencies = [
  "git",
  "gpui",
  "heed",
+ "http_client",
  "icons",
  "indoc",
  "itertools 0.14.0",
@@ -183,12 +185,10 @@ dependencies = [
  "theme",
  "thiserror 2.0.12",
  "time",
+ "util",
  "uuid",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-http-client",
- "zed-util",
  "zed_env_vars",
  "zstd",
 ]
@@ -228,6 +228,7 @@ dependencies = [
  "client",
  "clock",
  "cloud_llm_client",
+ "collections",
  "context_server",
  "ctor",
  "db",
@@ -240,6 +241,7 @@ dependencies = [
  "gpui_tokio",
  "handlebars 4.5.0",
  "html_to_markdown",
+ "http_client",
  "indoc",
  "itertools 0.14.0",
  "language",
@@ -271,14 +273,12 @@ dependencies = [
  "tree-sitter-rust",
  "ui",
  "unindent",
+ "util",
  "uuid",
  "watch",
  "web_search",
  "workspace-hack",
  "worktree",
- "zed-collections",
- "zed-http-client",
- "zed-util",
  "zed_env_vars",
  "zlog",
  "zstd",
@@ -296,11 +296,13 @@ dependencies = [
  "anyhow",
  "async-trait",
  "client",
+ "collections",
  "env_logger 0.11.8",
  "fs",
  "futures 0.3.31",
  "gpui",
  "gpui_tokio",
+ "http_client",
  "indoc",
  "language",
  "language_model",
@@ -319,12 +321,10 @@ dependencies = [
  "terminal",
  "thiserror 2.0.12",
  "ui",
+ "util",
  "uuid",
  "watch",
  "workspace-hack",
- "zed-collections",
- "zed-http-client",
- "zed-util",
 ]
 
 [[package]]
@@ -333,6 +333,7 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "cloud_llm_client",
+ "collections",
  "convert_case 0.8.0",
  "fs",
  "gpui",
@@ -344,9 +345,8 @@ dependencies = [
  "serde_json",
  "serde_json_lenient",
  "settings",
+ "util",
  "workspace-hack",
- "zed-collections",
- "zed-util",
 ]
 
 [[package]]
@@ -373,6 +373,7 @@ dependencies = [
  "chrono",
  "client",
  "cloud_llm_client",
+ "collections",
  "command_palette_hooks",
  "component",
  "context_server",
@@ -387,6 +388,7 @@ dependencies = [
  "fuzzy",
  "gpui",
  "html_to_markdown",
+ "http_client",
  "indoc",
  "itertools 0.14.0",
  "jsonschema",
@@ -436,12 +438,10 @@ dependencies = [
  "unindent",
  "url",
  "urlencoding",
+ "util",
  "watch",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-http-client",
- "zed-util",
  "zed_actions",
 ]
 
@@ -659,6 +659,7 @@ dependencies = [
  "anyhow",
  "chrono",
  "futures 0.3.31",
+ "http_client",
  "schemars 1.0.1",
  "serde",
  "serde_json",
@@ -666,7 +667,6 @@ dependencies = [
  "strum 0.27.1",
  "thiserror 2.0.12",
  "workspace-hack",
- "zed-http-client",
 ]
 
 [[package]]
@@ -813,9 +813,9 @@ dependencies = [
  "net",
  "smol",
  "tempfile",
+ "util",
  "windows 0.61.1",
  "workspace-hack",
- "zed-util",
  "zeroize",
 ]
 
@@ -841,6 +841,7 @@ dependencies = [
  "client",
  "clock",
  "cloud_llm_client",
+ "collections",
  "context_server",
  "fs",
  "futures 0.3.31",
@@ -869,11 +870,10 @@ dependencies = [
  "text",
  "ui",
  "unindent",
+ "util",
  "uuid",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-util",
  "zed_env_vars",
 ]
 
@@ -883,6 +883,7 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "async-trait",
+ "collections",
  "derive_more",
  "extension",
  "futures 0.3.31",
@@ -894,10 +895,9 @@ dependencies = [
  "serde",
  "serde_json",
  "ui",
+ "util",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-util",
 ]
 
 [[package]]
@@ -907,6 +907,7 @@ dependencies = [
  "anyhow",
  "assistant_slash_command",
  "chrono",
+ "collections",
  "context_server",
  "editor",
  "feature_flags",
@@ -916,6 +917,7 @@ dependencies = [
  "globset",
  "gpui",
  "html_to_markdown",
+ "http_client",
  "language",
  "pretty_assertions",
  "project",
@@ -927,12 +929,10 @@ dependencies = [
  "smol",
  "text",
  "ui",
+ "util",
  "workspace",
  "workspace-hack",
  "worktree",
- "zed-collections",
- "zed-http-client",
- "zed-util",
  "zlog",
 ]
 
@@ -944,6 +944,7 @@ dependencies = [
  "anyhow",
  "buffer_diff",
  "clock",
+ "collections",
  "ctor",
  "derive_more",
  "gpui",
@@ -961,10 +962,9 @@ dependencies = [
  "serde_json",
  "settings",
  "text",
+ "util",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-util",
  "zlog",
 ]
 
@@ -981,6 +981,7 @@ dependencies = [
  "client",
  "clock",
  "cloud_llm_client",
+ "collections",
  "component",
  "derive_more",
  "diffy",
@@ -992,6 +993,7 @@ dependencies = [
  "gpui_tokio",
  "handlebars 4.5.0",
  "html_to_markdown",
+ "http_client",
  "indoc",
  "itertools 0.14.0",
  "language",
@@ -1026,13 +1028,11 @@ dependencies = [
  "tree-sitter-rust",
  "ui",
  "unindent",
+ "util",
  "watch",
  "web_search",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-http-client",
- "zed-util",
  "zlog",
 ]
 
@@ -1407,6 +1407,7 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "async-tar",
+ "collections",
  "crossbeam",
  "denoise",
  "gpui",
@@ -1418,9 +1419,8 @@ dependencies = [
  "settings",
  "smol",
  "thiserror 2.0.12",
+ "util",
  "workspace-hack",
- "zed-collections",
- "zed-util",
 ]
 
 [[package]]
@@ -1443,6 +1443,7 @@ dependencies = [
  "client",
  "db",
  "gpui",
+ "http_client",
  "log",
  "paths",
  "release_channel",
@@ -1454,7 +1455,6 @@ dependencies = [
  "which 6.0.3",
  "workspace",
  "workspace-hack",
- "zed-http-client",
 ]
 
 [[package]]
@@ -1478,15 +1478,15 @@ dependencies = [
  "client",
  "editor",
  "gpui",
+ "http_client",
  "markdown_preview",
  "release_channel",
  "serde",
  "serde_json",
  "smol",
+ "util",
  "workspace",
  "workspace-hack",
- "zed-http-client",
- "zed-util",
 ]
 
 [[package]]
@@ -2013,8 +2013,8 @@ version = "0.1.0"
 dependencies = [
  "aws-smithy-runtime-api",
  "aws-smithy-types",
+ "http_client",
  "workspace-hack",
- "zed-http-client",
 ]
 
 [[package]]
@@ -2498,11 +2498,11 @@ dependencies = [
  "rand 0.9.1",
  "rope",
  "serde_json",
+ "sum_tree",
  "text",
  "unindent",
+ "util",
  "workspace-hack",
- "zed-sum-tree",
- "zed-util",
  "zlog",
 ]
 
@@ -2673,11 +2673,13 @@ dependencies = [
  "anyhow",
  "audio",
  "client",
+ "collections",
  "feature_flags",
  "fs",
  "futures 0.3.31",
  "gpui",
  "gpui_tokio",
+ "http_client",
  "language",
  "livekit_client",
  "log",
@@ -2686,10 +2688,8 @@ dependencies = [
  "serde",
  "settings",
  "telemetry",
+ "util",
  "workspace-hack",
- "zed-collections",
- "zed-http-client",
- "zed-util",
 ]
 
 [[package]]
@@ -2988,8 +2988,10 @@ dependencies = [
  "anyhow",
  "client",
  "clock",
+ "collections",
  "futures 0.3.31",
  "gpui",
+ "http_client",
  "language",
  "log",
  "postage",
@@ -2998,10 +3000,8 @@ dependencies = [
  "settings",
  "text",
  "time",
+ "util",
  "workspace-hack",
- "zed-collections",
- "zed-http-client",
- "zed-util",
 ]
 
 [[package]]
@@ -3137,6 +3137,7 @@ dependencies = [
  "anyhow",
  "askpass",
  "clap",
+ "collections",
  "core-foundation 0.10.0",
  "core-services",
  "exec",
@@ -3148,10 +3149,9 @@ dependencies = [
  "release_channel",
  "serde",
  "tempfile",
+ "util",
  "windows 0.61.1",
  "workspace-hack",
- "zed-collections",
- "zed-util",
 ]
 
 [[package]]
@@ -3165,6 +3165,7 @@ dependencies = [
  "clock",
  "cloud_api_client",
  "cloud_llm_client",
+ "collections",
  "credentials_provider",
  "derive_more",
  "feature_flags",
@@ -3172,6 +3173,7 @@ dependencies = [
  "futures 0.3.31",
  "gpui",
  "gpui_tokio",
+ "http_client",
  "http_client_tls",
  "httparse",
  "log",
@@ -3201,12 +3203,10 @@ dependencies = [
  "tokio-rustls 0.26.2",
  "tokio-socks",
  "url",
+ "util",
  "windows 0.61.1",
  "workspace-hack",
  "worktree",
- "zed-collections",
- "zed-http-client",
- "zed-util",
 ]
 
 [[package]]
@@ -3228,11 +3228,11 @@ dependencies = [
  "futures 0.3.31",
  "gpui",
  "gpui_tokio",
+ "http_client",
  "parking_lot",
  "serde_json",
  "workspace-hack",
  "yawc",
- "zed-http-client",
 ]
 
 [[package]]
@@ -3372,6 +3372,7 @@ dependencies = [
  "edit_prediction_context",
  "futures 0.3.31",
  "gpui",
+ "http_client",
  "language",
  "language_models",
  "log",
@@ -3381,7 +3382,6 @@ dependencies = [
  "smol",
  "text",
  "workspace-hack",
- "zed-http-client",
 ]
 
 [[package]]
@@ -3408,6 +3408,7 @@ dependencies = [
  "client",
  "clock",
  "collab_ui",
+ "collections",
  "command_palette_hooks",
  "context_server",
  "ctor",
@@ -3428,6 +3429,7 @@ dependencies = [
  "gpui",
  "gpui_tokio",
  "hex",
+ "http_client",
  "hyper 0.14.32",
  "indoc",
  "language",
@@ -3457,6 +3459,7 @@ dependencies = [
  "rpc",
  "scrypt",
  "sea-orm",
+ "semantic_version",
  "semver",
  "serde",
  "serde_json",
@@ -3480,14 +3483,11 @@ dependencies = [
  "tracing",
  "tracing-subscriber",
  "unindent",
+ "util",
  "uuid",
  "workspace",
  "workspace-hack",
  "worktree",
- "zed-collections",
- "zed-http-client",
- "zed-semantic-version",
- "zed-util",
  "zlog",
 ]
 
@@ -3500,11 +3500,13 @@ dependencies = [
  "channel",
  "chrono",
  "client",
+ "collections",
  "db",
  "editor",
  "futures 0.3.31",
  "fuzzy",
  "gpui",
+ "http_client",
  "log",
  "menu",
  "notifications",
@@ -3525,11 +3527,18 @@ dependencies = [
  "title_bar",
  "tree-sitter-md",
  "ui",
+ "util",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-http-client",
- "zed-util",
+]
+
+[[package]]
+name = "collections"
+version = "0.1.0"
+dependencies = [
+ "indexmap 2.9.0",
+ "rustc-hash 2.1.1",
+ "workspace-hack",
 ]
 
 [[package]]
@@ -3570,6 +3579,7 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "client",
+ "collections",
  "command_palette_hooks",
  "ctor",
  "db",
@@ -3591,10 +3601,9 @@ dependencies = [
  "theme",
  "time",
  "ui",
+ "util",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-util",
  "zed_actions",
 ]
 
@@ -3602,16 +3611,17 @@ dependencies = [
 name = "command_palette_hooks"
 version = "0.1.0"
 dependencies = [
+ "collections",
  "derive_more",
  "gpui",
  "workspace-hack",
- "zed-collections",
 ]
 
 [[package]]
 name = "component"
 version = "0.1.0"
 dependencies = [
+ "collections",
  "documented",
  "gpui",
  "inventory",
@@ -3619,7 +3629,6 @@ dependencies = [
  "strum 0.27.1",
  "theme",
  "workspace-hack",
- "zed-collections",
 ]
 
 [[package]]
@@ -3682,6 +3691,7 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "async-trait",
+ "collections",
  "futures 0.3.31",
  "gpui",
  "log",
@@ -3695,9 +3705,8 @@ dependencies = [
  "smol",
  "tempfile",
  "url",
+ "util",
  "workspace-hack",
- "zed-collections",
- "zed-util",
 ]
 
 [[package]]
@@ -3733,6 +3742,7 @@ dependencies = [
  "chrono",
  "client",
  "clock",
+ "collections",
  "command_palette_hooks",
  "ctor",
  "dirs 4.0.0",
@@ -3741,6 +3751,7 @@ dependencies = [
  "fs",
  "futures 0.3.31",
  "gpui",
+ "http_client",
  "indoc",
  "itertools 0.14.0",
  "language",
@@ -3756,15 +3767,13 @@ dependencies = [
  "serde",
  "serde_json",
  "settings",
+ "sum_tree",
  "task",
  "theme",
  "ui",
+ "util",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-http-client",
- "zed-sum-tree",
- "zed-util",
  "zlog",
 ]
 
@@ -4478,10 +4487,12 @@ dependencies = [
  "async-tar",
  "async-trait",
  "client",
+ "collections",
  "dap-types",
  "fs",
  "futures 0.3.31",
  "gpui",
+ "http_client",
  "language",
  "libc",
  "log",
@@ -4499,10 +4510,8 @@ dependencies = [
  "telemetry",
  "tree-sitter",
  "tree-sitter-go",
+ "util",
  "workspace-hack",
- "zed-collections",
- "zed-http-client",
- "zed-util",
  "zlog",
 ]
 
@@ -4522,6 +4531,7 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "async-trait",
+ "collections",
  "dap",
  "dotenvy",
  "fs",
@@ -4536,9 +4546,8 @@ dependencies = [
  "shlex",
  "smol",
  "task",
+ "util",
  "workspace-hack",
- "zed-collections",
- "zed-util",
 ]
 
 [[package]]
@@ -4635,8 +4644,8 @@ dependencies = [
  "sqlez",
  "sqlez_macros",
  "tempfile",
+ "util",
  "workspace-hack",
- "zed-util",
  "zed_env_vars",
 ]
 
@@ -4662,8 +4671,8 @@ dependencies = [
  "gpui",
  "serde_json",
  "task",
+ "util",
  "workspace-hack",
- "zed-util",
 ]
 
 [[package]]
@@ -4679,9 +4688,9 @@ dependencies = [
  "serde_json",
  "settings",
  "smol",
+ "util",
  "workspace",
  "workspace-hack",
- "zed-util",
 ]
 
 [[package]]
@@ -4692,6 +4701,7 @@ dependencies = [
  "anyhow",
  "bitflags 2.9.0",
  "client",
+ "collections",
  "command_palette_hooks",
  "dap",
  "dap_adapters",
@@ -4734,10 +4744,9 @@ dependencies = [
  "tree-sitter-json",
  "ui",
  "unindent",
+ "util",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-util",
  "zed_actions",
  "zlog",
 ]
@@ -4757,11 +4766,11 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "futures 0.3.31",
+ "http_client",
  "schemars 1.0.1",
  "serde",
  "serde_json",
  "workspace-hack",
- "zed-http-client",
 ]
 
 [[package]]
@@ -4839,6 +4848,16 @@ dependencies = [
  "syn 2.0.101",
 ]
 
+[[package]]
+name = "derive_refineable"
+version = "0.1.0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.101",
+ "workspace-hack",
+]
+
 [[package]]
 name = "deunicode"
 version = "1.6.2"
@@ -4851,6 +4870,7 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "client",
+ "collections",
  "component",
  "ctor",
  "editor",
@@ -4870,10 +4890,9 @@ dependencies = [
  "theme",
  "ui",
  "unindent",
+ "util",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-util",
  "zlog",
 ]
 
@@ -5030,9 +5049,9 @@ dependencies = [
  "settings",
  "task",
  "theme",
+ "util",
  "workspace-hack",
  "zed",
- "zed-util",
  "zlog",
 ]
 
@@ -5225,6 +5244,7 @@ dependencies = [
  "arrayvec",
  "clap",
  "cloud_llm_client",
+ "collections",
  "futures 0.3.31",
  "gpui",
  "hashbrown 0.15.3",
@@ -5247,9 +5267,8 @@ dependencies = [
  "tree-sitter-c",
  "tree-sitter-cpp",
  "tree-sitter-go",
+ "util",
  "workspace-hack",
- "zed-collections",
- "zed-util",
  "zlog",
 ]
 
@@ -5263,6 +5282,7 @@ dependencies = [
  "buffer_diff",
  "client",
  "clock",
+ "collections",
  "convert_case 0.8.0",
  "criterion",
  "ctor",
@@ -5276,6 +5296,7 @@ dependencies = [
  "fuzzy",
  "git",
  "gpui",
+ "http_client",
  "indoc",
  "itertools 0.14.0",
  "language",
@@ -5301,6 +5322,7 @@ dependencies = [
  "smallvec",
  "smol",
  "snippet",
+ "sum_tree",
  "task",
  "telemetry",
  "tempfile",
@@ -5319,14 +5341,11 @@ dependencies = [
  "unicode-segmentation",
  "unindent",
  "url",
+ "util",
  "uuid",
  "vim_mode_setting",
  "workspace",
  "workspace-hack",
- "zed-collections",
- "zed-http-client",
- "zed-sum-tree",
- "zed-util",
  "zed_actions",
  "zlog",
 ]
@@ -5608,6 +5627,7 @@ dependencies = [
  "clap",
  "client",
  "cloud_llm_client",
+ "collections",
  "debug_adapter_extension",
  "dirs 4.0.0",
  "dotenvy",
@@ -5642,11 +5662,10 @@ dependencies = [
  "terminal_view",
  "toml 0.8.20",
  "unindent",
+ "util",
  "uuid",
  "watch",
  "workspace-hack",
- "zed-collections",
- "zed-util",
 ]
 
 [[package]]
@@ -5725,28 +5744,28 @@ dependencies = [
  "async-compression",
  "async-tar",
  "async-trait",
+ "collections",
  "dap",
  "fs",
  "futures 0.3.31",
  "gpui",
  "heck 0.5.0",
+ "http_client",
  "language",
  "log",
  "lsp",
  "parking_lot",
  "pretty_assertions",
+ "semantic_version",
  "serde",
  "serde_json",
  "task",
  "toml 0.8.20",
  "url",
+ "util",
  "wasm-encoder 0.221.3",
  "wasmparser 0.221.3",
  "workspace-hack",
- "zed-collections",
- "zed-http-client",
- "zed-semantic-version",
- "zed-util",
 ]
 
 [[package]]
  
  
  
    
    @@ -0,0 +1,447 @@
+//! The implementation of this crate is kept in a separate module
+//! so that it is easy to publish this crate as part of GPUI's dependencies.
+
+use collections::HashMap;
+use serde::{Deserialize, Serialize};
+use std::{num::NonZero, time::Duration};
+
pub mod consts {
    //! Preset identifiers and constants so that the profiler and proc macro agree
    //! on their communication protocol.

    /// The suffix on the actual test function.
    pub const SUF_NORMAL: &str = "__ZED_PERF_FN";
    /// The suffix on an extra function which prints metadata about a test to stdout.
    pub const SUF_MDATA: &str = "__ZED_PERF_MDATA";
    /// The env var in which we pass the iteration count to our tests.
    pub const ITER_ENV_VAR: &str = "ZED_PERF_ITER";
    /// The prefix printed on all benchmark test metadata lines, to distinguish it from
    /// possible output by the test harness itself.
    pub const MDATA_LINE_PREF: &str = "ZED_MDATA_";
    /// The version number for the data returned from the test metadata function.
    /// Increment on non-backwards-compatible changes.
    pub const MDATA_VER: u32 = 0;
    /// The default weight, if none is specified.
    pub const WEIGHT_DEFAULT: u8 = 50;
    /// How long a test must have run to be assumed to be reliable-ish.
    /// Means below a fraction of this are flagged as unreliable in output tables.
    pub const NOISE_CUTOFF: std::time::Duration = std::time::Duration::from_millis(250);

    /// Identifier for the iteration count of a test metadata.
    pub const ITER_COUNT_LINE_NAME: &str = "iter_count";
    /// Identifier for the weight of a test metadata.
    pub const WEIGHT_LINE_NAME: &str = "weight";
    /// Identifier for importance in test metadata.
    pub const IMPORTANCE_LINE_NAME: &str = "importance";
    /// Identifier for the test metadata version.
    pub const VERSION_LINE_NAME: &str = "version";

    /// Where to save json run information.
    pub const RUNS_DIR: &str = ".perf-runs";
}
+
/// How relevant a benchmark is.
///
/// The explicit discriminants drive the derived `PartialOrd`/`Ord`: a larger
/// discriminant compares greater, so `Critical > Important > Average > Iffy > Fluff`.
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
pub enum Importance {
    /// Regressions shouldn't be accepted without good reason.
    Critical = 4,
    /// Regressions should be paid extra attention.
    Important = 3,
    /// No extra attention should be paid to regressions, but they might still
    /// be indicative of something happening.
    #[default]
    Average = 2,
    /// Unclear if regressions are likely to be meaningful, but still worth keeping
    /// an eye on. Lowest level that's checked by default by the profiler.
    Iffy = 1,
    /// Regressions are likely to be spurious or don't affect core functionality.
    /// Only relevant if a lot of them happen, or as supplemental evidence for a
    /// higher-importance benchmark regressing. Not checked by default.
    Fluff = 0,
}
+
+impl std::fmt::Display for Importance {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Importance::Critical => f.write_str("critical"),
+            Importance::Important => f.write_str("important"),
+            Importance::Average => f.write_str("average"),
+            Importance::Iffy => f.write_str("iffy"),
+            Importance::Fluff => f.write_str("fluff"),
+        }
+    }
+}
+
/// Why or when did this test fail?
///
/// Stored in the `Err` side of a test's result within `Output`; rendered
/// via `Display` when printing result tables.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum FailKind {
    /// Failed while triaging it to determine the iteration count.
    Triage,
    /// Failed while profiling it.
    Profile,
    /// Failed due to an incompatible version for the test.
    VersionMismatch,
    /// Could not parse metadata for a test.
    BadMetadata,
    /// Skipped due to filters applied on the perf run.
    Skipped,
}
+
+impl std::fmt::Display for FailKind {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            FailKind::Triage => f.write_str("errored in triage"),
+            FailKind::Profile => f.write_str("errored while profiling"),
+            FailKind::VersionMismatch => f.write_str("test version mismatch"),
+            FailKind::BadMetadata => f.write_str("bad test metadata"),
+            FailKind::Skipped => f.write_str("skipped"),
+        }
+    }
+}
+
/// Information about a given perf test.
///
/// Produced by the test's metadata function and consumed by the profiler;
/// serialized as part of saved run information.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TestMdata {
    /// A version number for when the test was generated. If this is greater
    /// than the version this test handler expects, one of the following will
    /// happen in an unspecified manner:
    /// - The test is skipped silently.
    /// - The handler exits with an error message indicating the version mismatch
    ///   or inability to parse the metadata.
    ///
    /// INVARIANT: If `version` <= `MDATA_VER`, this tool *must* be able to
    /// correctly parse the output of this test.
    pub version: u32,
    /// How many iterations to pass this test if this is preset, or how many
    /// iterations a test ended up running afterwards if determined at runtime.
    pub iterations: Option<NonZero<usize>>,
    /// The importance of this particular test. See the docs on `Importance` for
    /// details.
    pub importance: Importance,
    /// The weight of this particular test within its importance category. Used
    /// when comparing across runs.
    pub weight: u8,
}
+
/// The actual timings of a test, as measured by Hyperfine.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Timings {
    /// Mean runtime for `self.iter_total` runs of this test.
    pub mean: Duration,
    /// Standard deviation for the above.
    pub stddev: Duration,
}
+
+impl Timings {
+    /// How many iterations does this test seem to do per second?
+    #[expect(
+        clippy::cast_precision_loss,
+        reason = "We only care about a couple sig figs anyways"
+    )]
+    #[must_use]
+    pub fn iters_per_sec(&self, total_iters: NonZero<usize>) -> f64 {
+        (1000. / self.mean.as_millis() as f64) * total_iters.get() as f64
+    }
+}
+
/// Aggregate results, meant to be used for a given importance category. Each
/// test name corresponds to its benchmark results, iteration count, and weight.
/// Built by `Output::collapse` and consumed by `Output::compare_perf`.
type CategoryInfo = HashMap<String, (Timings, NonZero<usize>, u8)>;
+
/// Aggregate output of all tests run by this handler.
///
/// Serialized to json for saved runs (see `consts::RUNS_DIR`) and rendered
/// as a markdown table via `Display`.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Output {
    /// A list of test outputs. Format is `(test_name, mdata, timings)`.
    /// The latter being `Ok(_)` indicates the test succeeded.
    ///
    /// INVARIANT: If the test succeeded, the second field is `Some(mdata)` and
    /// `mdata.iterations` is `Some(_)`.
    tests: Vec<(String, Option<TestMdata>, Result<Timings, FailKind>)>,
}
+
+impl Output {
+    /// Instantiates an empty "output". Useful for merging.
+    #[must_use]
+    pub fn blank() -> Self {
+        Output { tests: Vec::new() }
+    }
+
+    /// Reports a success and adds it to this run's `Output`.
+    pub fn success(
+        &mut self,
+        name: impl AsRef<str>,
+        mut mdata: TestMdata,
+        iters: NonZero<usize>,
+        timings: Timings,
+    ) {
+        mdata.iterations = Some(iters);
+        self.tests
+            .push((name.as_ref().to_string(), Some(mdata), Ok(timings)));
+    }
+
+    /// Reports a failure and adds it to this run's `Output`. If this test was tried
+    /// with some number of iterations (i.e. this was not a version mismatch or skipped
+    /// test), it should be reported also.
+    ///
+    /// Using the `fail!()` macro is usually more convenient.
+    pub fn failure(
+        &mut self,
+        name: impl AsRef<str>,
+        mut mdata: Option<TestMdata>,
+        attempted_iters: Option<NonZero<usize>>,
+        kind: FailKind,
+    ) {
+        if let Some(ref mut mdata) = mdata {
+            mdata.iterations = attempted_iters;
+        }
+        self.tests
+            .push((name.as_ref().to_string(), mdata, Err(kind)));
+    }
+
+    /// True if no tests executed this run.
+    #[must_use]
+    pub fn is_empty(&self) -> bool {
+        self.tests.is_empty()
+    }
+
+    /// Sorts the runs in the output in the order that we want them printed.
+    pub fn sort(&mut self) {
+        self.tests.sort_unstable_by(|a, b| match (a, b) {
+            // Tests where we got no metadata go at the end.
+            ((_, Some(_), _), (_, None, _)) => std::cmp::Ordering::Greater,
+            ((_, None, _), (_, Some(_), _)) => std::cmp::Ordering::Less,
+            // Then sort by importance, then weight.
+            ((_, Some(a_mdata), _), (_, Some(b_mdata), _)) => {
+                let c = a_mdata.importance.cmp(&b_mdata.importance);
+                if matches!(c, std::cmp::Ordering::Equal) {
+                    a_mdata.weight.cmp(&b_mdata.weight)
+                } else {
+                    c
+                }
+            }
+            // Lastly by name.
+            ((a_name, ..), (b_name, ..)) => a_name.cmp(b_name),
+        });
+    }
+
+    /// Merges the output of two runs, appending a prefix to the results of the new run.
+    /// To be used in conjunction with `Output::blank()`, or else only some tests will have
+    /// a prefix set.
+    pub fn merge<'a>(&mut self, other: Self, pref_other: impl Into<Option<&'a str>>) {
+        let pref = if let Some(pref) = pref_other.into() {
+            "crates/".to_string() + pref + "::"
+        } else {
+            String::new()
+        };
+        self.tests = std::mem::take(&mut self.tests)
+            .into_iter()
+            .chain(
+                other
+                    .tests
+                    .into_iter()
+                    .map(|(name, md, tm)| (pref.clone() + &name, md, tm)),
+            )
+            .collect();
+    }
+
+    /// Evaluates the performance of `self` against `baseline`. The latter is taken
+    /// as the comparison point, i.e. a positive resulting `PerfReport` means that
+    /// `self` performed better.
+    ///
+    /// # Panics
+    /// `self` and `baseline` are assumed to have the iterations field on all
+    /// `TestMdata`s set to `Some(_)` if the `TestMdata` is present itself.
+    #[must_use]
+    pub fn compare_perf(self, baseline: Self) -> PerfReport {
+        let self_categories = self.collapse();
+        let mut other_categories = baseline.collapse();
+
+        let deltas = self_categories
+            .into_iter()
+            .filter_map(|(cat, self_data)| {
+                // Only compare categories where both           meow
+                // runs have data.                              /
+                let mut other_data = other_categories.remove(&cat)?;
+                let mut max = f64::MIN;
+                let mut min = f64::MAX;
+
+                // Running totals for averaging out tests.
+                let mut r_total_numerator = 0.;
+                let mut r_total_denominator = 0;
+                // Yeah this is O(n^2), but realistically it'll hardly be a bottleneck.
+                for (name, (s_timings, s_iters, weight)) in self_data {
+                    // Only use the new weights if they conflict.
+                    let Some((o_timings, o_iters, _)) = other_data.remove(&name) else {
+                        continue;
+                    };
+                    let shift =
+                        (o_timings.iters_per_sec(o_iters) / s_timings.iters_per_sec(s_iters)) - 1.;
+                    if shift > max {
+                        max = shift;
+                    }
+                    if shift < min {
+                        min = shift;
+                    }
+                    r_total_numerator += shift * f64::from(weight);
+                    r_total_denominator += u32::from(weight);
+                }
+                // There were no runs here!
+                if r_total_denominator == 0 {
+                    None
+                } else {
+                    let mean = r_total_numerator / f64::from(r_total_denominator);
+                    // TODO: also aggregate standard deviation? That's harder to keep
+                    // meaningful, though, since we dk which tests are correlated.
+                    Some((cat, PerfDelta { max, mean, min }))
+                }
+            })
+            .collect();
+
+        PerfReport { deltas }
+    }
+
+    /// Collapses the `PerfReport` into a `HashMap` over `Importance`, with
+    /// each importance category having its tests contained.
+    fn collapse(self) -> HashMap<Importance, CategoryInfo> {
+        let mut categories = HashMap::<Importance, HashMap<String, _>>::default();
+        for entry in self.tests {
+            if let Some(mdata) = entry.1
+                && let Ok(timings) = entry.2
+            {
+                if let Some(handle) = categories.get_mut(&mdata.importance) {
+                    handle.insert(entry.0, (timings, mdata.iterations.unwrap(), mdata.weight));
+                } else {
+                    let mut new = HashMap::default();
+                    new.insert(entry.0, (timings, mdata.iterations.unwrap(), mdata.weight));
+                    categories.insert(mdata.importance, new);
+                }
+            }
+        }
+
+        categories
+    }
+}
+
impl std::fmt::Display for Output {
    /// Renders the run as a markdown table: one row per test, with failures
    /// and metadata-less entries shown as `N/A` rows.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Don't print the header for an empty run.
        if self.tests.is_empty() {
            return Ok(());
        }

        // We want to print important tests at the top, then alphabetical.
        // Sort a clone so Display doesn't need `&mut self`.
        let mut sorted = self.clone();
        sorted.sort();
        // Markdown header for making a nice little table :>
        writeln!(
            f,
            "| Command | Iter/sec | Mean [ms] | SD [ms] | Iterations | Importance (weight) |",
        )?;
        writeln!(f, "|:---|---:|---:|---:|---:|---:|")?;
        for (name, metadata, timings) in &sorted.tests {
            match metadata {
                Some(metadata) => match timings {
                    // Happy path.
                    Ok(timings) => {
                        // If the test succeeded, then metadata.iterations is Some(_).
                        writeln!(
                            f,
                            "| {} | {:.2} | {} | {:.2} | {} | {} ({}) |",
                            name,
                            timings.iters_per_sec(metadata.iterations.unwrap()),
                            {
                                // Very small mean runtimes will give inaccurate
                                // results. Should probably also penalise weight.
                                // Threshold: NOISE_CUTOFF / 8, in milliseconds.
                                let mean = timings.mean.as_secs_f64() * 1000.;
                                if mean < consts::NOISE_CUTOFF.as_secs_f64() * 1000. / 8. {
                                    format!("{mean:.2} (unreliable)")
                                } else {
                                    format!("{mean:.2}")
                                }
                            },
                            timings.stddev.as_secs_f64() * 1000.,
                            metadata.iterations.unwrap(),
                            metadata.importance,
                            metadata.weight,
                        )?;
                    }
                    // We have (some) metadata, but the test errored.
                    Err(err) => writeln!(
                        f,
                        "| ({}) {} | N/A | N/A | N/A | {} | {} ({}) |",
                        err,
                        name,
                        metadata
                            .iterations
                            .map_or_else(|| "N/A".to_owned(), |i| format!("{i}")),
                        metadata.importance,
                        metadata.weight
                    )?,
                },
                // No metadata, couldn't even parse the test output.
                // With no metadata the result must be Err, so unwrap_err is safe.
                None => writeln!(
                    f,
                    "| ({}) {} | N/A | N/A | N/A | N/A | N/A |",
                    timings.as_ref().unwrap_err(),
                    name
                )?,
            }
        }
        Ok(())
    }
}
+
+/// The difference in performance between two runs within a given importance
+/// category.
+struct PerfDelta {
+    /// The biggest improvement / least bad regression.
+    max: f64,
+    /// The weighted average change in test times.
+    mean: f64,
+    /// The worst regression / smallest improvement.
+    min: f64,
+}
+
+/// Shim type for reporting all performance deltas across importance categories.
+pub struct PerfReport {
+    /// Inner (group, diff) pairing.
+    deltas: HashMap<Importance, PerfDelta>,
+}
+
+impl std::fmt::Display for PerfReport {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if self.deltas.is_empty() {
+            return write!(f, "(no matching tests)");
+        }
+        let sorted = self.deltas.iter().collect::<Vec<_>>();
+        writeln!(f, "| Category | Max | Mean | Min |")?;
+        // To avoid a trailing newline at the end of the table, each row below
+        // writes its own leading newline instead of using writeln!.
+        write!(f, "|:---|---:|---:|---:|")?;
+        for (cat, delta) in sorted.into_iter().rev() {
+            const SIGN_POS: &str = "↑";
+            const SIGN_NEG: &str = "↓";
+            const SIGN_NEUTRAL: &str = "±";
+
+            let prettify = |time: f64| {
+                let sign = if time > 0.05 {
+                    SIGN_POS
+                } else if time < 0.05 && time > -0.05 {
+                    SIGN_NEUTRAL
+                } else {
+                    SIGN_NEG
+                };
+                format!("{} {:.1}%", sign, time.abs() * 100.)
+            };
+
+            // Pretty-print these instead of just using the float display impl.
+            write!(
+                f,
+                "\n| {cat} | {} | {} | {} |",
+                prettify(delta.max),
+                prettify(delta.mean),
+                prettify(delta.min)
+            )?;
+        }
+        Ok(())
+    }
+}
  
  
  
    
    @@ -3,447 +3,5 @@
 //!
 //! For usage documentation, see the docs on this crate's binary.
 
-use collections::HashMap;
-use serde::{Deserialize, Serialize};
-use std::{num::NonZero, time::Duration};
-
-pub mod consts {
-    //! Preset idenitifiers and constants so that the profiler and proc macro agree
-    //! on their communication protocol.
-
-    /// The suffix on the actual test function.
-    pub const SUF_NORMAL: &str = "__ZED_PERF_FN";
-    /// The suffix on an extra function which prints metadata about a test to stdout.
-    pub const SUF_MDATA: &str = "__ZED_PERF_MDATA";
-    /// The env var in which we pass the iteration count to our tests.
-    pub const ITER_ENV_VAR: &str = "ZED_PERF_ITER";
-    /// The prefix printed on all benchmark test metadata lines, to distinguish it from
-    /// possible output by the test harness itself.
-    pub const MDATA_LINE_PREF: &str = "ZED_MDATA_";
-    /// The version number for the data returned from the test metadata function.
-    /// Increment on non-backwards-compatible changes.
-    pub const MDATA_VER: u32 = 0;
-    /// The default weight, if none is specified.
-    pub const WEIGHT_DEFAULT: u8 = 50;
-    /// How long a test must have run to be assumed to be reliable-ish.
-    pub const NOISE_CUTOFF: std::time::Duration = std::time::Duration::from_millis(250);
-
-    /// Identifier for the iteration count of a test metadata.
-    pub const ITER_COUNT_LINE_NAME: &str = "iter_count";
-    /// Identifier for the weight of a test metadata.
-    pub const WEIGHT_LINE_NAME: &str = "weight";
-    /// Identifier for importance in test metadata.
-    pub const IMPORTANCE_LINE_NAME: &str = "importance";
-    /// Identifier for the test metadata version.
-    pub const VERSION_LINE_NAME: &str = "version";
-
-    /// Where to save json run information.
-    pub const RUNS_DIR: &str = ".perf-runs";
-}
-
-/// How relevant a benchmark is.
-#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
-pub enum Importance {
-    /// Regressions shouldn't be accepted without good reason.
-    Critical = 4,
-    /// Regressions should be paid extra attention.
-    Important = 3,
-    /// No extra attention should be paid to regressions, but they might still
-    /// be indicative of something happening.
-    #[default]
-    Average = 2,
-    /// Unclear if regressions are likely to be meaningful, but still worth keeping
-    /// an eye on. Lowest level that's checked by default by the profiler.
-    Iffy = 1,
-    /// Regressions are likely to be spurious or don't affect core functionality.
-    /// Only relevant if a lot of them happen, or as supplemental evidence for a
-    /// higher-importance benchmark regressing. Not checked by default.
-    Fluff = 0,
-}
-
-impl std::fmt::Display for Importance {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            Importance::Critical => f.write_str("critical"),
-            Importance::Important => f.write_str("important"),
-            Importance::Average => f.write_str("average"),
-            Importance::Iffy => f.write_str("iffy"),
-            Importance::Fluff => f.write_str("fluff"),
-        }
-    }
-}
-
-/// Why or when did this test fail?
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub enum FailKind {
-    /// Failed while triaging it to determine the iteration count.
-    Triage,
-    /// Failed while profiling it.
-    Profile,
-    /// Failed due to an incompatible version for the test.
-    VersionMismatch,
-    /// Could not parse metadata for a test.
-    BadMetadata,
-    /// Skipped due to filters applied on the perf run.
-    Skipped,
-}
-
-impl std::fmt::Display for FailKind {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            FailKind::Triage => f.write_str("errored in triage"),
-            FailKind::Profile => f.write_str("errored while profiling"),
-            FailKind::VersionMismatch => f.write_str("test version mismatch"),
-            FailKind::BadMetadata => f.write_str("bad test metadata"),
-            FailKind::Skipped => f.write_str("skipped"),
-        }
-    }
-}
-
-/// Information about a given perf test.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct TestMdata {
-    /// A version number for when the test was generated. If this is greater
-    /// than the version this test handler expects, one of the following will
-    /// happen in an unspecified manner:
-    /// - The test is skipped silently.
-    /// - The handler exits with an error message indicating the version mismatch
-    ///   or inability to parse the metadata.
-    ///
-    /// INVARIANT: If `version` <= `MDATA_VER`, this tool *must* be able to
-    /// correctly parse the output of this test.
-    pub version: u32,
-    /// How many iterations to pass this test if this is preset, or how many
-    /// iterations a test ended up running afterwards if determined at runtime.
-    pub iterations: Option<NonZero<usize>>,
-    /// The importance of this particular test. See the docs on `Importance` for
-    /// details.
-    pub importance: Importance,
-    /// The weight of this particular test within its importance category. Used
-    /// when comparing across runs.
-    pub weight: u8,
-}
-
-/// The actual timings of a test, as measured by Hyperfine.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct Timings {
-    /// Mean runtime for `self.iter_total` runs of this test.
-    pub mean: Duration,
-    /// Standard deviation for the above.
-    pub stddev: Duration,
-}
-
-impl Timings {
-    /// How many iterations does this test seem to do per second?
-    #[expect(
-        clippy::cast_precision_loss,
-        reason = "We only care about a couple sig figs anyways"
-    )]
-    #[must_use]
-    pub fn iters_per_sec(&self, total_iters: NonZero<usize>) -> f64 {
-        (1000. / self.mean.as_millis() as f64) * total_iters.get() as f64
-    }
-}
-
-/// Aggregate results, meant to be used for a given importance category. Each
-/// test name corresponds to its benchmark results, iteration count, and weight.
-type CategoryInfo = HashMap<String, (Timings, NonZero<usize>, u8)>;
-
-/// Aggregate output of all tests run by this handler.
-#[derive(Clone, Debug, Default, Serialize, Deserialize)]
-pub struct Output {
-    /// A list of test outputs. Format is `(test_name, mdata, timings)`.
-    /// The latter being `Ok(_)` indicates the test succeeded.
-    ///
-    /// INVARIANT: If the test succeeded, the second field is `Some(mdata)` and
-    /// `mdata.iterations` is `Some(_)`.
-    tests: Vec<(String, Option<TestMdata>, Result<Timings, FailKind>)>,
-}
-
-impl Output {
-    /// Instantiates an empty "output". Useful for merging.
-    #[must_use]
-    pub fn blank() -> Self {
-        Output { tests: Vec::new() }
-    }
-
-    /// Reports a success and adds it to this run's `Output`.
-    pub fn success(
-        &mut self,
-        name: impl AsRef<str>,
-        mut mdata: TestMdata,
-        iters: NonZero<usize>,
-        timings: Timings,
-    ) {
-        mdata.iterations = Some(iters);
-        self.tests
-            .push((name.as_ref().to_string(), Some(mdata), Ok(timings)));
-    }
-
-    /// Reports a failure and adds it to this run's `Output`. If this test was tried
-    /// with some number of iterations (i.e. this was not a version mismatch or skipped
-    /// test), it should be reported also.
-    ///
-    /// Using the `fail!()` macro is usually more convenient.
-    pub fn failure(
-        &mut self,
-        name: impl AsRef<str>,
-        mut mdata: Option<TestMdata>,
-        attempted_iters: Option<NonZero<usize>>,
-        kind: FailKind,
-    ) {
-        if let Some(ref mut mdata) = mdata {
-            mdata.iterations = attempted_iters;
-        }
-        self.tests
-            .push((name.as_ref().to_string(), mdata, Err(kind)));
-    }
-
-    /// True if no tests executed this run.
-    #[must_use]
-    pub fn is_empty(&self) -> bool {
-        self.tests.is_empty()
-    }
-
-    /// Sorts the runs in the output in the order that we want them printed.
-    pub fn sort(&mut self) {
-        self.tests.sort_unstable_by(|a, b| match (a, b) {
-            // Tests where we got no metadata go at the end.
-            ((_, Some(_), _), (_, None, _)) => std::cmp::Ordering::Greater,
-            ((_, None, _), (_, Some(_), _)) => std::cmp::Ordering::Less,
-            // Then sort by importance, then weight.
-            ((_, Some(a_mdata), _), (_, Some(b_mdata), _)) => {
-                let c = a_mdata.importance.cmp(&b_mdata.importance);
-                if matches!(c, std::cmp::Ordering::Equal) {
-                    a_mdata.weight.cmp(&b_mdata.weight)
-                } else {
-                    c
-                }
-            }
-            // Lastly by name.
-            ((a_name, ..), (b_name, ..)) => a_name.cmp(b_name),
-        });
-    }
-
-    /// Merges the output of two runs, appending a prefix to the results of the new run.
-    /// To be used in conjunction with `Output::blank()`, or else only some tests will have
-    /// a prefix set.
-    pub fn merge<'a>(&mut self, other: Self, pref_other: impl Into<Option<&'a str>>) {
-        let pref = if let Some(pref) = pref_other.into() {
-            "crates/".to_string() + pref + "::"
-        } else {
-            String::new()
-        };
-        self.tests = std::mem::take(&mut self.tests)
-            .into_iter()
-            .chain(
-                other
-                    .tests
-                    .into_iter()
-                    .map(|(name, md, tm)| (pref.clone() + &name, md, tm)),
-            )
-            .collect();
-    }
-
-    /// Evaluates the performance of `self` against `baseline`. The latter is taken
-    /// as the comparison point, i.e. a positive resulting `PerfReport` means that
-    /// `self` performed better.
-    ///
-    /// # Panics
-    /// `self` and `baseline` are assumed to have the iterations field on all
-    /// `TestMdata`s set to `Some(_)` if the `TestMdata` is present itself.
-    #[must_use]
-    pub fn compare_perf(self, baseline: Self) -> PerfReport {
-        let self_categories = self.collapse();
-        let mut other_categories = baseline.collapse();
-
-        let deltas = self_categories
-            .into_iter()
-            .filter_map(|(cat, self_data)| {
-                // Only compare categories where both           meow
-                // runs have data.                              /
-                let mut other_data = other_categories.remove(&cat)?;
-                let mut max = f64::MIN;
-                let mut min = f64::MAX;
-
-                // Running totals for averaging out tests.
-                let mut r_total_numerator = 0.;
-                let mut r_total_denominator = 0;
-                // Yeah this is O(n^2), but realistically it'll hardly be a bottleneck.
-                for (name, (s_timings, s_iters, weight)) in self_data {
-                    // Only use the new weights if they conflict.
-                    let Some((o_timings, o_iters, _)) = other_data.remove(&name) else {
-                        continue;
-                    };
-                    let shift =
-                        (o_timings.iters_per_sec(o_iters) / s_timings.iters_per_sec(s_iters)) - 1.;
-                    if shift > max {
-                        max = shift;
-                    }
-                    if shift < min {
-                        min = shift;
-                    }
-                    r_total_numerator += shift * f64::from(weight);
-                    r_total_denominator += u32::from(weight);
-                }
-                // There were no runs here!
-                if r_total_denominator == 0 {
-                    None
-                } else {
-                    let mean = r_total_numerator / f64::from(r_total_denominator);
-                    // TODO: also aggregate standard deviation? That's harder to keep
-                    // meaningful, though, since we dk which tests are correlated.
-                    Some((cat, PerfDelta { max, mean, min }))
-                }
-            })
-            .collect();
-
-        PerfReport { deltas }
-    }
-
-    /// Collapses the `PerfReport` into a `HashMap` over `Importance`, with
-    /// each importance category having its tests contained.
-    fn collapse(self) -> HashMap<Importance, CategoryInfo> {
-        let mut categories = HashMap::<Importance, HashMap<String, _>>::default();
-        for entry in self.tests {
-            if let Some(mdata) = entry.1
-                && let Ok(timings) = entry.2
-            {
-                if let Some(handle) = categories.get_mut(&mdata.importance) {
-                    handle.insert(entry.0, (timings, mdata.iterations.unwrap(), mdata.weight));
-                } else {
-                    let mut new = HashMap::default();
-                    new.insert(entry.0, (timings, mdata.iterations.unwrap(), mdata.weight));
-                    categories.insert(mdata.importance, new);
-                }
-            }
-        }
-
-        categories
-    }
-}
-
-impl std::fmt::Display for Output {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        // Don't print the header for an empty run.
-        if self.tests.is_empty() {
-            return Ok(());
-        }
-
-        // We want to print important tests at the top, then alphabetical.
-        let mut sorted = self.clone();
-        sorted.sort();
-        // Markdown header for making a nice little table :>
-        writeln!(
-            f,
-            "| Command | Iter/sec | Mean [ms] | SD [ms] | Iterations | Importance (weight) |",
-        )?;
-        writeln!(f, "|:---|---:|---:|---:|---:|---:|")?;
-        for (name, metadata, timings) in &sorted.tests {
-            match metadata {
-                Some(metadata) => match timings {
-                    // Happy path.
-                    Ok(timings) => {
-                        // If the test succeeded, then metadata.iterations is Some(_).
-                        writeln!(
-                            f,
-                            "| {} | {:.2} | {} | {:.2} | {} | {} ({}) |",
-                            name,
-                            timings.iters_per_sec(metadata.iterations.unwrap()),
-                            {
-                                // Very small mean runtimes will give inaccurate
-                                // results. Should probably also penalise weight.
-                                let mean = timings.mean.as_secs_f64() * 1000.;
-                                if mean < consts::NOISE_CUTOFF.as_secs_f64() * 1000. / 8. {
-                                    format!("{mean:.2} (unreliable)")
-                                } else {
-                                    format!("{mean:.2}")
-                                }
-                            },
-                            timings.stddev.as_secs_f64() * 1000.,
-                            metadata.iterations.unwrap(),
-                            metadata.importance,
-                            metadata.weight,
-                        )?;
-                    }
-                    // We have (some) metadata, but the test errored.
-                    Err(err) => writeln!(
-                        f,
-                        "| ({}) {} | N/A | N/A | N/A | {} | {} ({}) |",
-                        err,
-                        name,
-                        metadata
-                            .iterations
-                            .map_or_else(|| "N/A".to_owned(), |i| format!("{i}")),
-                        metadata.importance,
-                        metadata.weight
-                    )?,
-                },
-                // No metadata, couldn't even parse the test output.
-                None => writeln!(
-                    f,
-                    "| ({}) {} | N/A | N/A | N/A | N/A | N/A |",
-                    timings.as_ref().unwrap_err(),
-                    name
-                )?,
-            }
-        }
-        Ok(())
-    }
-}
-
-/// The difference in performance between two runs within a given importance
-/// category.
-struct PerfDelta {
-    /// The biggest improvement / least bad regression.
-    max: f64,
-    /// The weighted average change in test times.
-    mean: f64,
-    /// The worst regression / smallest improvement.
-    min: f64,
-}
-
-/// Shim type for reporting all performance deltas across importance categories.
-pub struct PerfReport {
-    /// Inner (group, diff) pairing.
-    deltas: HashMap<Importance, PerfDelta>,
-}
-
-impl std::fmt::Display for PerfReport {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        if self.deltas.is_empty() {
-            return write!(f, "(no matching tests)");
-        }
-        let sorted = self.deltas.iter().collect::<Vec<_>>();
-        writeln!(f, "| Category | Max | Mean | Min |")?;
-        // We don't want to print too many newlines at the end, so handle newlines
-        // a little jankily like this.
-        write!(f, "|:---|---:|---:|---:|")?;
-        for (cat, delta) in sorted.into_iter().rev() {
-            const SIGN_POS: &str = "↑";
-            const SIGN_NEG: &str = "↓";
-            const SIGN_NEUTRAL: &str = "±";
-
-            let prettify = |time: f64| {
-                let sign = if time > 0.05 {
-                    SIGN_POS
-                } else if time < 0.05 && time > -0.05 {
-                    SIGN_NEUTRAL
-                } else {
-                    SIGN_NEG
-                };
-                format!("{} {:.1}%", sign, time.abs() * 100.)
-            };
-
-            // Pretty-print these instead of just using the float display impl.
-            write!(
-                f,
-                "\n| {cat} | {} | {} | {} |",
-                prettify(delta.max),
-                prettify(delta.mean),
-                prettify(delta.min)
-            )?;
-        }
-        Ok(())
-    }
-}
+mod implementation;
+pub use implementation::*;
  
  
  
    
    @@ -7,13 +7,13 @@ use clap::Parser;
 
 #[derive(Parser)]
 pub struct PublishGpuiArgs {
-    /// Optional pre-release identifier to append to the version (e.g., alpha, test.1). Always bumps the minor version.
-    #[arg(long)]
-    pre_release: Option<String>,
-
     /// Perform a dry-run and wait for user confirmation before each publish
     #[arg(long)]
     dry_run: bool,
+
+    /// Skip to a specific package (by package name or crate name) and start from there
+    #[arg(long)]
+    skip_to: Option<String>,
 }
 
 pub fn run_publish_gpui(args: PublishGpuiArgs) -> Result<()> {
@@ -24,12 +24,16 @@ pub fn run_publish_gpui(args: PublishGpuiArgs) -> Result<()> {
 
     let start_time = std::time::Instant::now();
     check_workspace_root()?;
-    ensure_cargo_set_version()?;
-    check_git_clean()?;
+
+    if args.skip_to.is_none() {
+        check_git_clean()?;
+    } else {
+        println!("Skipping git clean check due to --skip-to flag");
+    }
 
     let version = read_gpui_version()?;
     println!("Updating GPUI to version: {}", version);
-    publish_dependencies(&version, args.dry_run)?;
+    publish_dependencies(&version, args.dry_run, args.skip_to.as_deref())?;
     publish_gpui(&version, args.dry_run)?;
     println!("GPUI published in {}s", start_time.elapsed().as_secs_f32());
     Ok(())
@@ -52,62 +56,106 @@ fn read_gpui_version() -> Result<String> {
     Ok(version.to_string())
 }
 
-fn publish_dependencies(new_version: &str, dry_run: bool) -> Result<()> {
+fn publish_dependencies(new_version: &str, dry_run: bool, skip_to: Option<&str>) -> Result<()> {
     let gpui_dependencies = vec![
-        ("zed-collections", "collections"),
-        ("zed-perf", "perf"),
-        ("zed-util-macros", "util_macros"),
-        ("zed-util", "util"),
-        ("gpui-macros", "gpui_macros"),
-        ("zed-http-client", "http_client"),
-        ("zed-derive-refineable", "derive_refineable"),
-        ("zed-refineable", "refineable"),
-        ("zed-semantic-version", "semantic_version"),
-        ("zed-sum-tree", "sum_tree"),
-        ("zed-media", "media"),
+        ("collections", "gpui_collections", "crates"),
+        ("perf", "gpui_perf", "tooling"),
+        ("util_macros", "gpui_util_macros", "crates"),
+        ("util", "gpui_util", "crates"),
+        ("gpui_macros", "gpui-macros", "crates"),
+        ("http_client", "gpui_http_client", "crates"),
+        (
+            "derive_refineable",
+            "gpui_derive_refineable",
+            "crates/refineable",
+        ),
+        ("refineable", "gpui_refineable", "crates"),
+        ("semantic_version", "gpui_semantic_version", "crates"),
+        ("sum_tree", "gpui_sum_tree", "crates"),
+        ("media", "gpui_media", "crates"),
     ];
 
-    for (crate_name, package_name) in gpui_dependencies {
+    let mut should_skip = skip_to.is_some();
+    let skip_target = skip_to.unwrap_or("");
+
+    for (package_name, crate_name, package_dir) in gpui_dependencies {
+        if should_skip {
+            if package_name == skip_target || crate_name == skip_target {
+                println!("Found skip target: {} ({})", crate_name, package_name);
+                should_skip = false;
+            } else {
+                println!("Skipping: {} ({})", crate_name, package_name);
+                continue;
+            }
+        }
+
         println!(
             "Publishing dependency: {} (package: {})",
             crate_name, package_name
         );
 
-        update_crate_version(crate_name, new_version)?;
-        update_workspace_dependency_version(package_name, new_version)?;
+        update_crate_cargo_toml(package_name, crate_name, package_dir, new_version)?;
+        update_workspace_dependency_version(package_name, crate_name, new_version)?;
         publish_crate(crate_name, dry_run)?;
+    }
 
-        // println!("Waiting 60s for the rate limit...");
-        // thread::sleep(Duration::from_secs(60));
+    if should_skip {
+        bail!(
+            "Could not find package or crate named '{}' to skip to",
+            skip_target
+        );
     }
 
     Ok(())
 }
 
 fn publish_gpui(new_version: &str, dry_run: bool) -> Result<()> {
-    update_crate_version("gpui", new_version)?;
+    update_crate_cargo_toml("gpui", "gpui", "crates", new_version)?;
 
     publish_crate("gpui", dry_run)?;
 
     Ok(())
 }
 
-fn update_crate_version(package_name: &str, new_version: &str) -> Result<()> {
-    let output = run_command(
-        Command::new("cargo")
-            .arg("set-version")
-            .arg("--package")
-            .arg(package_name)
-            .arg(new_version),
-    )?;
+fn update_crate_cargo_toml(
+    package_name: &str,
+    crate_name: &str,
+    package_dir: &str,
+    new_version: &str,
+) -> Result<()> {
+    let cargo_toml_path = format!("{}/{}/Cargo.toml", package_dir, package_name);
+    let contents = std::fs::read_to_string(&cargo_toml_path)
+        .context(format!("Failed to read {}", cargo_toml_path))?;
 
-    if !output.status.success() {
-        bail!("Failed to set version for package {}", package_name);
-    }
+    let updated = update_crate_package_fields(&contents, crate_name, new_version)?;
+
+    std::fs::write(&cargo_toml_path, updated)
+        .context(format!("Failed to write {}", cargo_toml_path))?;
 
     Ok(())
 }
 
+fn update_crate_package_fields(
+    toml_contents: &str,
+    crate_name: &str,
+    new_version: &str,
+) -> Result<String> {
+    let mut doc = toml_contents
+        .parse::<toml_edit::DocumentMut>()
+        .context("Failed to parse TOML")?;
+
+    let package = doc
+        .get_mut("package")
+        .and_then(|p| p.as_table_like_mut())
+        .context("Failed to find [package] section")?;
+
+    package.insert("name", toml_edit::value(crate_name));
+    package.insert("version", toml_edit::value(new_version));
+    package.insert("publish", toml_edit::value(true));
+
+    Ok(doc.to_string())
+}
+
 fn publish_crate(crate_name: &str, dry_run: bool) -> Result<()> {
     let publish_crate_impl = |crate_name, dry_run| {
         let cargo = std::env::var("CARGO").unwrap_or_else(|_| "cargo".to_string());
@@ -142,29 +190,34 @@ fn publish_crate(crate_name: &str, dry_run: bool) -> Result<()> {
     Ok(())
 }
 
-fn update_workspace_dependency_version(package_name: &str, new_version: &str) -> Result<()> {
+fn update_workspace_dependency_version(
+    package_name: &str,
+    crate_name: &str,
+    new_version: &str,
+) -> Result<()> {
     let workspace_cargo_toml_path = "Cargo.toml";
     let contents = std::fs::read_to_string(workspace_cargo_toml_path)
         .context("Failed to read workspace Cargo.toml")?;
 
-    let updated = update_dependency_version_in_toml(&contents, package_name, new_version)?;
+    let mut doc = contents
+        .parse::<toml_edit::DocumentMut>()
+        .context("Failed to parse TOML")?;
+
+    update_dependency_version_in_doc(&mut doc, package_name, crate_name, new_version)?;
+    update_profile_override_in_doc(&mut doc, package_name, crate_name)?;
 
-    std::fs::write(workspace_cargo_toml_path, updated)
+    std::fs::write(workspace_cargo_toml_path, doc.to_string())
         .context("Failed to write workspace Cargo.toml")?;
 
     Ok(())
 }
 
-fn update_dependency_version_in_toml(
-    toml_contents: &str,
+fn update_dependency_version_in_doc(
+    doc: &mut toml_edit::DocumentMut,
     package_name: &str,
+    crate_name: &str,
     new_version: &str,
-) -> Result<String> {
-    let mut doc = toml_contents
-        .parse::<toml_edit::DocumentMut>()
-        .context("Failed to parse TOML")?;
-
-    // Navigate to workspace.dependencies.<package_name>
+) -> Result<()> {
     let dependency = doc
         .get_mut("workspace")
         .and_then(|w| w.get_mut("dependencies"))
@@ -174,21 +227,35 @@ fn update_dependency_version_in_toml(
             package_name
         ))?;
 
-    // Update the version field if it exists
     if let Some(dep_table) = dependency.as_table_like_mut() {
-        if dep_table.contains_key("version") {
-            dep_table.insert("version", toml_edit::value(new_version));
-        } else {
-            bail!(
-                "No version field found for {} in workspace dependencies",
-                package_name
-            );
-        }
+        dep_table.insert("version", toml_edit::value(new_version));
+        dep_table.insert("package", toml_edit::value(crate_name));
     } else {
         bail!("{} is not a table in workspace dependencies", package_name);
     }
 
-    Ok(doc.to_string())
+    Ok(())
+}
+
+fn update_profile_override_in_doc(
+    doc: &mut toml_edit::DocumentMut,
+    package_name: &str,
+    crate_name: &str,
+) -> Result<()> {
+    if let Some(profile_dev_package) = doc
+        .get_mut("profile")
+        .and_then(|p| p.get_mut("dev"))
+        .and_then(|d| d.get_mut("package"))
+        .and_then(|p| p.as_table_like_mut())
+    {
+        if let Some(old_entry) = profile_dev_package.get(package_name) {
+            let old_entry_clone = old_entry.clone();
+            profile_dev_package.remove(package_name);
+            profile_dev_package.insert(crate_name, old_entry_clone);
+        }
+    }
+
+    Ok(())
 }
 
 fn check_workspace_root() -> Result<()> {
@@ -215,27 +282,6 @@ fn check_workspace_root() -> Result<()> {
     Ok(())
 }
 
-fn ensure_cargo_set_version() -> Result<()> {
-    let output = run_command(
-        Command::new("which")
-            .arg("cargo-set-version")
-            .stdout(Stdio::piped()),
-    )
-    .context("Failed to check for cargo-set-version")?;
-
-    if !output.status.success() {
-        println!("cargo-set-version not found. Installing cargo-edit...");
-
-        let install_output = run_command(Command::new("cargo").arg("install").arg("cargo-edit"))?;
-
-        if !install_output.status.success() {
-            bail!("Failed to install cargo-edit");
-        }
-    }
-
-    Ok(())
-}
-
 fn check_git_clean() -> Result<()> {
     let output = run_command(
         Command::new("git")
@@ -281,6 +327,10 @@ fn run_command(command: &mut Command) -> Result<Output> {
         .wait_with_output()
         .context("failed to wait for child process")?;
 
+    if !output.status.success() {
+        bail!("Command failed with status {}", output.status);
+    }
+
     Ok(output)
 }
 
@@ -298,12 +348,17 @@ mod tests {
 
             [workspace.dependencies]
             # here's a comment
-            collections = { path = "crates/collections", package = "zed-collections", version = "0.1.0" }
+            collections = { path = "crates/collections" }
 
             util = { path = "crates/util", package = "zed-util", version = "0.1.0" }
         "#};
 
-        let result = update_dependency_version_in_toml(input, "collections", "0.2.0").unwrap();
+        let mut doc = input.parse::<toml_edit::DocumentMut>().unwrap();
+
+        update_dependency_version_in_doc(&mut doc, "collections", "gpui_collections", "0.2.0")
+            .unwrap();
+
+        let result = doc.to_string();
 
         let output = indoc! {r#"
             [workspace]
@@ -311,11 +366,77 @@ mod tests {
 
             [workspace.dependencies]
             # here's a comment
-            collections = { path = "crates/collections", package = "zed-collections", version = "0.2.0" }
+            collections = { path = "crates/collections" , version = "0.2.0", package = "gpui_collections" }
 
             util = { path = "crates/util", package = "zed-util", version = "0.1.0" }
         "#};
 
         assert_eq!(result, output);
     }
+
+    #[test]
+    fn test_update_crate_package_fields() {
+        let input = indoc! {r#"
+            [package]
+            name = "collections"
+            version = "0.1.0"
+            edition = "2021"
+            publish = false
+            # some comment about the license
+            license = "GPL-3.0-or-later"
+
+            [dependencies]
+            serde = "1.0"
+        "#};
+
+        let result = update_crate_package_fields(input, "gpui_collections", "0.2.0").unwrap();
+
+        let output = indoc! {r#"
+            [package]
+            name = "gpui_collections"
+            version = "0.2.0"
+            edition = "2021"
+            publish = true
+            # some comment about the license
+            license = "GPL-3.0-or-later"
+
+            [dependencies]
+            serde = "1.0"
+        "#};
+
+        assert_eq!(result, output);
+    }
+
+    #[test]
+    fn test_update_profile_override_in_toml() {
+        let input = indoc! {r#"
+            [profile.dev]
+            split-debuginfo = "unpacked"
+
+            [profile.dev.package]
+            taffy = { opt-level = 3 }
+            collections = { codegen-units = 256 }
+            refineable = { codegen-units = 256 }
+            util = { codegen-units = 256 }
+        "#};
+
+        let mut doc = input.parse::<toml_edit::DocumentMut>().unwrap();
+
+        update_profile_override_in_doc(&mut doc, "collections", "gpui_collections").unwrap();
+
+        let result = doc.to_string();
+
+        let output = indoc! {r#"
+            [profile.dev]
+            split-debuginfo = "unpacked"
+
+            [profile.dev.package]
+            taffy = { opt-level = 3 }
+            refineable = { codegen-units = 256 }
+            util = { codegen-units = 256 }
+            gpui_collections = { codegen-units = 256 }
+        "#};
+
+        assert_eq!(result, output);
+    }
 }