Cargo.lock 🔗
@@ -10188,8 +10188,10 @@ name = "miniprofiler_ui"
version = "0.1.0"
dependencies = [
"gpui",
+ "rpc",
"serde_json",
"smol",
+ "theme",
"util",
"workspace",
"zed_actions",
Created by John Tur.
Release Notes:
- The `zed: open performance profiler` action can now display profiling
data from the remote server.
Cargo.lock | 2
crates/collab/tests/integration/remote_editing_collaboration_tests.rs | 6
crates/gpui/src/app/async_context.rs | 2
crates/gpui/src/executor.rs | 1
crates/gpui/src/platform.rs | 4
crates/gpui/src/platform/linux/dispatcher.rs | 12
crates/gpui/src/platform/mac/dispatcher.rs | 15
crates/gpui/src/platform/test/dispatcher.rs | 9
crates/gpui/src/platform/windows/dispatcher.rs | 12
crates/gpui/src/platform_scheduler.rs | 1
crates/gpui/src/profiler.rs | 139
crates/miniprofiler_ui/Cargo.toml | 2
crates/miniprofiler_ui/src/miniprofiler_ui.rs | 615
crates/proto/proto/app.proto | 28
crates/proto/proto/zed.proto | 7
crates/proto/src/proto.rs | 6
crates/recent_projects/src/remote_connections.rs | 2
crates/remote_server/src/headless_project.rs | 53
crates/remote_server/src/remote_editing_tests.rs | 1
crates/remote_server/src/server.rs | 3
crates/scheduler/src/executor.rs | 2
crates/scheduler/src/scheduler.rs | 1
crates/scheduler/src/test_scheduler.rs | 1
crates/ui/src/components/scrollbar.rs | 4
24 files changed, 738 insertions(+), 190 deletions(-)
@@ -10188,8 +10188,10 @@ name = "miniprofiler_ui"
version = "0.1.0"
dependencies = [
"gpui",
+ "rpc",
"serde_json",
"smol",
+ "theme",
"util",
"workspace",
"zed_actions",
@@ -100,6 +100,7 @@ async fn test_sharing_an_ssh_remote_project(
node_runtime: node,
languages,
extension_host_proxy: Arc::new(ExtensionHostProxy::new()),
+ startup_time: std::time::Instant::now(),
},
false,
cx,
@@ -261,6 +262,7 @@ async fn test_ssh_collaboration_git_branches(
node_runtime: node,
languages,
extension_host_proxy: Arc::new(ExtensionHostProxy::new()),
+ startup_time: std::time::Instant::now(),
},
false,
cx,
@@ -466,6 +468,7 @@ async fn test_ssh_collaboration_formatting_with_prettier(
node_runtime: NodeRuntime::unavailable(),
languages,
extension_host_proxy: Arc::new(ExtensionHostProxy::new()),
+ startup_time: std::time::Instant::now(),
},
false,
cx,
@@ -628,6 +631,7 @@ async fn test_remote_server_debugger(
node_runtime: node,
languages,
extension_host_proxy: Arc::new(ExtensionHostProxy::new()),
+ startup_time: std::time::Instant::now(),
},
false,
cx,
@@ -740,6 +744,7 @@ async fn test_slow_adapter_startup_retries(
node_runtime: node,
languages,
extension_host_proxy: Arc::new(ExtensionHostProxy::new()),
+ startup_time: std::time::Instant::now(),
},
false,
cx,
@@ -946,6 +951,7 @@ async fn test_ssh_remote_worktree_trust(cx_a: &mut TestAppContext, server_cx: &m
node_runtime: node,
languages,
extension_host_proxy: Arc::new(ExtensionHostProxy::new()),
+ startup_time: std::time::Instant::now(),
},
true,
cx,
@@ -104,6 +104,7 @@ impl AppContext for AsyncApp {
lock.read_window(window, read)
}
+ #[track_caller]
fn background_spawn<R>(&self, future: impl Future<Output = R> + Send + 'static) -> Task<R>
where
R: Send + 'static,
@@ -407,6 +408,7 @@ impl AppContext for AsyncWindowContext {
self.app.read_window(window, read)
}
+ #[track_caller]
fn background_spawn<R>(&self, future: impl Future<Output = R> + Send + 'static) -> Task<R>
where
R: Send + 'static,
@@ -269,6 +269,7 @@ impl BackgroundExecutor {
/// Returns a task that will complete after the given duration.
/// Depending on other concurrent tasks the elapsed duration may be longer
/// than requested.
+ #[track_caller]
pub fn timer(&self, duration: Duration) -> Task<()> {
if duration.is_zero() {
return Task::ready(());
@@ -34,7 +34,7 @@ use crate::{
DEFAULT_WINDOW_SIZE, DevicePixels, DispatchEventResult, Font, FontId, FontMetrics, FontRun,
ForegroundExecutor, GlyphId, GpuSpecs, ImageSource, Keymap, LineLayout, Pixels, PlatformInput,
Point, Priority, RenderGlyphParams, RenderImage, RenderImageParams, RenderSvgParams, Scene,
- ShapedGlyph, ShapedRun, SharedString, Size, SvgRenderer, SystemWindowTab, Task, TaskTiming,
+ ShapedGlyph, ShapedRun, SharedString, Size, SvgRenderer, SystemWindowTab, Task,
ThreadTaskTimings, Window, WindowControlArea, hash, point, px, size,
};
use anyhow::Result;
@@ -620,7 +620,7 @@ impl Drop for TimerResolutionGuard {
#[doc(hidden)]
pub trait PlatformDispatcher: Send + Sync {
fn get_all_timings(&self) -> Vec<ThreadTaskTimings>;
- fn get_current_thread_timings(&self) -> Vec<TaskTiming>;
+ fn get_current_thread_timings(&self) -> ThreadTaskTimings;
fn is_main_thread(&self) -> bool;
fn dispatch(&self, runnable: RunnableVariant, priority: Priority);
fn dispatch_on_main_thread(&self, runnable: RunnableVariant, priority: Priority);
@@ -145,9 +145,11 @@ impl PlatformDispatcher for LinuxDispatcher {
ThreadTaskTimings::convert(&global_timings)
}
- fn get_current_thread_timings(&self) -> Vec<crate::TaskTiming> {
+ fn get_current_thread_timings(&self) -> crate::ThreadTaskTimings {
THREAD_TIMINGS.with(|timings| {
let timings = timings.lock();
+ let thread_name = timings.thread_name.clone();
+ let total_pushed = timings.total_pushed;
let timings = &timings.timings;
let mut vec = Vec::with_capacity(timings.len());
@@ -155,7 +157,13 @@ impl PlatformDispatcher for LinuxDispatcher {
let (s1, s2) = timings.as_slices();
vec.extend_from_slice(s1);
vec.extend_from_slice(s2);
- vec
+
+ crate::ThreadTaskTimings {
+ thread_name,
+ thread_id: std::thread::current().id(),
+ timings: vec,
+ total_pushed,
+ }
})
}
@@ -55,16 +55,25 @@ impl PlatformDispatcher for MacDispatcher {
ThreadTaskTimings::convert(&global_timings)
}
- fn get_current_thread_timings(&self) -> Vec<TaskTiming> {
+ fn get_current_thread_timings(&self) -> ThreadTaskTimings {
THREAD_TIMINGS.with(|timings| {
- let timings = &timings.lock().timings;
+ let timings = timings.lock();
+ let thread_name = timings.thread_name.clone();
+ let total_pushed = timings.total_pushed;
+ let timings = &timings.timings;
let mut vec = Vec::with_capacity(timings.len());
let (s1, s2) = timings.as_slices();
vec.extend_from_slice(s1);
vec.extend_from_slice(s2);
- vec
+
+ ThreadTaskTimings {
+ thread_name,
+ thread_id: std::thread::current().id(),
+ timings: vec,
+ total_pushed,
+ }
})
}
@@ -102,8 +102,13 @@ impl PlatformDispatcher for TestDispatcher {
Vec::new()
}
- fn get_current_thread_timings(&self) -> Vec<crate::TaskTiming> {
- Vec::new()
+ fn get_current_thread_timings(&self) -> crate::ThreadTaskTimings {
+ crate::ThreadTaskTimings {
+ thread_name: None,
+ thread_id: std::thread::current().id(),
+ timings: Vec::new(),
+ total_pushed: 0,
+ }
}
fn is_main_thread(&self) -> bool {
@@ -113,9 +113,11 @@ impl PlatformDispatcher for WindowsDispatcher {
ThreadTaskTimings::convert(&global_thread_timings)
}
- fn get_current_thread_timings(&self) -> Vec<crate::TaskTiming> {
+ fn get_current_thread_timings(&self) -> crate::ThreadTaskTimings {
THREAD_TIMINGS.with(|timings| {
let timings = timings.lock();
+ let thread_name = timings.thread_name.clone();
+ let total_pushed = timings.total_pushed;
let timings = &timings.timings;
let mut vec = Vec::with_capacity(timings.len());
@@ -123,7 +125,13 @@ impl PlatformDispatcher for WindowsDispatcher {
let (s1, s2) = timings.as_slices();
vec.extend_from_slice(s1);
vec.extend_from_slice(s2);
- vec
+
+ crate::ThreadTaskTimings {
+ thread_name,
+ thread_id: std::thread::current().id(),
+ timings: vec,
+ total_pushed,
+ }
})
}
@@ -95,6 +95,7 @@ impl Scheduler for PlatformScheduler {
self.dispatcher.spawn_realtime(f);
}
+ #[track_caller]
fn timer(&self, duration: Duration) -> Timer {
use std::sync::{Arc, atomic::AtomicBool};
@@ -1,5 +1,6 @@
use std::{
cell::LazyCell,
+ collections::HashMap,
hash::Hasher,
hash::{DefaultHasher, Hash},
sync::Arc,
@@ -9,6 +10,8 @@ use std::{
use serde::{Deserialize, Serialize};
+use crate::SharedString;
+
#[doc(hidden)]
#[derive(Debug, Copy, Clone)]
pub struct TaskTiming {
@@ -23,6 +26,7 @@ pub struct ThreadTaskTimings {
pub thread_name: Option<String>,
pub thread_id: ThreadId,
pub timings: Vec<TaskTiming>,
+ pub total_pushed: u64,
}
impl ThreadTaskTimings {
@@ -36,6 +40,7 @@ impl ThreadTaskTimings {
.map(|(thread_id, timings)| {
let timings = timings.lock();
let thread_name = timings.thread_name.clone();
+ let total_pushed = timings.total_pushed;
let timings = &timings.timings;
let mut vec = Vec::with_capacity(timings.len());
@@ -48,6 +53,7 @@ impl ThreadTaskTimings {
thread_name,
thread_id,
timings: vec,
+ total_pushed,
}
})
.collect()
@@ -55,20 +61,20 @@ impl ThreadTaskTimings {
}
/// Serializable variant of [`core::panic::Location`]
-#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
-pub struct SerializedLocation<'a> {
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SerializedLocation {
/// Name of the source file
- pub file: &'a str,
+ pub file: SharedString,
/// Line in the source file
pub line: u32,
/// Column in the source file
pub column: u32,
}
-impl<'a> From<&'a core::panic::Location<'a>> for SerializedLocation<'a> {
- fn from(value: &'a core::panic::Location<'a>) -> Self {
+impl From<&core::panic::Location<'static>> for SerializedLocation {
+ fn from(value: &core::panic::Location<'static>) -> Self {
SerializedLocation {
- file: value.file(),
+ file: value.file().into(),
line: value.line(),
column: value.column(),
}
@@ -77,23 +83,22 @@ impl<'a> From<&'a core::panic::Location<'a>> for SerializedLocation<'a> {
/// Serializable variant of [`TaskTiming`]
#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct SerializedTaskTiming<'a> {
+pub struct SerializedTaskTiming {
/// Location of the timing
- #[serde(borrow)]
- pub location: SerializedLocation<'a>,
+ pub location: SerializedLocation,
/// Time at which the measurement was reported in nanoseconds
pub start: u128,
/// Duration of the measurement in nanoseconds
pub duration: u128,
}
-impl<'a> SerializedTaskTiming<'a> {
+impl SerializedTaskTiming {
/// Convert an array of [`TaskTiming`] into their serializable format
///
/// # Params
///
/// `anchor` - [`Instant`] that should be earlier than all timings to use as base anchor
- pub fn convert(anchor: Instant, timings: &[TaskTiming]) -> Vec<SerializedTaskTiming<'static>> {
+ pub fn convert(anchor: Instant, timings: &[TaskTiming]) -> Vec<SerializedTaskTiming> {
let serialized = timings
.iter()
.map(|timing| {
@@ -117,26 +122,22 @@ impl<'a> SerializedTaskTiming<'a> {
/// Serializable variant of [`ThreadTaskTimings`]
#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct SerializedThreadTaskTimings<'a> {
+pub struct SerializedThreadTaskTimings {
/// Thread name
pub thread_name: Option<String>,
/// Hash of the thread id
pub thread_id: u64,
/// Timing records for this thread
- #[serde(borrow)]
- pub timings: Vec<SerializedTaskTiming<'a>>,
+ pub timings: Vec<SerializedTaskTiming>,
}
-impl<'a> SerializedThreadTaskTimings<'a> {
+impl SerializedThreadTaskTimings {
/// Convert [`ThreadTaskTimings`] into their serializable format
///
/// # Params
///
/// `anchor` - [`Instant`] that should be earlier than all timings to use as base anchor
- pub fn convert(
- anchor: Instant,
- timings: ThreadTaskTimings,
- ) -> SerializedThreadTaskTimings<'static> {
+ pub fn convert(anchor: Instant, timings: ThreadTaskTimings) -> SerializedThreadTaskTimings {
let serialized_timings = SerializedTaskTiming::convert(anchor, &timings.timings);
let mut hasher = DefaultHasher::new();
@@ -151,6 +152,96 @@ impl<'a> SerializedThreadTaskTimings<'a> {
}
}
+#[doc(hidden)]
+#[derive(Debug, Clone)]
+pub struct ThreadTimingsDelta {
+ /// Hashed thread id
+ pub thread_id: u64,
+ /// Thread name, if known
+ pub thread_name: Option<String>,
+ /// New timings since the last call. If the circular buffer wrapped around
+ /// since the previous poll, some entries may have been lost.
+ pub new_timings: Vec<SerializedTaskTiming>,
+}
+
+/// Tracks which timing events have already been seen so that callers can request only unseen events.
+#[doc(hidden)]
+pub struct ProfilingCollector {
+ startup_time: Instant,
+ cursors: HashMap<u64, u64>,
+}
+
+impl ProfilingCollector {
+ pub fn new(startup_time: Instant) -> Self {
+ Self {
+ startup_time,
+ cursors: HashMap::default(),
+ }
+ }
+
+ pub fn startup_time(&self) -> Instant {
+ self.startup_time
+ }
+
+ pub fn collect_unseen(
+ &mut self,
+ all_timings: Vec<ThreadTaskTimings>,
+ ) -> Vec<ThreadTimingsDelta> {
+ let mut deltas = Vec::with_capacity(all_timings.len());
+
+ for thread in all_timings {
+ let mut hasher = DefaultHasher::new();
+ thread.thread_id.hash(&mut hasher);
+ let hashed_id = hasher.finish();
+
+ let prev_cursor = self.cursors.get(&hashed_id).copied().unwrap_or(0);
+ let buffer_len = thread.timings.len() as u64;
+ let buffer_start = thread.total_pushed.saturating_sub(buffer_len);
+
+ let mut slice = if prev_cursor < buffer_start {
+ // Cursor fell behind the buffer — some entries were evicted.
+ // Return everything still in the buffer.
+ thread.timings.as_slice()
+ } else {
+ let skip = (prev_cursor - buffer_start) as usize;
+ &thread.timings[skip..]
+ };
+
+ // Don't emit the last entry if it's still in-progress (end: None).
+ let incomplete_at_end = slice.last().is_some_and(|t| t.end.is_none());
+ if incomplete_at_end {
+ slice = &slice[..slice.len() - 1];
+ }
+
+ let cursor_advance = if incomplete_at_end {
+ thread.total_pushed - 1
+ } else {
+ thread.total_pushed
+ };
+
+ self.cursors.insert(hashed_id, cursor_advance);
+
+ if slice.is_empty() {
+ continue;
+ }
+
+ let new_timings = SerializedTaskTiming::convert(self.startup_time, slice);
+
+ deltas.push(ThreadTimingsDelta {
+ thread_id: hashed_id,
+ thread_name: thread.thread_name,
+ new_timings,
+ });
+ }
+
+ deltas
+ }
+
+ pub fn reset(&mut self) {
+ self.cursors.clear();
+ }
+}
+
// Allow 20mb of task timing entries
const MAX_TASK_TIMINGS: usize = (20 * 1024 * 1024) / core::mem::size_of::<TaskTiming>();
@@ -190,6 +281,7 @@ pub(crate) struct ThreadTimings {
pub thread_name: Option<String>,
pub thread_id: ThreadId,
pub timings: Box<TaskTimings>,
+ pub total_pushed: u64,
}
impl ThreadTimings {
@@ -198,6 +290,7 @@ impl ThreadTimings {
thread_name,
thread_id,
timings: TaskTimings::boxed(),
+ total_pushed: 0,
}
}
}
@@ -221,15 +314,15 @@ impl Drop for ThreadTimings {
pub(crate) fn add_task_timing(timing: TaskTiming) {
THREAD_TIMINGS.with(|timings| {
let mut timings = timings.lock();
- let timings = &mut timings.timings;
- if let Some(last_timing) = timings.iter_mut().rev().next() {
- if last_timing.location == timing.location {
+ if let Some(last_timing) = timings.timings.back_mut() {
+ if last_timing.location == timing.location && last_timing.start == timing.start {
last_timing.end = timing.end;
return;
}
}
- timings.push_back(timing);
+ timings.timings.push_back(timing);
+ timings.total_pushed += 1;
});
}
@@ -13,6 +13,8 @@ path = "src/miniprofiler_ui.rs"
[dependencies]
gpui.workspace = true
+rpc.workspace = true
+theme.workspace = true
zed_actions.workspace = true
workspace.workspace = true
util.workspace = true
@@ -1,5 +1,5 @@
use std::{
- ops::Range,
+ hash::{DefaultHasher, Hash, Hasher},
path::PathBuf,
rc::Rc,
time::{Duration, Instant},
@@ -7,20 +7,60 @@ use std::{
use gpui::{
App, AppContext, ClipboardItem, Context, Div, Entity, Hsla, InteractiveElement,
- ParentElement as _, Render, SerializedTaskTiming, SharedString, StatefulInteractiveElement,
- Styled, Task, TaskTiming, TitlebarOptions, UniformListScrollHandle, WeakEntity, WindowBounds,
+ ParentElement as _, ProfilingCollector, Render, SerializedLocation, SerializedTaskTiming,
+ SerializedThreadTaskTimings, SharedString, StatefulInteractiveElement, Styled, Task,
+ ThreadTimingsDelta, TitlebarOptions, UniformListScrollHandle, WeakEntity, WindowBounds,
WindowOptions, div, prelude::FluentBuilder, px, relative, size, uniform_list,
};
+use rpc::{AnyProtoClient, proto};
use util::ResultExt;
use workspace::{
Workspace,
ui::{
- ActiveTheme, Button, ButtonCommon, ButtonStyle, Checkbox, Clickable, Divider,
- ScrollableHandle as _, ToggleState, Tooltip, WithScrollbar, h_flex, v_flex,
+ ActiveTheme, Button, ButtonCommon, ButtonStyle, Checkbox, Clickable, ContextMenu, Divider,
+ DropdownMenu, ScrollAxes, ScrollableHandle as _, Scrollbars, ToggleState, Tooltip,
+ WithScrollbar, h_flex, v_flex,
},
};
use zed_actions::OpenPerformanceProfiler;
+const NANOS_PER_MS: u128 = 1_000_000;
+const VISIBLE_WINDOW_NANOS: u128 = 10 * 1_000_000_000;
+const REMOTE_POLL_INTERVAL: Duration = Duration::from_millis(500);
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+enum ProfileSource {
+ Foreground,
+ AllThreads,
+ RemoteForeground,
+ RemoteAllThreads,
+}
+
+impl ProfileSource {
+ fn label(&self) -> &'static str {
+ match self {
+ ProfileSource::Foreground => "Foreground",
+ ProfileSource::AllThreads => "All threads",
+ ProfileSource::RemoteForeground => "Remote: Foreground",
+ ProfileSource::RemoteAllThreads => "Remote: All threads",
+ }
+ }
+
+ fn is_remote(&self) -> bool {
+ matches!(
+ self,
+ ProfileSource::RemoteForeground | ProfileSource::RemoteAllThreads
+ )
+ }
+
+ fn foreground_only(&self) -> bool {
+ matches!(
+ self,
+ ProfileSource::Foreground | ProfileSource::RemoteForeground
+ )
+ }
+}
+
pub fn init(startup_time: Instant, cx: &mut App) {
cx.observe_new(move |workspace: &mut workspace::Workspace, _, cx| {
let workspace_handle = cx.entity().downgrade();
@@ -52,50 +92,54 @@ fn open_performance_profiler(
return;
}
- let default_bounds = size(px(1280.), px(720.)); // 16:9
-
- cx.open_window(
- WindowOptions {
- titlebar: Some(TitlebarOptions {
- title: Some("Profiler Window".into()),
- appears_transparent: false,
- traffic_light_position: None,
- }),
- focus: true,
- show: true,
- is_movable: true,
- kind: gpui::WindowKind::Normal,
- window_background: cx.theme().window_background_appearance(),
- window_decorations: None,
- window_min_size: Some(default_bounds),
- window_bounds: Some(WindowBounds::centered(default_bounds, cx)),
- ..Default::default()
- },
- |_window, cx| ProfilerWindow::new(startup_time, Some(workspace_handle), cx),
- )
- .log_err();
-}
-
-enum DataMode {
- Realtime(Option<Vec<TaskTiming>>),
- Snapshot(Vec<TaskTiming>),
+ let window_background = cx.theme().window_background_appearance();
+ let default_bounds = size(px(1280.), px(720.));
+
+ cx.defer(move |cx| {
+ cx.open_window(
+ WindowOptions {
+ titlebar: Some(TitlebarOptions {
+ title: Some("Profiler Window".into()),
+ appears_transparent: false,
+ traffic_light_position: None,
+ }),
+ focus: true,
+ show: true,
+ is_movable: true,
+ kind: gpui::WindowKind::Normal,
+ window_background,
+ window_decorations: None,
+ window_min_size: Some(default_bounds),
+ window_bounds: Some(WindowBounds::centered(default_bounds, cx)),
+ ..Default::default()
+ },
+ |_window, cx| ProfilerWindow::new(startup_time, Some(workspace_handle), cx),
+ )
+ .log_err();
+ });
}
struct TimingBar {
- location: &'static core::panic::Location<'static>,
- start: Instant,
- end: Instant,
+ location: SerializedLocation,
+ start_nanos: u128,
+ duration_nanos: u128,
color: Hsla,
}
pub struct ProfilerWindow {
- startup_time: Instant,
- data: DataMode,
+ collector: ProfilingCollector,
+ source: ProfileSource,
+ timings: Vec<SerializedThreadTaskTimings>,
+ paused: bool,
+ display_timings: Rc<Vec<SerializedTaskTiming>>,
include_self_timings: ToggleState,
autoscroll: bool,
scroll_handle: UniformListScrollHandle,
workspace: Option<WeakEntity<Workspace>>,
- _refresh: Option<Task<()>>,
+ has_remote: bool,
+ remote_now_nanos: u128,
+ remote_received_at: Option<Instant>,
+ _remote_poll_task: Option<Task<()>>,
}
impl ProfilerWindow {
@@ -104,75 +148,262 @@ impl ProfilerWindow {
workspace_handle: Option<WeakEntity<Workspace>>,
cx: &mut App,
) -> Entity<Self> {
- let entity = cx.new(|cx| ProfilerWindow {
- startup_time,
- data: DataMode::Realtime(None),
+ cx.new(|_cx| ProfilerWindow {
+ collector: ProfilingCollector::new(startup_time),
+ source: ProfileSource::Foreground,
+ timings: Vec::new(),
+ paused: false,
+ display_timings: Rc::new(Vec::new()),
include_self_timings: ToggleState::Unselected,
autoscroll: true,
scroll_handle: UniformListScrollHandle::default(),
workspace: workspace_handle,
- _refresh: Some(Self::begin_listen(cx)),
- });
+ has_remote: false,
+ remote_now_nanos: 0,
+ remote_received_at: None,
+ _remote_poll_task: None,
+ })
+ }
+
+ fn poll_timings(&mut self, cx: &App) {
+ self.has_remote = self.remote_proto_client(cx).is_some();
+ match self.source {
+ ProfileSource::Foreground => {
+ let dispatcher = cx.foreground_executor().dispatcher();
+ let current_thread = dispatcher.get_current_thread_timings();
+ let deltas = self.collector.collect_unseen(vec![current_thread]);
+ self.apply_deltas(deltas);
+ }
+ ProfileSource::AllThreads => {
+ let dispatcher = cx.foreground_executor().dispatcher();
+ let all_timings = dispatcher.get_all_timings();
+ let deltas = self.collector.collect_unseen(all_timings);
+ self.apply_deltas(deltas);
+ }
+ ProfileSource::RemoteForeground | ProfileSource::RemoteAllThreads => {
+ // Remote timings arrive asynchronously via apply_remote_response.
+ }
+ }
+ self.rebuild_display_timings();
+ }
- entity
+ fn rebuild_display_timings(&mut self) {
+ let include_self = self.include_self_timings.selected();
+ let cutoff_nanos = self.now_nanos().saturating_sub(VISIBLE_WINDOW_NANOS);
+
+ let per_thread: Vec<Vec<SerializedTaskTiming>> = self
+ .timings
+ .iter()
+ .map(|thread| {
+ let visible = visible_tail(&thread.timings, cutoff_nanos);
+ filter_timings(visible.iter().cloned(), include_self)
+ })
+ .collect();
+ self.display_timings = Rc::new(kway_merge(per_thread));
}
- fn begin_listen(cx: &mut Context<Self>) -> Task<()> {
- cx.spawn(async move |this, cx| {
+ fn now_nanos(&self) -> u128 {
+ if self.source.is_remote() {
+ let elapsed_since_poll = self
+ .remote_received_at
+ .map(|at| Instant::now().duration_since(at).as_nanos())
+ .unwrap_or(0);
+ self.remote_now_nanos + elapsed_since_poll
+ } else {
+ Instant::now()
+ .duration_since(self.collector.startup_time())
+ .as_nanos()
+ }
+ }
+
+ fn set_source(&mut self, source: ProfileSource, cx: &mut Context<Self>) {
+ if self.source == source {
+ return;
+ }
+
+ self.source = source;
+
+ self.timings.clear();
+ self.collector.reset();
+ self.display_timings = Rc::new(Vec::new());
+ self.remote_now_nanos = 0;
+ self.remote_received_at = None;
+ self.has_remote = self.remote_proto_client(cx).is_some();
+
+ if source.is_remote() {
+ self.start_remote_polling(cx);
+ } else {
+ self._remote_poll_task = None;
+ }
+ }
+
+ fn remote_proto_client(&self, cx: &App) -> Option<AnyProtoClient> {
+ let workspace = self.workspace.as_ref()?;
+ workspace
+ .read_with(cx, |workspace, cx| {
+ let project = workspace.project().read(cx);
+ let remote_client = project.remote_client()?;
+ Some(remote_client.read(cx).proto_client())
+ })
+ .log_err()
+ .flatten()
+ }
+
+ fn start_remote_polling(&mut self, cx: &mut Context<Self>) {
+ let Some(proto_client) = self.remote_proto_client(cx) else {
+ return;
+ };
+
+ let source_foreground_only = self.source.foreground_only();
+ let weak = cx.weak_entity();
+ self._remote_poll_task = Some(cx.spawn(async move |_this, cx| {
loop {
- let data = cx
- .foreground_executor()
- .dispatcher()
- .get_current_thread_timings();
-
- this.update(cx, |this: &mut ProfilerWindow, cx| {
- this.data = DataMode::Realtime(Some(data));
- cx.notify();
- })
- .ok();
-
- // yield to the executor
- cx.background_executor()
- .timer(Duration::from_micros(1))
+ let response = proto_client
+ .request(proto::GetRemoteProfilingData {
+ project_id: proto::REMOTE_SERVER_PROJECT_ID,
+ foreground_only: source_foreground_only,
+ })
.await;
+
+ match response {
+ Ok(response) => {
+ let ok = weak.update(&mut cx.clone(), |this, cx| {
+ this.apply_remote_response(response);
+ cx.notify();
+ });
+ if ok.is_err() {
+ break;
+ }
+ }
+ Err(error) => {
+ Err::<(), _>(error).log_err();
+ }
+ }
+
+ cx.background_executor().timer(REMOTE_POLL_INTERVAL).await;
}
- })
+ }));
}
- fn get_timings(&self) -> Option<&Vec<TaskTiming>> {
- match &self.data {
- DataMode::Realtime(data) => data.as_ref(),
- DataMode::Snapshot(data) => Some(data),
- }
- }
+ fn apply_remote_response(&mut self, response: proto::GetRemoteProfilingDataResponse) {
+ self.has_remote = true;
+ self.remote_now_nanos = response.now_nanos as u128;
+ self.remote_received_at = Some(Instant::now());
+ let deltas = response
+ .threads
+ .into_iter()
+ .map(|thread| {
+ let new_timings = thread
+ .timings
+ .into_iter()
+ .map(|t| {
+ let location = t.location.unwrap_or_default();
+ SerializedTaskTiming {
+ location: SerializedLocation {
+ file: SharedString::from(location.file),
+ line: location.line,
+ column: location.column,
+ },
+ start: t.start_nanos as u128,
+ duration: t.duration_nanos as u128,
+ }
+ })
+ .collect();
+ ThreadTimingsDelta {
+ thread_id: thread.thread_id,
+ thread_name: thread.thread_name,
+ new_timings,
+ }
+ })
+ .collect();
- fn render_timing(value_range: Range<Instant>, item: TimingBar, cx: &App) -> Div {
- let time_ms = item.end.duration_since(item.start).as_secs_f32() * 1000f32;
+ self.apply_deltas(deltas);
+ self.rebuild_display_timings();
+ }
- let remap = value_range
- .end
- .duration_since(value_range.start)
- .as_secs_f32()
- * 1000f32;
+ fn apply_deltas(&mut self, deltas: Vec<ThreadTimingsDelta>) {
+ for delta in deltas {
+ append_to_thread(
+ &mut self.timings,
+ delta.thread_id,
+ delta.thread_name,
+ delta.new_timings,
+ );
+ }
+ }
- let start = (item.start.duration_since(value_range.start).as_secs_f32() * 1000f32) / remap;
- let end = (item.end.duration_since(value_range.start).as_secs_f32() * 1000f32) / remap;
+ fn render_source_dropdown(
+ &self,
+ window: &mut gpui::Window,
+ cx: &mut Context<Self>,
+ ) -> DropdownMenu {
+ let weak = cx.weak_entity();
+ let current_source = self.source;
+ let has_remote = self.has_remote;
+
+ let mut sources = vec![ProfileSource::Foreground, ProfileSource::AllThreads];
+ if has_remote {
+ sources.push(ProfileSource::RemoteForeground);
+ sources.push(ProfileSource::RemoteAllThreads);
+ }
- let bar_width = end - start.abs();
+ DropdownMenu::new(
+ "profile-source",
+ current_source.label(),
+ ContextMenu::build(window, cx, move |mut menu, window, cx| {
+ for source in &sources {
+ let source = *source;
+ let weak = weak.clone();
+ menu = menu.entry(source.label(), None, move |_, cx| {
+ weak.update(cx, |this, cx| {
+ this.set_source(source, cx);
+ cx.notify();
+ })
+ .log_err();
+ });
+ }
+ if let Some(index) = sources.iter().position(|s| *s == current_source) {
+ for _ in 0..=index {
+ menu.select_next(&Default::default(), window, cx);
+ }
+ }
+ menu
+ }),
+ )
+ }
- let location = item
- .location
- .file()
- .rsplit_once("/")
- .unwrap_or(("", item.location.file()))
- .1;
- let location = location.rsplit_once("\\").unwrap_or(("", location)).1;
+ fn render_timing(
+ window_start_nanos: u128,
+ window_duration_nanos: u128,
+ item: TimingBar,
+ cx: &App,
+ ) -> Div {
+ let time_ms = item.duration_nanos as f32 / NANOS_PER_MS as f32;
+
+ let start_fraction = if item.start_nanos >= window_start_nanos {
+ (item.start_nanos - window_start_nanos) as f32 / window_duration_nanos as f32
+ } else {
+ 0.0
+ };
+
+ let end_nanos = item.start_nanos + item.duration_nanos;
+ let end_fraction = if end_nanos >= window_start_nanos {
+ (end_nanos - window_start_nanos) as f32 / window_duration_nanos as f32
+ } else {
+ 0.0
+ };
+
+ let start_fraction = start_fraction.clamp(0.0, 1.0);
+ let end_fraction = end_fraction.clamp(0.0, 1.0);
+ let bar_width = (end_fraction - start_fraction).max(0.0);
+
+ let file_str: &str = &item.location.file;
+ let basename = file_str.rsplit_once("/").unwrap_or(("", file_str)).1;
+ let basename = basename.rsplit_once("\\").unwrap_or(("", basename)).1;
let label = SharedString::from(format!(
"{}:{}:{}",
- location,
- item.location.line(),
- item.location.column()
+ basename, item.location.line, item.location.column
));
h_flex()
@@ -205,7 +436,7 @@ impl ProfilerWindow {
.h_full()
.rounded_sm()
.bg(item.color)
- .left(relative(start.max(0f32)))
+ .left(relative(start_fraction.max(0.0)))
.w(relative(bar_width)),
),
)
@@ -225,6 +456,12 @@ impl Render for ProfilerWindow {
window: &mut gpui::Window,
cx: &mut gpui::Context<Self>,
) -> impl gpui::IntoElement {
+ let ui_font = theme::setup_ui_font(window, cx);
+ if !self.paused {
+ self.poll_timings(cx);
+ window.request_animation_frame();
+ }
+
let scroll_offset = self.scroll_handle.offset();
let max_offset = self.scroll_handle.max_offset();
self.autoscroll = -scroll_offset.y >= (max_offset.height - px(24.));
@@ -232,8 +469,11 @@ impl Render for ProfilerWindow {
self.scroll_handle.scroll_to_bottom();
}
+ let display_timings = self.display_timings.clone();
+
v_flex()
.id("profiler")
+ .font(ui_font)
.w_full()
.h_full()
.bg(cx.theme().colors().surface_background)
@@ -247,28 +487,21 @@ impl Render for ProfilerWindow {
.child(
h_flex()
.gap_2()
+ .child(self.render_source_dropdown(window, cx))
.child(
Button::new(
"switch-mode",
- match self.data {
- DataMode::Snapshot { .. } => "Resume",
- DataMode::Realtime(_) => "Pause",
- },
+ if self.paused { "Resume" } else { "Pause" },
)
.style(ButtonStyle::Filled)
.on_click(cx.listener(
|this, _, _window, cx| {
- match &this.data {
- DataMode::Realtime(Some(data)) => {
- this._refresh = None;
- this.data = DataMode::Snapshot(data.clone());
- }
- DataMode::Snapshot { .. } => {
- this._refresh = Some(Self::begin_listen(cx));
- this.data = DataMode::Realtime(None);
- }
- _ => {}
- };
+ this.paused = !this.paused;
+ if !this.paused && this.source.is_remote() {
+ this.start_remote_polling(cx);
+ } else if this.paused && this.source.is_remote() {
+ this._remote_poll_task = None;
+ }
cx.notify();
},
)),
@@ -281,11 +514,24 @@ impl Render for ProfilerWindow {
return;
};
- let Some(data) = this.get_timings() else {
+ if this.timings.iter().all(|t| t.timings.is_empty()) {
+ return;
+ }
+
+ let serialized = if this.source.foreground_only() {
+ let flat: Vec<&SerializedTaskTiming> = this
+ .timings
+ .iter()
+ .flat_map(|t| &t.timings)
+ .collect();
+ serde_json::to_string(&flat)
+ } else {
+ serde_json::to_string(&this.timings)
+ };
+
+ let Some(serialized) = serialized.log_err() else {
return;
};
- let timings =
- SerializedTaskTiming::convert(this.startup_time, &data);
let active_path = workspace
.read_with(cx, |workspace, cx| {
@@ -310,13 +556,7 @@ impl Render for ProfilerWindow {
return;
};
- let Some(timings) =
- serde_json::to_string(&timings).log_err()
- else {
- return;
- };
-
- smol::fs::write(path, &timings).await.log_err();
+ smol::fs::write(path, &serialized).await.log_err();
})
.detach();
})),
@@ -331,33 +571,11 @@ impl Render for ProfilerWindow {
})),
),
)
- .when_some(self.get_timings(), |div, e| {
- if e.len() == 0 {
- return div;
- }
+ .when(!display_timings.is_empty(), |div| {
+ let now_nanos = self.now_nanos();
- let min = e[0].start;
- let max = e[e.len() - 1].end.unwrap_or_else(|| Instant::now());
- let timings = Rc::new(
- e.into_iter()
- .filter(|timing| {
- timing
- .end
- .unwrap_or_else(|| Instant::now())
- .duration_since(timing.start)
- .as_millis()
- >= 1
- })
- .filter(|timing| {
- if self.include_self_timings.selected() {
- true
- } else {
- !timing.location.file().ends_with("miniprofiler_ui.rs")
- }
- })
- .cloned()
- .collect::<Vec<_>>(),
- );
+ let window_start_nanos = now_nanos.saturating_sub(VISIBLE_WINDOW_NANOS);
+ let window_duration_nanos = VISIBLE_WINDOW_NANOS;
div.child(Divider::horizontal()).child(
v_flex()
@@ -366,25 +584,22 @@ impl Render for ProfilerWindow {
.h_full()
.gap_2()
.child(
- uniform_list("list", timings.len(), {
- let timings = timings.clone();
+ uniform_list("list", display_timings.len(), {
+ let timings = display_timings.clone();
move |visible_range, _, cx| {
let mut items = vec![];
for i in visible_range {
let timing = &timings[i];
- let value_range =
- max.checked_sub(Duration::from_secs(10)).unwrap_or(min)
- ..max;
items.push(Self::render_timing(
- value_range,
+ window_start_nanos,
+ window_duration_nanos,
TimingBar {
- location: timing.location,
- start: timing.start,
- end: timing.end.unwrap_or_else(|| Instant::now()),
- color: cx
- .theme()
- .accents()
- .color_for_index(i as u32),
+ location: timing.location.clone(),
+ start_nanos: timing.start,
+ duration_nanos: timing.duration,
+ color: cx.theme().accents().color_for_index(
+ location_color_index(&timing.location),
+ ),
},
cx,
));
@@ -400,8 +615,102 @@ impl Render for ProfilerWindow {
.track_scroll(&self.scroll_handle)
.size_full(),
)
- .vertical_scrollbar_for(&self.scroll_handle, window, cx),
+ .custom_scrollbars(
+ Scrollbars::always_visible(ScrollAxes::Vertical)
+ .tracked_scroll_handle(&self.scroll_handle),
+ window,
+ cx,
+ ),
)
})
}
}
+
+/// Upper bound on how many timings per thread are considered for display.
+const MAX_VISIBLE_PER_THREAD: usize = 10_000;
+
+/// Returns the suffix of `timings` (at most `MAX_VISIBLE_PER_THREAD` entries)
+/// whose spans did not finish entirely before `cutoff_nanos`.
+///
+/// Assumes `timings` is sorted by `start`. Because `start + duration` is not
+/// monotonic, the tail is scanned backwards and cut one past the most recent
+/// entry that ended strictly before the cutoff.
+fn visible_tail(timings: &[SerializedTaskTiming], cutoff_nanos: u128) -> &[SerializedTaskTiming] {
+    let tail_start = timings.len().saturating_sub(MAX_VISIBLE_PER_THREAD);
+    let tail = &timings[tail_start..];
+    let first_visible = tail
+        .iter()
+        .rposition(|timing| timing.start + timing.duration < cutoff_nanos)
+        .map_or(0, |index| index + 1);
+    &tail[first_visible..]
+}
+
+/// Keeps only timings worth displaying: spans lasting at least one
+/// millisecond, and (unless `include_self` is set) spans that did not
+/// originate from the profiler UI itself.
+fn filter_timings(
+    timings: impl Iterator<Item = SerializedTaskTiming>,
+    include_self: bool,
+) -> Vec<SerializedTaskTiming> {
+    timings
+        .filter(|timing| {
+            let long_enough = timing.duration >= NANOS_PER_MS;
+            let not_self = include_self || !timing.location.file.ends_with("miniprofiler_ui.rs");
+            long_enough && not_self
+        })
+        .collect()
+}
+
+/// Derives a stable color index from a source location, so the same
+/// file/line/column always maps to the same accent color across frames.
+fn location_color_index(location: &SerializedLocation) -> u32 {
+    let mut hasher = DefaultHasher::new();
+    // Hashing the tuple feeds file, line, and column into the hasher in
+    // sequence — the same stream as hashing each field individually.
+    (&location.file, location.line, location.column).hash(&mut hasher);
+    hasher.finish() as u32
+}
+
+/// Merge K sorted `Vec<SerializedTaskTiming>` into a single sorted vec.
+/// Each input vec must already be sorted by `start`.
+///
+/// Implemented as flatten + stable sort rather than a cursor-based K-way
+/// merge: the previous merge rescanned all K cursors per element (O(total·K))
+/// and cloned every timing, while this version moves elements out of the
+/// owned `lists` with no clones. A *stable* sort keyed on `start` preserves
+/// the relative order of equal-`start` entries (lower list index first, then
+/// earlier position), which is exactly the tie-break the strict-`<` cursor
+/// merge produced, so the output ordering is unchanged.
+fn kway_merge(lists: Vec<Vec<SerializedTaskTiming>>) -> Vec<SerializedTaskTiming> {
+    let mut merged: Vec<SerializedTaskTiming> = lists.into_iter().flatten().collect();
+    merged.sort_by_key(|timing| timing.start);
+    merged
+}
+
+/// Appends `new_timings` to the entry for `thread_id` in `threads`,
+/// creating the entry if this thread has not been seen before. A known
+/// thread keeps its existing name unless it had none, in which case the
+/// provided `thread_name` fills it in.
+fn append_to_thread(
+    threads: &mut Vec<SerializedThreadTaskTimings>,
+    thread_id: u64,
+    thread_name: Option<String>,
+    new_timings: Vec<SerializedTaskTiming>,
+) {
+    match threads.iter_mut().find(|thread| thread.thread_id == thread_id) {
+        Some(thread) => {
+            thread.timings.extend(new_timings);
+            if thread.thread_name.is_none() {
+                thread.thread_name = thread_name;
+            }
+        }
+        None => threads.push(SerializedThreadTaskTimings {
+            thread_name,
+            thread_id,
+            timings: new_timings,
+        }),
+    }
+}
@@ -63,3 +63,31 @@ message AskPassRequest {
message AskPassResponse {
string response = 1;
}
+
+// Request for task-timing data collected on the remote (headless) server.
+message GetRemoteProfilingData {
+    uint64 project_id = 1;
+    // When true, the server returns timings for its foreground thread only
+    // instead of timings for all threads.
+    bool foreground_only = 2;
+}
+
+// Per-thread timing deltas plus the server's clock reading at response time.
+message GetRemoteProfilingDataResponse {
+    repeated RemoteProfilingThread threads = 1;
+    // Nanoseconds elapsed since the remote server started, sampled when the
+    // response was built; lets the client align remote timings with "now".
+    uint64 now_nanos = 2;
+}
+
+// Timings recorded on a single remote thread.
+message RemoteProfilingThread {
+    optional string thread_name = 1;
+    uint64 thread_id = 2;
+    repeated RemoteProfilingTiming timings = 3;
+}
+
+// A single timed span: where it originated and when/how long it ran.
+message RemoteProfilingTiming {
+    RemoteProfilingLocation location = 1;
+    // Start offset in nanoseconds, on the same since-startup clock as
+    // `now_nanos` above.
+    uint64 start_nanos = 2;
+    uint64 duration_nanos = 3;
+}
+
+// Source location (file/line/column) associated with a timed span.
+message RemoteProfilingLocation {
+    string file = 1;
+    uint32 line = 2;
+    uint32 column = 3;
+}
@@ -457,7 +457,7 @@ message Envelope {
FindSearchCandidatesCancelled find_search_candidates_cancelled = 410;
GetContextServerCommand get_context_server_command = 411;
ContextServerCommand context_server_command = 412;
-
+
AllocateWorktreeId allocate_worktree_id = 413;
AllocateWorktreeIdResponse allocate_worktree_id_response = 414;
@@ -469,7 +469,10 @@ message Envelope {
SemanticTokensResponse semantic_tokens_response = 419;
RefreshSemanticTokens refresh_semantic_tokens = 420;
GetFoldingRanges get_folding_ranges = 421;
- GetFoldingRangesResponse get_folding_ranges_response = 422; // current max
+ GetFoldingRangesResponse get_folding_ranges_response = 422;
+
+ GetRemoteProfilingData get_remote_profiling_data = 423;
+ GetRemoteProfilingDataResponse get_remote_profiling_data_response = 424; // current max
}
reserved 87 to 88;
@@ -359,6 +359,8 @@ messages!(
(GetSharedAgentThreadResponse, Foreground),
(FindSearchCandidatesChunk, Background),
(FindSearchCandidatesCancelled, Background),
+ (GetRemoteProfilingData, Background),
+ (GetRemoteProfilingDataResponse, Background),
);
request_messages!(
@@ -555,6 +557,7 @@ request_messages!(
(TrustWorktrees, Ack),
(RestrictWorktrees, Ack),
(FindSearchCandidatesChunk, Ack),
+ (GetRemoteProfilingData, GetRemoteProfilingDataResponse),
);
lsp_messages!(
@@ -741,7 +744,8 @@ entity_messages!(
RestrictWorktrees,
FindSearchCandidatesChunk,
FindSearchCandidatesCancelled,
- DownloadFileByPath
+ DownloadFileByPath,
+ GetRemoteProfilingData
);
entity_messages!(
@@ -562,6 +562,7 @@ mod tests {
node_runtime,
languages,
extension_host_proxy: proxy,
+ startup_time: std::time::Instant::now(),
},
false,
cx,
@@ -643,6 +644,7 @@ mod tests {
node_runtime,
languages,
extension_host_proxy: proxy,
+ startup_time: std::time::Instant::now(),
},
false,
cx,
@@ -40,6 +40,7 @@ use std::{
Arc,
atomic::{AtomicU64, AtomicUsize, Ordering},
},
+ time::Instant,
};
use sysinfo::{ProcessRefreshKind, RefreshKind, System, UpdateKind};
use util::{ResultExt, paths::PathStyle, rel_path::RelPath};
@@ -62,6 +63,7 @@ pub struct HeadlessProject {
pub extensions: Entity<HeadlessExtensionStore>,
pub git_store: Entity<GitStore>,
pub environment: Entity<ProjectEnvironment>,
+ pub profiling_collector: gpui::ProfilingCollector,
// Used mostly to keep alive the toolchain store for RPC handlers.
// Local variant is used within LSP store, but that's a separate entity.
pub _toolchain_store: Entity<ToolchainStore>,
@@ -74,6 +76,7 @@ pub struct HeadlessAppState {
pub node_runtime: NodeRuntime,
pub languages: Arc<LanguageRegistry>,
pub extension_host_proxy: Arc<ExtensionHostProxy>,
+ pub startup_time: Instant,
}
impl HeadlessProject {
@@ -90,6 +93,7 @@ impl HeadlessProject {
node_runtime,
languages,
extension_host_proxy: proxy,
+ startup_time,
}: HeadlessAppState,
init_worktree_trust: bool,
cx: &mut Context<Self>,
@@ -286,6 +290,7 @@ impl HeadlessProject {
session.add_request_handler(cx.weak_entity(), Self::handle_shutdown_remote_server);
session.add_request_handler(cx.weak_entity(), Self::handle_ping);
session.add_request_handler(cx.weak_entity(), Self::handle_get_processes);
+ session.add_request_handler(cx.weak_entity(), Self::handle_get_remote_profiling_data);
session.add_entity_request_handler(Self::handle_add_worktree);
session.add_request_handler(cx.weak_entity(), Self::handle_remove_worktree);
@@ -344,6 +349,7 @@ impl HeadlessProject {
extensions,
git_store,
environment,
+ profiling_collector: gpui::ProfilingCollector::new(startup_time),
_toolchain_store: toolchain_store,
}
}
@@ -1101,6 +1107,53 @@ impl HeadlessProject {
Ok(proto::GetProcessesResponse { processes })
}
+    /// Handles `GetRemoteProfilingData`: snapshots task timings from the GPUI
+    /// dispatcher and returns only entries not previously sent to this client
+    /// (`collect_unseen`), so repeated polls receive deltas rather than the
+    /// full history each time.
+    async fn handle_get_remote_profiling_data(
+        this: Entity<Self>,
+        envelope: TypedEnvelope<proto::GetRemoteProfilingData>,
+        cx: AsyncApp,
+    ) -> Result<proto::GetRemoteProfilingDataResponse> {
+        let foreground_only = envelope.payload.foreground_only;
+
+        // NOTE(review): if `AsyncApp::update` returns a `Result` in this gpui
+        // version, this binding needs a `?` — confirm against the API.
+        let (deltas, now_nanos) = cx.update(|cx| {
+            let dispatcher = cx.foreground_executor().dispatcher();
+            // Either only the foreground thread's timings or every thread's,
+            // per the request flag.
+            let timings = if foreground_only {
+                vec![dispatcher.get_current_thread_timings()]
+            } else {
+                dispatcher.get_all_timings()
+            };
+            this.update(cx, |this, _cx| {
+                let deltas = this.profiling_collector.collect_unseen(timings);
+                // "Now" on the same clock as the timings: nanoseconds since
+                // server startup.
+                let now_nanos = Instant::now()
+                    .duration_since(this.profiling_collector.startup_time())
+                    .as_nanos() as u64;
+                (deltas, now_nanos)
+            })
+        });
+
+        // Convert collector deltas into the wire format.
+        let threads = deltas
+            .into_iter()
+            .map(|delta| proto::RemoteProfilingThread {
+                thread_name: delta.thread_name,
+                thread_id: delta.thread_id,
+                timings: delta
+                    .new_timings
+                    .into_iter()
+                    .map(|t| proto::RemoteProfilingTiming {
+                        location: Some(proto::RemoteProfilingLocation {
+                            file: t.location.file.to_string(),
+                            line: t.location.line,
+                            column: t.location.column,
+                        }),
+                        // u128 -> u64 casts truncate silently; fine for
+                        // nanosecond offsets within ~584 years of uptime.
+                        start_nanos: t.start as u64,
+                        duration_nanos: t.duration as u64,
+                    })
+                    .collect(),
+            })
+            .collect();
+
+        Ok(proto::GetRemoteProfilingDataResponse { threads, now_nanos })
+    }
+
+
async fn handle_get_directory_environment(
this: Entity<Self>,
envelope: TypedEnvelope<proto::GetDirectoryEnvironment>,
@@ -2091,6 +2091,7 @@ pub async fn init_test(
node_runtime,
languages,
extension_host_proxy: proxy,
+ startup_time: std::time::Instant::now(),
},
false,
cx,
@@ -54,6 +54,7 @@ use std::{
path::{Path, PathBuf},
str::FromStr,
sync::{Arc, LazyLock},
+ time::Instant,
};
use thiserror::Error;
use util::{ResultExt, command::new_command};
@@ -447,6 +448,7 @@ pub fn execute_run(
) -> Result<()> {
init_paths()?;
+ let startup_time = Instant::now();
let app = gpui::Application::headless();
let pid = std::process::id();
let id = pid.to_string();
@@ -567,6 +569,7 @@ pub fn execute_run(
node_runtime,
languages,
extension_host_proxy,
+ startup_time,
},
true,
cx,
@@ -116,6 +116,7 @@ impl ForegroundExecutor {
}
}
+ #[track_caller]
pub fn timer(&self, duration: Duration) -> Timer {
self.scheduler.timer(duration)
}
@@ -211,6 +212,7 @@ impl BackgroundExecutor {
Task(TaskState::Spawned(task))
}
+ #[track_caller]
pub fn timer(&self, duration: Duration) -> Timer {
self.scheduler.timer(duration)
}
@@ -113,6 +113,7 @@ pub trait Scheduler: Send + Sync {
self.schedule_background_with_priority(runnable, Priority::default());
}
+ #[track_caller]
fn timer(&self, timeout: Duration) -> Timer;
fn clock(&self) -> Arc<dyn Clock>;
@@ -614,6 +614,7 @@ impl Scheduler for TestScheduler {
});
}
+ #[track_caller]
fn timer(&self, duration: Duration) -> Timer {
let (tx, rx) = oneshot::channel();
let state = &mut *self.state.lock();
@@ -395,6 +395,10 @@ impl Scrollbars {
Self::new_with_setting(show_along, |_| ShowScrollbar::default())
}
+    /// Creates scrollbars along `show_along` that are always shown,
+    /// bypassing the settings-derived visibility used by `for_settings`.
+    pub fn always_visible(show_along: ScrollAxes) -> Self {
+        Self::new_with_setting(show_along, |_| ShowScrollbar::Always)
+    }
+
pub fn for_settings<S: ScrollbarVisibility>() -> Scrollbars {
Scrollbars::new_with_setting(ScrollAxes::Both, |cx| S::get_value(cx).visibility(cx))
}