Detailed changes
@@ -180,6 +180,13 @@ pub async fn run_randomized_test<T: RandomizedTest>(
T::on_quiesce(&mut server, &mut clients).await;
for (client, cx) in clients {
+ cx.update(|cx| {
+ for window in cx.windows() {
+ window
+ .update(cx, |_, window, _| window.remove_window())
+ .ok();
+ }
+ });
cx.update(|cx| {
let settings = cx.remove_global::<SettingsStore>();
cx.clear_globals();
@@ -187,8 +194,8 @@ pub async fn run_randomized_test<T: RandomizedTest>(
theme::init(theme::LoadThemes::JustBase, cx);
drop(client);
});
+ executor.run_until_parked();
}
- executor.run_until_parked();
if let Some(path) = plan_save_path() {
eprintln!("saved test plan to path {:?}", path);
@@ -556,6 +563,13 @@ impl<T: RandomizedTest> TestPlan<T> {
log::info!("{} removed", client.username);
plan.lock().user(removed_user_id).online = false;
+ client_cx.update(|cx| {
+ for window in cx.windows() {
+ window
+ .update(cx, |_, window, _| window.remove_window())
+ .ok();
+ }
+ });
client_cx.update(|cx| {
cx.clear_globals();
drop(client);
@@ -42,6 +42,14 @@ pub fn init(app_state: &Arc<AppState>, cx: &mut App) {
}
}
}
+
+ for window in notification_windows.drain(..) {
+ window
+ .update(cx, |_, window, _| {
+ window.remove_window();
+ })
+ .log_err();
+ }
})
.detach();
}
@@ -2398,7 +2398,9 @@ impl Editor {
diagnostics_max_severity,
hard_wrap: None,
completion_provider: project.clone().map(|project| Rc::new(project) as _),
- semantics_provider: project.clone().map(|project| Rc::new(project) as _),
+ semantics_provider: project
+ .as_ref()
+ .map(|project| Rc::new(project.downgrade()) as _),
collaboration_hub: project.clone().map(|project| Box::new(project) as _),
project,
blink_manager: blink_manager.clone(),
@@ -23944,7 +23946,7 @@ impl Editor {
}
pub fn refresh_inline_values(&mut self, cx: &mut Context<Self>) {
- let Some(project) = self.project.clone() else {
+ let Some(semantics) = self.semantics_provider.clone() else {
return;
};
@@ -23979,7 +23981,7 @@ impl Editor {
let range =
buffer.read(cx).anchor_before(0)..current_execution_position.text_anchor;
- project.inline_values(buffer, range, cx)
+ semantics.inline_values(buffer, range, cx)
})
.ok()
.flatten()?
@@ -26808,7 +26810,7 @@ pub trait SemanticsProvider {
buffer: Entity<Buffer>,
refresh: Option<RefreshForServer>,
cx: &mut App,
- ) -> Shared<Task<std::result::Result<BufferSemanticTokens, Arc<anyhow::Error>>>>;
+ ) -> Option<Shared<Task<std::result::Result<BufferSemanticTokens, Arc<anyhow::Error>>>>>;
fn supports_inlay_hints(&self, buffer: &Entity<Buffer>, cx: &mut App) -> bool;
@@ -27290,14 +27292,15 @@ impl CompletionProvider for Entity<Project> {
}
}
-impl SemanticsProvider for Entity<Project> {
+impl SemanticsProvider for WeakEntity<Project> {
fn hover(
&self,
buffer: &Entity<Buffer>,
position: text::Anchor,
cx: &mut App,
) -> Option<Task<Option<Vec<project::Hover>>>> {
- Some(self.update(cx, |project, cx| project.hover(buffer, position, cx)))
+ self.update(cx, |project, cx| project.hover(buffer, position, cx))
+ .ok()
}
fn document_highlights(
@@ -27306,9 +27309,10 @@ impl SemanticsProvider for Entity<Project> {
position: text::Anchor,
cx: &mut App,
) -> Option<Task<Result<Vec<DocumentHighlight>>>> {
- Some(self.update(cx, |project, cx| {
+ self.update(cx, |project, cx| {
project.document_highlights(buffer, position, cx)
- }))
+ })
+ .ok()
}
fn definitions(
@@ -27318,12 +27322,13 @@ impl SemanticsProvider for Entity<Project> {
kind: GotoDefinitionKind,
cx: &mut App,
) -> Option<Task<Result<Option<Vec<LocationLink>>>>> {
- Some(self.update(cx, |project, cx| match kind {
+ self.update(cx, |project, cx| match kind {
GotoDefinitionKind::Symbol => project.definitions(buffer, position, cx),
GotoDefinitionKind::Declaration => project.declarations(buffer, position, cx),
GotoDefinitionKind::Type => project.type_definitions(buffer, position, cx),
GotoDefinitionKind::Implementation => project.implementations(buffer, position, cx),
- }))
+ })
+ .ok()
}
fn supports_inlay_hints(&self, buffer: &Entity<Buffer>, cx: &mut App) -> bool {
@@ -27339,6 +27344,7 @@ impl SemanticsProvider for Entity<Project> {
project.any_language_server_supports_inlay_hints(buffer, cx)
})
})
+ .unwrap_or(false)
}
fn supports_semantic_tokens(&self, buffer: &Entity<Buffer>, cx: &mut App) -> bool {
@@ -27347,6 +27353,7 @@ impl SemanticsProvider for Entity<Project> {
project.any_language_server_supports_semantic_tokens(buffer, cx)
})
})
+ .unwrap_or(false)
}
fn inline_values(
@@ -27360,6 +27367,8 @@ impl SemanticsProvider for Entity<Project> {
Some(project.inline_values(session, active_stack_frame, buffer_handle, range, cx))
})
+ .ok()
+ .flatten()
}
fn applicable_inlay_chunks(
@@ -27368,15 +27377,21 @@ impl SemanticsProvider for Entity<Project> {
ranges: &[Range<text::Anchor>],
cx: &mut App,
) -> Vec<Range<BufferRow>> {
- self.read(cx).lsp_store().update(cx, |lsp_store, cx| {
- lsp_store.applicable_inlay_chunks(buffer, ranges, cx)
+ self.update(cx, |project, cx| {
+ project.lsp_store().update(cx, |lsp_store, cx| {
+ lsp_store.applicable_inlay_chunks(buffer, ranges, cx)
+ })
})
+ .unwrap_or_default()
}
fn invalidate_inlay_hints(&self, for_buffers: &HashSet<BufferId>, cx: &mut App) {
- self.read(cx).lsp_store().update(cx, |lsp_store, _| {
- lsp_store.invalidate_inlay_hints(for_buffers)
- });
+ self.update(cx, |project, cx| {
+ project.lsp_store().update(cx, |lsp_store, _| {
+ lsp_store.invalidate_inlay_hints(for_buffers)
+ })
+ })
+ .ok();
}
fn inlay_hints(
@@ -27387,9 +27402,12 @@ impl SemanticsProvider for Entity<Project> {
known_chunks: Option<(clock::Global, HashSet<Range<BufferRow>>)>,
cx: &mut App,
) -> Option<HashMap<Range<BufferRow>, Task<Result<CacheInlayHints>>>> {
- Some(self.read(cx).lsp_store().update(cx, |lsp_store, cx| {
- lsp_store.inlay_hints(invalidate, buffer, ranges, known_chunks, cx)
- }))
+ self.update(cx, |project, cx| {
+ project.lsp_store().update(cx, |lsp_store, cx| {
+ lsp_store.inlay_hints(invalidate, buffer, ranges, known_chunks, cx)
+ })
+ })
+ .ok()
}
fn semantic_tokens(
@@ -27397,10 +27415,13 @@ impl SemanticsProvider for Entity<Project> {
buffer: Entity<Buffer>,
refresh: Option<RefreshForServer>,
cx: &mut App,
- ) -> Shared<Task<std::result::Result<BufferSemanticTokens, Arc<anyhow::Error>>>> {
- self.read(cx).lsp_store().update(cx, |lsp_store, cx| {
- lsp_store.semantic_tokens(buffer, refresh, cx)
+ ) -> Option<Shared<Task<std::result::Result<BufferSemanticTokens, Arc<anyhow::Error>>>>> {
+ self.update(cx, |this, cx| {
+ this.lsp_store().update(cx, |lsp_store, cx| {
+ lsp_store.semantic_tokens(buffer, refresh, cx)
+ })
})
+ .ok()
}
fn range_for_rename(
@@ -27409,7 +27430,7 @@ impl SemanticsProvider for Entity<Project> {
position: text::Anchor,
cx: &mut App,
) -> Option<Task<Result<Option<Range<text::Anchor>>>>> {
- Some(self.update(cx, |project, cx| {
+ self.update(cx, |project, cx| {
let buffer = buffer.clone();
let task = project.prepare_rename(buffer.clone(), position, cx);
cx.spawn(async move |_, cx| {
@@ -27432,7 +27453,8 @@ impl SemanticsProvider for Entity<Project> {
}
})
})
- }))
+ })
+ .ok()
}
fn perform_rename(
@@ -27442,9 +27464,10 @@ impl SemanticsProvider for Entity<Project> {
new_name: String,
cx: &mut App,
) -> Option<Task<Result<ProjectTransaction>>> {
- Some(self.update(cx, |project, cx| {
+ self.update(cx, |project, cx| {
project.perform_rename(buffer.clone(), position, new_name, cx)
- }))
+ })
+ .ok()
}
}
@@ -217,8 +217,9 @@ impl Editor {
}) {
None
} else {
- let task = sema.semantic_tokens(buffer, for_server, cx);
- Some(async move { (buffer_id, query_version, task.await) })
+ sema.semantic_tokens(buffer, for_server, cx).map(
+ |task| async move { (buffer_id, query_version, task.await) },
+ )
}
})
.collect::<Vec<_>>()
@@ -123,8 +123,6 @@ pub fn assert_text_with_selections(
assert_eq!(actual, marked_text, "Selections don't match");
}
-// RA thinks this is dead code even though it is used in a whole lot of tests
-#[allow(dead_code)]
#[cfg(any(test, feature = "test-support"))]
pub(crate) fn build_editor(
buffer: Entity<MultiBuffer>,
@@ -1390,7 +1390,9 @@ mod tests {
(branch_list, cx)
}
- async fn init_fake_repository(cx: &mut TestAppContext) -> Entity<Repository> {
+ async fn init_fake_repository(
+ cx: &mut TestAppContext,
+ ) -> (Entity<Project>, Entity<Repository>) {
let fs = FakeFs::new(cx.executor());
fs.insert_tree(
path!("/dir"),
@@ -1413,7 +1415,7 @@ mod tests {
let project = Project::test(fs.clone(), [path!("/dir").as_ref()], cx).await;
let repository = cx.read(|cx| project.read(cx).active_repository(cx));
- repository.unwrap()
+ (project, repository.unwrap())
}
#[gpui::test]
@@ -1476,7 +1478,7 @@ mod tests {
#[gpui::test]
async fn test_delete_branch(cx: &mut TestAppContext) {
init_test(cx);
- let repository = init_fake_repository(cx).await;
+ let (_project, repository) = init_fake_repository(cx).await;
let branches = create_test_branches();
@@ -1534,7 +1536,7 @@ mod tests {
#[gpui::test]
async fn test_delete_remote(cx: &mut TestAppContext) {
init_test(cx);
- let repository = init_fake_repository(cx).await;
+ let (_project, repository) = init_fake_repository(cx).await;
let branches = vec![
create_test_branch("main", true, Some("origin"), Some(1000)),
create_test_branch("feature-auth", false, Some("origin"), Some(900)),
@@ -1721,7 +1723,7 @@ mod tests {
const NEW_BRANCH: &str = "new-feature-branch";
init_test(test_cx);
- let repository = init_fake_repository(test_cx).await;
+ let (_project, repository) = init_fake_repository(test_cx).await;
let branches = vec![
create_test_branch(MAIN_BRANCH, true, None, Some(1000)),
@@ -1785,7 +1787,7 @@ mod tests {
#[gpui::test]
async fn test_remote_url_detection_https(cx: &mut TestAppContext) {
init_test(cx);
- let repository = init_fake_repository(cx).await;
+ let (_project, repository) = init_fake_repository(cx).await;
let branches = vec![create_test_branch("main", true, None, Some(1000))];
let (branch_list, mut ctx) = init_branch_list_test(repository.into(), branches, cx).await;
@@ -753,6 +753,37 @@ impl App {
app
}
+ #[doc(hidden)]
+ pub fn ref_counts_drop_handle(&self) -> impl Sized + use<> {
+ self.entities.ref_counts_drop_handle()
+ }
+
+ /// Captures a snapshot of all entities that currently have alive handles.
+ ///
+ /// The returned [`LeakDetectorSnapshot`] can later be passed to
+ /// [`assert_no_new_leaks`](Self::assert_no_new_leaks) to verify that no
+ /// entities created after the snapshot are still alive.
+ #[cfg(any(test, feature = "leak-detection"))]
+ pub fn leak_detector_snapshot(&self) -> LeakDetectorSnapshot {
+ self.entities.leak_detector_snapshot()
+ }
+
+ /// Asserts that no entities created after `snapshot` still have alive handles.
+ ///
+ /// Entities that were already tracked at the time of the snapshot are ignored,
+ /// even if they still have handles. Only *new* entities (those whose
+ /// `EntityId` was not present in the snapshot) are considered leaks.
+ ///
+ /// # Panics
+ ///
+ /// Panics if any new entity handles exist. The panic message lists every
+ /// leaked entity with its type name, and includes allocation-site backtraces
+ /// when `LEAK_BACKTRACE` is set.
+ #[cfg(any(test, feature = "leak-detection"))]
+ pub fn assert_no_new_leaks(&self, snapshot: &LeakDetectorSnapshot) {
+ self.entities.assert_no_new_leaks(snapshot)
+ }
+
/// Quit the application gracefully. Handlers registered with [`Context::on_app_quit`]
/// will be given 100ms to complete before exiting.
pub fn shutdown(&mut self) {
@@ -4,7 +4,7 @@ use crate::{
PromptLevel, Render, Reservation, Result, Subscription, Task, VisualContext, Window,
WindowHandle,
};
-use anyhow::Context as _;
+use anyhow::{Context as _, bail};
use derive_more::{Deref, DerefMut};
use futures::channel::oneshot;
use futures::future::FutureExt;
@@ -88,6 +88,9 @@ impl AppContext for AsyncApp {
{
let app = self.app.upgrade().context("app was released")?;
let mut lock = app.try_borrow_mut()?;
+ if lock.quitting {
+ bail!("app is quitting");
+ }
lock.update_window(window, f)
}
@@ -101,6 +104,9 @@ impl AppContext for AsyncApp {
{
let app = self.app.upgrade().context("app was released")?;
let lock = app.borrow();
+ if lock.quitting {
+ bail!("app is quitting");
+ }
lock.read_window(window, read)
}
@@ -174,6 +180,9 @@ impl AsyncApp {
{
let app = self.app();
let mut lock = app.borrow_mut();
+ if lock.quitting {
+ bail!("app is quitting");
+ }
lock.open_window(options, build_root_view)
}
@@ -211,6 +220,9 @@ impl AsyncApp {
pub fn try_read_global<G: Global, R>(&self, read: impl FnOnce(&G, &App) -> R) -> Option<R> {
let app = self.app();
let app = app.borrow_mut();
+ if app.quitting {
+ return None;
+ }
Some(read(app.try_global()?, &app))
}
@@ -83,6 +83,32 @@ impl EntityMap {
}
}
+ #[doc(hidden)]
+ pub fn ref_counts_drop_handle(&self) -> impl Sized + use<> {
+ self.ref_counts.clone()
+ }
+
+ /// Captures a snapshot of all entities that currently have alive handles.
+ ///
+ /// The returned [`LeakDetectorSnapshot`] can later be passed to
+ /// [`assert_no_new_leaks`](Self::assert_no_new_leaks) to verify that no
+ /// entities created after the snapshot are still alive.
+ #[cfg(any(test, feature = "leak-detection"))]
+ pub fn leak_detector_snapshot(&self) -> LeakDetectorSnapshot {
+ self.ref_counts.read().leak_detector.snapshot()
+ }
+
+ /// Asserts that no entities created after `snapshot` still have alive handles.
+ ///
+ /// See [`LeakDetector::assert_no_new_leaks`] for details.
+ #[cfg(any(test, feature = "leak-detection"))]
+ pub fn assert_no_new_leaks(&self, snapshot: &LeakDetectorSnapshot) {
+ self.ref_counts
+ .read()
+ .leak_detector
+ .assert_no_new_leaks(snapshot)
+ }
+
/// Reserve a slot for an entity, which you can subsequently use with `insert`.
pub fn reserve<T: 'static>(&self) -> Slot<T> {
let id = self.ref_counts.write().counts.insert(1.into());
@@ -225,7 +251,12 @@ pub struct AnyEntity {
}
impl AnyEntity {
- fn new(id: EntityId, entity_type: TypeId, entity_map: Weak<RwLock<EntityRefCounts>>) -> Self {
+ fn new(
+ id: EntityId,
+ entity_type: TypeId,
+ entity_map: Weak<RwLock<EntityRefCounts>>,
+ #[cfg(any(test, feature = "leak-detection"))] type_name: &'static str,
+ ) -> Self {
Self {
entity_id: id,
entity_type,
@@ -236,7 +267,7 @@ impl AnyEntity {
.unwrap()
.write()
.leak_detector
- .handle_created(id),
+ .handle_created(id, Some(type_name)),
entity_map,
}
}
@@ -299,7 +330,7 @@ impl Clone for AnyEntity {
.unwrap()
.write()
.leak_detector
- .handle_created(self.entity_id),
+ .handle_created(self.entity_id, None),
}
}
}
@@ -395,7 +426,13 @@ impl<T: 'static> Entity<T> {
T: 'static,
{
Self {
- any_entity: AnyEntity::new(id, TypeId::of::<T>(), entity_map),
+ any_entity: AnyEntity::new(
+ id,
+ TypeId::of::<T>(),
+ entity_map,
+ #[cfg(any(test, feature = "leak-detection"))]
+ std::any::type_name::<T>(),
+ ),
entity_type: PhantomData,
}
}
@@ -574,7 +611,7 @@ impl AnyWeakEntity {
.unwrap()
.write()
.leak_detector
- .handle_created(self.entity_id),
+ .handle_created(self.entity_id, None),
})
}
@@ -892,7 +929,23 @@ pub(crate) struct HandleId {
#[cfg(any(test, feature = "leak-detection"))]
pub(crate) struct LeakDetector {
next_handle_id: u64,
- entity_handles: HashMap<EntityId, HashMap<HandleId, Option<backtrace::Backtrace>>>,
+ entity_handles: HashMap<EntityId, EntityLeakData>,
+}
+
+/// A snapshot of the set of alive entities at a point in time.
+///
+/// Created by [`LeakDetector::snapshot`]. Can later be passed to
+/// [`LeakDetector::assert_no_new_leaks`] to verify that no new entity
+/// handles remain between the snapshot and the current state.
+#[cfg(any(test, feature = "leak-detection"))]
+pub struct LeakDetectorSnapshot {
+ entity_ids: collections::HashSet<EntityId>,
+}
+
+#[cfg(any(test, feature = "leak-detection"))]
+struct EntityLeakData {
+ handles: HashMap<HandleId, Option<backtrace::Backtrace>>,
+ type_name: &'static str,
}
#[cfg(any(test, feature = "leak-detection"))]
@@ -903,11 +956,21 @@ impl LeakDetector {
/// the handle is dropped. If `LEAK_BACKTRACE` is set, captures a backtrace
/// at the allocation site.
#[track_caller]
- pub fn handle_created(&mut self, entity_id: EntityId) -> HandleId {
+ pub fn handle_created(
+ &mut self,
+ entity_id: EntityId,
+ type_name: Option<&'static str>,
+ ) -> HandleId {
let id = gpui_util::post_inc(&mut self.next_handle_id);
let handle_id = HandleId { id };
- let handles = self.entity_handles.entry(entity_id).or_default();
- handles.insert(
+ let handles = self
+ .entity_handles
+ .entry(entity_id)
+ .or_insert_with(|| EntityLeakData {
+ handles: HashMap::default(),
+ type_name: type_name.unwrap_or("<unknown>"),
+ });
+ handles.handles.insert(
handle_id,
LEAK_BACKTRACE.then(backtrace::Backtrace::new_unresolved),
);
@@ -919,8 +982,14 @@ impl LeakDetector {
/// This removes the handle from tracking. The `handle_id` should be the same
/// one returned by `handle_created` when the handle was allocated.
pub fn handle_released(&mut self, entity_id: EntityId, handle_id: HandleId) {
- let handles = self.entity_handles.entry(entity_id).or_default();
- handles.remove(&handle_id);
+ if let std::collections::hash_map::Entry::Occupied(mut data) =
+ self.entity_handles.entry(entity_id)
+ {
+ data.get_mut().handles.remove(&handle_id);
+ if data.get().handles.is_empty() {
+ data.remove();
+ }
+ }
}
/// Asserts that all handles to the given entity have been released.
@@ -932,11 +1001,10 @@ impl LeakDetector {
/// otherwise it suggests setting the environment variable to get more info.
pub fn assert_released(&mut self, entity_id: EntityId) {
use std::fmt::Write as _;
- let handles = self.entity_handles.entry(entity_id).or_default();
- if !handles.is_empty() {
+ if let Some(data) = self.entity_handles.remove(&entity_id) {
let mut out = String::new();
- for backtrace in handles.values_mut() {
- if let Some(mut backtrace) = backtrace.take() {
+ for (_, backtrace) in data.handles {
+ if let Some(mut backtrace) = backtrace {
backtrace.resolve();
writeln!(out, "Leaked handle:\n{:?}", backtrace).unwrap();
} else {
@@ -950,6 +1018,96 @@ impl LeakDetector {
panic!("{out}");
}
}
+
+ /// Captures a snapshot of all entity IDs that currently have alive handles.
+ ///
+ /// The returned [`LeakDetectorSnapshot`] can later be passed to
+ /// [`assert_no_new_leaks`](Self::assert_no_new_leaks) to verify that no
+ /// entities created after the snapshot are still alive.
+ pub fn snapshot(&self) -> LeakDetectorSnapshot {
+ LeakDetectorSnapshot {
+ entity_ids: self.entity_handles.keys().copied().collect(),
+ }
+ }
+
+ /// Asserts that no entities created after `snapshot` still have alive handles.
+ ///
+ /// Entities that were already tracked at the time of the snapshot are ignored,
+ /// even if they still have handles. Only *new* entities (those whose
+ /// `EntityId` was not present in the snapshot) are considered leaks.
+ ///
+ /// # Panics
+ ///
+ /// Panics if any new entity handles exist. The panic message lists every
+ /// leaked entity with its type name, and includes allocation-site backtraces
+ /// when `LEAK_BACKTRACE` is set.
+ pub fn assert_no_new_leaks(&self, snapshot: &LeakDetectorSnapshot) {
+ use std::fmt::Write as _;
+
+ let mut out = String::new();
+ for (entity_id, data) in &self.entity_handles {
+ if snapshot.entity_ids.contains(entity_id) {
+ continue;
+ }
+ for (_, backtrace) in &data.handles {
+ if let Some(backtrace) = backtrace {
+ let mut backtrace = backtrace.clone();
+ backtrace.resolve();
+ writeln!(
+ out,
+ "Leaked handle for entity {} ({entity_id:?}):\n{:?}",
+ data.type_name, backtrace
+ )
+ .unwrap();
+ } else {
+ writeln!(
+ out,
+ "Leaked handle for entity {} ({entity_id:?}): (export LEAK_BACKTRACE to find allocation site)",
+ data.type_name
+ )
+ .unwrap();
+ }
+ }
+ }
+
+ if !out.is_empty() {
+ panic!("New entity leaks detected since snapshot:\n{out}");
+ }
+ }
+}
+
+#[cfg(any(test, feature = "leak-detection"))]
+impl Drop for LeakDetector {
+ fn drop(&mut self) {
+ use std::fmt::Write;
+
+ if self.entity_handles.is_empty() || std::thread::panicking() {
+ return;
+ }
+
+ let mut out = String::new();
+ for (entity_id, data) in self.entity_handles.drain() {
+ for (_handle, backtrace) in data.handles {
+ if let Some(mut backtrace) = backtrace {
+ backtrace.resolve();
+ writeln!(
+ out,
+ "Leaked handle for entity {} ({entity_id:?}):\n{:?}",
+ data.type_name, backtrace
+ )
+ .unwrap();
+ } else {
+ writeln!(
+ out,
+ "Leaked handle for entity {} ({entity_id:?}): (export LEAK_BACKTRACE to find allocation site)",
+ data.type_name
+ )
+ .unwrap();
+ }
+ }
+ }
+ panic!("Exited with leaked handles:\n{out}");
+ }
}
#[cfg(test)]
@@ -1007,4 +1165,42 @@ mod test {
vec![1],
);
}
+
+ #[test]
+ fn test_leak_detector_snapshot_no_leaks() {
+ let mut entity_map = EntityMap::new();
+
+ let slot = entity_map.reserve::<TestEntity>();
+ let pre_existing = entity_map.insert(slot, TestEntity { i: 1 });
+
+ let snapshot = entity_map.leak_detector_snapshot();
+
+ let slot = entity_map.reserve::<TestEntity>();
+ let temporary = entity_map.insert(slot, TestEntity { i: 2 });
+ drop(temporary);
+
+ entity_map.assert_no_new_leaks(&snapshot);
+
+ drop(pre_existing);
+ }
+
+ #[test]
+ #[should_panic(expected = "New entity leaks detected since snapshot")]
+ fn test_leak_detector_snapshot_detects_new_leak() {
+ let mut entity_map = EntityMap::new();
+
+ let slot = entity_map.reserve::<TestEntity>();
+ let pre_existing = entity_map.insert(slot, TestEntity { i: 1 });
+
+ let snapshot = entity_map.leak_detector_snapshot();
+
+ let slot = entity_map.reserve::<TestEntity>();
+ let leaked = entity_map.insert(slot, TestEntity { i: 2 });
+
+ // `leaked` is still alive, so this should panic.
+ entity_map.assert_no_new_leaks(&snapshot);
+
+ drop(pre_existing);
+ drop(leaked);
+ }
}
@@ -5,7 +5,7 @@ use crate::{
ModifiersChangedEvent, MouseButton, MouseDownEvent, MouseMoveEvent, MouseUpEvent, Pixels,
Platform, Point, Render, Result, Size, Task, TestDispatcher, TestPlatform,
TestScreenCaptureSource, TestWindow, TextSystem, VisualContext, Window, WindowBounds,
- WindowHandle, WindowOptions, app::GpuiMode,
+ WindowHandle, WindowOptions, app::GpuiMode, window::ElementArenaScope,
};
use anyhow::{anyhow, bail};
use futures::{Stream, StreamExt, channel::oneshot};
@@ -18,18 +18,17 @@ use std::{
/// an implementation of `Context` with additional methods that are useful in tests.
#[derive(Clone)]
pub struct TestAppContext {
- #[doc(hidden)]
- pub app: Rc<AppCell>,
#[doc(hidden)]
pub background_executor: BackgroundExecutor,
#[doc(hidden)]
pub foreground_executor: ForegroundExecutor,
- #[doc(hidden)]
- pub dispatcher: TestDispatcher,
+ dispatcher: TestDispatcher,
test_platform: Rc<TestPlatform>,
text_system: Arc<TextSystem>,
fn_name: Option<&'static str>,
on_quit: Rc<RefCell<Vec<Box<dyn FnOnce() + 'static>>>>,
+ #[doc(hidden)]
+ pub app: Rc<AppCell>,
}
impl AppContext for TestAppContext {
@@ -402,8 +401,8 @@ impl TestAppContext {
}
/// Wait until there are no more pending tasks.
- pub fn run_until_parked(&mut self) {
- self.background_executor.run_until_parked()
+ pub fn run_until_parked(&self) {
+ self.dispatcher.run_until_parked();
}
/// Simulate dispatching an action to the currently focused node in the window.
@@ -819,6 +818,8 @@ impl VisualTestContext {
E: Element,
{
self.update(|window, cx| {
+ let _arena_scope = ElementArenaScope::enter(&cx.element_arena);
+
window.invalidator.set_phase(DrawPhase::Prepaint);
let mut element = Drawable::new(f(window, cx));
element.layout_as_root(space.into(), window, cx);
@@ -830,6 +831,9 @@ impl VisualTestContext {
window.invalidator.set_phase(DrawPhase::None);
window.refresh();
+ drop(element);
+ cx.element_arena.borrow_mut().clear();
+
(request_layout_state, prepaint_state)
})
}
@@ -1,5 +1,5 @@
#![doc = include_str!("../README.md")]
-#![deny(missing_docs)]
+#![warn(missing_docs)]
#![allow(clippy::type_complexity)] // Not useful, GPUI makes heavy use of callbacks
#![allow(clippy::collapsible_else_if)] // False positives in platform specific code
#![allow(unused_mut)] // False positives in platform specific code
@@ -48,6 +48,10 @@ impl TestDispatcher {
self.session_id
}
+ pub fn drain_tasks(&self) {
+ self.scheduler.drain_tasks();
+ }
+
pub fn advance_clock(&self, by: Duration) {
self.scheduler.advance_clock(by);
}
@@ -165,12 +165,13 @@ fn generate_test_function(
dispatcher.clone(),
Some(stringify!(#outer_fn_name)),
);
+ let _entity_refcounts = #cx_varname.app.borrow().ref_counts_drop_handle();
));
cx_teardowns.extend(quote!(
- dispatcher.run_until_parked();
- #cx_varname.executor().forbid_parking();
- #cx_varname.quit();
- dispatcher.run_until_parked();
+ #cx_varname.run_until_parked();
+ #cx_varname.update(|cx| { cx.background_executor().forbid_parking(); cx.quit(); });
+ #cx_varname.run_until_parked();
+ drop(#cx_varname);
));
inner_fn_args.extend(quote!(&mut #cx_varname,));
continue;
@@ -191,10 +192,17 @@ fn generate_test_function(
&[#seeds],
#max_retries,
&mut |dispatcher, _seed| {
- let foreground_executor = gpui::ForegroundExecutor::new(std::sync::Arc::new(dispatcher.clone()));
+ let exec = std::sync::Arc::new(dispatcher.clone());
#cx_vars
- foreground_executor.block_test(#inner_fn_name(#inner_fn_args));
+ gpui::ForegroundExecutor::new(exec.clone()).block_test(#inner_fn_name(#inner_fn_args));
+ drop(exec);
#cx_teardowns
+ // Ideally we would only drop cancelled tasks, that way we could detect leaks due to task <-> entity
+ // cycles as cancelled tasks will be dropped properly once the runnable gets run again
+ //
+ // async-task does not give us the power to do this just yet though
+ dispatcher.drain_tasks();
+ drop(dispatcher);
},
#on_failure_fn_name
);
@@ -229,13 +237,15 @@ fn generate_test_function(
Some(stringify!(#outer_fn_name))
);
let mut #cx_varname_lock = #cx_varname.app.borrow_mut();
+ let _entity_refcounts = #cx_varname_lock.ref_counts_drop_handle();
));
inner_fn_args.extend(quote!(&mut #cx_varname_lock,));
cx_teardowns.extend(quote!(
drop(#cx_varname_lock);
- dispatcher.run_until_parked();
+ #cx_varname.run_until_parked();
#cx_varname.update(|cx| { cx.background_executor().forbid_parking(); cx.quit(); });
- dispatcher.run_until_parked();
+ #cx_varname.run_until_parked();
+ drop(#cx_varname);
));
continue;
}
@@ -246,12 +256,13 @@ fn generate_test_function(
dispatcher.clone(),
Some(stringify!(#outer_fn_name))
);
+ let _entity_refcounts = #cx_varname.app.borrow().ref_counts_drop_handle();
));
cx_teardowns.extend(quote!(
- dispatcher.run_until_parked();
- #cx_varname.executor().forbid_parking();
- #cx_varname.quit();
- dispatcher.run_until_parked();
+ #cx_varname.run_until_parked();
+ #cx_varname.update(|cx| { cx.background_executor().forbid_parking(); cx.quit(); });
+ #cx_varname.run_until_parked();
+ drop(#cx_varname);
));
inner_fn_args.extend(quote!(&mut #cx_varname,));
continue;
@@ -277,6 +288,12 @@ fn generate_test_function(
#cx_vars
#inner_fn_name(#inner_fn_args);
#cx_teardowns
+            // Ideally we would only drop cancelled tasks, that way we could detect leaks due to task <-> entity
+            // cycles as cancelled tasks will be dropped properly once the runnable gets run again
+            //
+ // async-task does not give us the power to do this just yet though
+ dispatcher.drain_tasks();
+ drop(dispatcher);
},
#on_failure_fn_name,
);
@@ -1,7 +1,6 @@
use remote::Interactive;
use std::{
any::Any,
- borrow::Borrow,
path::{Path, PathBuf},
sync::Arc,
time::Duration,
@@ -83,7 +82,7 @@ impl From<ExternalAgentServerName> for SharedString {
}
}
-impl Borrow<str> for ExternalAgentServerName {
+impl std::borrow::Borrow<str> for ExternalAgentServerName {
fn borrow(&self) -> &str {
&self.0
}
@@ -869,7 +869,6 @@ impl BufferStore {
entry
.insert(
- // todo(lw): hot foreground spawn
cx.spawn(async move |this, cx| {
let load_result = load_buffer.await;
this.update(cx, |this, _cx| {
@@ -1407,35 +1407,38 @@ impl SettingsObserver {
let (mut user_tasks_file_rx, watcher_task) =
watch_config_file(cx.background_executor(), fs, file_path.clone());
let user_tasks_content = cx.foreground_executor().block_on(user_tasks_file_rx.next());
- let weak_entry = cx.weak_entity();
cx.spawn(async move |settings_observer, cx| {
let _watcher_task = watcher_task;
let Ok(task_store) = settings_observer.read_with(cx, |settings_observer, _| {
- settings_observer.task_store.clone()
+ settings_observer.task_store.downgrade()
}) else {
return;
};
if let Some(user_tasks_content) = user_tasks_content {
- task_store.update(cx, |task_store, cx| {
- task_store
- .update_user_tasks(
- TaskSettingsLocation::Global(&file_path),
- Some(&user_tasks_content),
- cx,
- )
- .log_err();
- });
+ task_store
+ .update(cx, |task_store, cx| {
+ task_store
+ .update_user_tasks(
+ TaskSettingsLocation::Global(&file_path),
+ Some(&user_tasks_content),
+ cx,
+ )
+ .log_err();
+ })
+ .ok();
}
while let Some(user_tasks_content) = user_tasks_file_rx.next().await {
- let result = task_store.update(cx, |task_store, cx| {
+ let Ok(result) = task_store.update(cx, |task_store, cx| {
task_store.update_user_tasks(
TaskSettingsLocation::Global(&file_path),
Some(&user_tasks_content),
cx,
)
- });
+ }) else {
+ continue;
+ };
- weak_entry
+ settings_observer
.update(cx, |_, cx| match result {
Ok(()) => cx.emit(SettingsObserverEvent::LocalTasksUpdated(Ok(
file_path.clone()
@@ -1459,35 +1462,38 @@ impl SettingsObserver {
let (mut user_tasks_file_rx, watcher_task) =
watch_config_file(cx.background_executor(), fs, file_path.clone());
let user_tasks_content = cx.foreground_executor().block_on(user_tasks_file_rx.next());
- let weak_entry = cx.weak_entity();
cx.spawn(async move |settings_observer, cx| {
let _watcher_task = watcher_task;
let Ok(task_store) = settings_observer.read_with(cx, |settings_observer, _| {
- settings_observer.task_store.clone()
+ settings_observer.task_store.downgrade()
}) else {
return;
};
if let Some(user_tasks_content) = user_tasks_content {
- task_store.update(cx, |task_store, cx| {
- task_store
- .update_user_debug_scenarios(
- TaskSettingsLocation::Global(&file_path),
- Some(&user_tasks_content),
- cx,
- )
- .log_err();
- });
+ task_store
+ .update(cx, |task_store, cx| {
+ task_store
+ .update_user_debug_scenarios(
+ TaskSettingsLocation::Global(&file_path),
+ Some(&user_tasks_content),
+ cx,
+ )
+ .log_err();
+ })
+ .ok();
}
while let Some(user_tasks_content) = user_tasks_file_rx.next().await {
- let result = task_store.update(cx, |task_store, cx| {
+ let Ok(result) = task_store.update(cx, |task_store, cx| {
task_store.update_user_debug_scenarios(
TaskSettingsLocation::Global(&file_path),
Some(&user_tasks_content),
cx,
)
- });
+ }) else {
+ continue;
+ };
- weak_entry
+ settings_observer
.update(cx, |_, cx| match result {
Ok(()) => cx.emit(SettingsObserverEvent::LocalDebugScenariosUpdated(Ok(
file_path.clone(),
@@ -1849,6 +1849,7 @@ impl RemoteServerProjects {
) {
let replace_window = window.window_handle().downcast::<MultiWorkspace>();
+ let app_state = Arc::downgrade(&app_state);
cx.spawn_in(window, async move |entity, cx| {
let (connection, starting_dir) =
match start_dev_container_with_config(context, config).await {
@@ -1882,6 +1883,9 @@ impl RemoteServerProjects {
})
.log_err();
+ let Some(app_state) = app_state.upgrade() else {
+ return;
+ };
let result = open_remote_project(
connection.into(),
vec![starting_dir].into_iter().map(PathBuf::from).collect(),
@@ -372,8 +372,9 @@ where
impl<F> Drop for Checked<F> {
fn drop(&mut self) {
- assert!(
- self.id == thread_id(),
+ assert_eq!(
+ self.id,
+ thread_id(),
"local task dropped by a thread that didn't spawn it. Task spawned at {}",
self.location
);
@@ -335,6 +335,28 @@ impl TestScheduler {
false
}
+ /// Drops all runnable tasks from the scheduler.
+ ///
+    /// This is used by the leak detector to ensure that all tasks have been dropped, as tasks may keep entities alive otherwise.
+    /// Why do we even have tasks left when tests finish, you may ask? The reason for that is simple: the scheduler itself is the executor, and it retains the scheduled runnables.
+    /// A lot of tasks, including every foreground task, contain an executor handle that keeps the test scheduler alive, causing a reference cycle — thus the need for this function right now.
+ pub fn drain_tasks(&self) {
+        // Dropping runnables may reschedule tasks,
+        // due to drop impls with executors in them,
+        // so keep dropping until we reach a fixpoint.
+ loop {
+ let mut state = self.state.lock();
+ if state.runnables.is_empty() && state.timers.is_empty() {
+ break;
+ }
+ let runnables = std::mem::take(&mut state.runnables);
+ let timers = std::mem::take(&mut state.timers);
+ drop(state);
+ drop(timers);
+ drop(runnables);
+ }
+ }
+
pub fn advance_clock_to_next_timer(&self) -> bool {
if let Some(timer) = self.state.lock().timers.first() {
self.clock.advance(timer.expiration - self.clock.now());