diff --git a/crates/collab/tests/integration/randomized_test_helpers.rs b/crates/collab/tests/integration/randomized_test_helpers.rs index e3e4a122d1df069385ef850aeccaa4c5788d253d..a6772019768ba19e2a92843a1e33b256f0eb8b0c 100644 --- a/crates/collab/tests/integration/randomized_test_helpers.rs +++ b/crates/collab/tests/integration/randomized_test_helpers.rs @@ -180,6 +180,13 @@ pub async fn run_randomized_test( T::on_quiesce(&mut server, &mut clients).await; for (client, cx) in clients { + cx.update(|cx| { + for window in cx.windows() { + window + .update(cx, |_, window, _| window.remove_window()) + .ok(); + } + }); cx.update(|cx| { let settings = cx.remove_global::(); cx.clear_globals(); @@ -187,8 +194,8 @@ pub async fn run_randomized_test( theme::init(theme::LoadThemes::JustBase, cx); drop(client); }); + executor.run_until_parked(); } - executor.run_until_parked(); if let Some(path) = plan_save_path() { eprintln!("saved test plan to path {:?}", path); @@ -556,6 +563,13 @@ impl TestPlan { log::info!("{} removed", client.username); plan.lock().user(removed_user_id).online = false; + client_cx.update(|cx| { + for window in cx.windows() { + window + .update(cx, |_, window, _| window.remove_window()) + .ok(); + } + }); client_cx.update(|cx| { cx.clear_globals(); drop(client); diff --git a/crates/collab_ui/src/notifications/incoming_call_notification.rs b/crates/collab_ui/src/notifications/incoming_call_notification.rs index aabb477072c97f829ab64971488ab66d2f6a79e4..164b91395a8853c330e2f7842b5676fff0916e63 100644 --- a/crates/collab_ui/src/notifications/incoming_call_notification.rs +++ b/crates/collab_ui/src/notifications/incoming_call_notification.rs @@ -42,6 +42,14 @@ pub fn init(app_state: &Arc, cx: &mut App) { } } } + + for window in notification_windows.drain(..) 
{ + window + .update(cx, |_, window, _| { + window.remove_window(); + }) + .log_err(); + } }) .detach(); } diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 25bc9996604773bd67964dcb9f5196c41df6cdce..daeb355b048d649d638a8830bdf3d367ea9cd40b 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -2398,7 +2398,9 @@ impl Editor { diagnostics_max_severity, hard_wrap: None, completion_provider: project.clone().map(|project| Rc::new(project) as _), - semantics_provider: project.clone().map(|project| Rc::new(project) as _), + semantics_provider: project + .as_ref() + .map(|project| Rc::new(project.downgrade()) as _), collaboration_hub: project.clone().map(|project| Box::new(project) as _), project, blink_manager: blink_manager.clone(), @@ -23944,7 +23946,7 @@ impl Editor { } pub fn refresh_inline_values(&mut self, cx: &mut Context) { - let Some(project) = self.project.clone() else { + let Some(semantics) = self.semantics_provider.clone() else { return; }; @@ -23979,7 +23981,7 @@ impl Editor { let range = buffer.read(cx).anchor_before(0)..current_execution_position.text_anchor; - project.inline_values(buffer, range, cx) + semantics.inline_values(buffer, range, cx) }) .ok() .flatten()? 
@@ -26808,7 +26810,7 @@ pub trait SemanticsProvider { buffer: Entity, refresh: Option, cx: &mut App, - ) -> Shared>>>; + ) -> Option>>>>; fn supports_inlay_hints(&self, buffer: &Entity, cx: &mut App) -> bool; @@ -27290,14 +27292,15 @@ impl CompletionProvider for Entity { } } -impl SemanticsProvider for Entity { +impl SemanticsProvider for WeakEntity { fn hover( &self, buffer: &Entity, position: text::Anchor, cx: &mut App, ) -> Option>>> { - Some(self.update(cx, |project, cx| project.hover(buffer, position, cx))) + self.update(cx, |project, cx| project.hover(buffer, position, cx)) + .ok() } fn document_highlights( @@ -27306,9 +27309,10 @@ impl SemanticsProvider for Entity { position: text::Anchor, cx: &mut App, ) -> Option>>> { - Some(self.update(cx, |project, cx| { + self.update(cx, |project, cx| { project.document_highlights(buffer, position, cx) - })) + }) + .ok() } fn definitions( @@ -27318,12 +27322,13 @@ impl SemanticsProvider for Entity { kind: GotoDefinitionKind, cx: &mut App, ) -> Option>>>> { - Some(self.update(cx, |project, cx| match kind { + self.update(cx, |project, cx| match kind { GotoDefinitionKind::Symbol => project.definitions(buffer, position, cx), GotoDefinitionKind::Declaration => project.declarations(buffer, position, cx), GotoDefinitionKind::Type => project.type_definitions(buffer, position, cx), GotoDefinitionKind::Implementation => project.implementations(buffer, position, cx), - })) + }) + .ok() } fn supports_inlay_hints(&self, buffer: &Entity, cx: &mut App) -> bool { @@ -27339,6 +27344,7 @@ impl SemanticsProvider for Entity { project.any_language_server_supports_inlay_hints(buffer, cx) }) }) + .unwrap_or(false) } fn supports_semantic_tokens(&self, buffer: &Entity, cx: &mut App) -> bool { @@ -27347,6 +27353,7 @@ impl SemanticsProvider for Entity { project.any_language_server_supports_semantic_tokens(buffer, cx) }) }) + .unwrap_or(false) } fn inline_values( @@ -27360,6 +27367,8 @@ impl SemanticsProvider for Entity { 
Some(project.inline_values(session, active_stack_frame, buffer_handle, range, cx)) }) + .ok() + .flatten() } fn applicable_inlay_chunks( @@ -27368,15 +27377,21 @@ impl SemanticsProvider for Entity { ranges: &[Range], cx: &mut App, ) -> Vec> { - self.read(cx).lsp_store().update(cx, |lsp_store, cx| { - lsp_store.applicable_inlay_chunks(buffer, ranges, cx) + self.update(cx, |project, cx| { + project.lsp_store().update(cx, |lsp_store, cx| { + lsp_store.applicable_inlay_chunks(buffer, ranges, cx) + }) }) + .unwrap_or_default() } fn invalidate_inlay_hints(&self, for_buffers: &HashSet, cx: &mut App) { - self.read(cx).lsp_store().update(cx, |lsp_store, _| { - lsp_store.invalidate_inlay_hints(for_buffers) - }); + self.update(cx, |project, cx| { + project.lsp_store().update(cx, |lsp_store, _| { + lsp_store.invalidate_inlay_hints(for_buffers) + }) + }) + .ok(); } fn inlay_hints( @@ -27387,9 +27402,12 @@ impl SemanticsProvider for Entity { known_chunks: Option<(clock::Global, HashSet>)>, cx: &mut App, ) -> Option, Task>>> { - Some(self.read(cx).lsp_store().update(cx, |lsp_store, cx| { - lsp_store.inlay_hints(invalidate, buffer, ranges, known_chunks, cx) - })) + self.update(cx, |project, cx| { + project.lsp_store().update(cx, |lsp_store, cx| { + lsp_store.inlay_hints(invalidate, buffer, ranges, known_chunks, cx) + }) + }) + .ok() } fn semantic_tokens( @@ -27397,10 +27415,13 @@ impl SemanticsProvider for Entity { buffer: Entity, refresh: Option, cx: &mut App, - ) -> Shared>>> { - self.read(cx).lsp_store().update(cx, |lsp_store, cx| { - lsp_store.semantic_tokens(buffer, refresh, cx) + ) -> Option>>>> { + self.update(cx, |this, cx| { + this.lsp_store().update(cx, |lsp_store, cx| { + lsp_store.semantic_tokens(buffer, refresh, cx) + }) }) + .ok() } fn range_for_rename( @@ -27409,7 +27430,7 @@ impl SemanticsProvider for Entity { position: text::Anchor, cx: &mut App, ) -> Option>>>> { - Some(self.update(cx, |project, cx| { + self.update(cx, |project, cx| { let buffer = buffer.clone(); 
let task = project.prepare_rename(buffer.clone(), position, cx); cx.spawn(async move |_, cx| { @@ -27432,7 +27453,8 @@ impl SemanticsProvider for Entity { } }) }) - })) + }) + .ok() } fn perform_rename( @@ -27442,9 +27464,10 @@ impl SemanticsProvider for Entity { new_name: String, cx: &mut App, ) -> Option>> { - Some(self.update(cx, |project, cx| { + self.update(cx, |project, cx| { project.perform_rename(buffer.clone(), position, new_name, cx) - })) + }) + .ok() } } diff --git a/crates/editor/src/semantic_tokens.rs b/crates/editor/src/semantic_tokens.rs index 6270e1b37c289c6deb033ebcbf9ea93dba84af8c..31a573f04787e3759a6a21ec15f36ec148a80f30 100644 --- a/crates/editor/src/semantic_tokens.rs +++ b/crates/editor/src/semantic_tokens.rs @@ -217,8 +217,9 @@ impl Editor { }) { None } else { - let task = sema.semantic_tokens(buffer, for_server, cx); - Some(async move { (buffer_id, query_version, task.await) }) + sema.semantic_tokens(buffer, for_server, cx).map( + |task| async move { (buffer_id, query_version, task.await) }, + ) } }) .collect::>() diff --git a/crates/editor/src/test.rs b/crates/editor/src/test.rs index 8052cf215e7ec879dba939a2f66699827bb58aeb..bef2b3fc3ec2b949ffb8288d59b1201f6f3dde90 100644 --- a/crates/editor/src/test.rs +++ b/crates/editor/src/test.rs @@ -123,8 +123,6 @@ pub fn assert_text_with_selections( assert_eq!(actual, marked_text, "Selections don't match"); } -// RA thinks this is dead code even though it is used in a whole lot of tests -#[allow(dead_code)] #[cfg(any(test, feature = "test-support"))] pub(crate) fn build_editor( buffer: Entity, diff --git a/crates/git_ui/src/branch_picker.rs b/crates/git_ui/src/branch_picker.rs index 08290cb88a273d1f3f17da5c08a5b4a402aa74cd..d1ab60b9042fb06a3f049625f7c0a809957a1543 100644 --- a/crates/git_ui/src/branch_picker.rs +++ b/crates/git_ui/src/branch_picker.rs @@ -1390,7 +1390,9 @@ mod tests { (branch_list, cx) } - async fn init_fake_repository(cx: &mut TestAppContext) -> Entity { + async fn 
init_fake_repository( + cx: &mut TestAppContext, + ) -> (Entity, Entity) { let fs = FakeFs::new(cx.executor()); fs.insert_tree( path!("/dir"), @@ -1413,7 +1415,7 @@ mod tests { let project = Project::test(fs.clone(), [path!("/dir").as_ref()], cx).await; let repository = cx.read(|cx| project.read(cx).active_repository(cx)); - repository.unwrap() + (project, repository.unwrap()) } #[gpui::test] @@ -1476,7 +1478,7 @@ mod tests { #[gpui::test] async fn test_delete_branch(cx: &mut TestAppContext) { init_test(cx); - let repository = init_fake_repository(cx).await; + let (_project, repository) = init_fake_repository(cx).await; let branches = create_test_branches(); @@ -1534,7 +1536,7 @@ mod tests { #[gpui::test] async fn test_delete_remote(cx: &mut TestAppContext) { init_test(cx); - let repository = init_fake_repository(cx).await; + let (_project, repository) = init_fake_repository(cx).await; let branches = vec![ create_test_branch("main", true, Some("origin"), Some(1000)), create_test_branch("feature-auth", false, Some("origin"), Some(900)), @@ -1721,7 +1723,7 @@ mod tests { const NEW_BRANCH: &str = "new-feature-branch"; init_test(test_cx); - let repository = init_fake_repository(test_cx).await; + let (_project, repository) = init_fake_repository(test_cx).await; let branches = vec![ create_test_branch(MAIN_BRANCH, true, None, Some(1000)), @@ -1785,7 +1787,7 @@ mod tests { #[gpui::test] async fn test_remote_url_detection_https(cx: &mut TestAppContext) { init_test(cx); - let repository = init_fake_repository(cx).await; + let (_project, repository) = init_fake_repository(cx).await; let branches = vec![create_test_branch("main", true, None, Some(1000))]; let (branch_list, mut ctx) = init_branch_list_test(repository.into(), branches, cx).await; diff --git a/crates/gpui/src/app.rs b/crates/gpui/src/app.rs index 1bd5cd6b3c6a74ee840ac93b08554a82b1f050fa..f1fe264f4ef4ccb09081a6672c7c4ddb1d24dc97 100644 --- a/crates/gpui/src/app.rs +++ b/crates/gpui/src/app.rs @@ -753,6 +753,37 @@ 
impl App { app } + #[doc(hidden)] + pub fn ref_counts_drop_handle(&self) -> impl Sized + use<> { + self.entities.ref_counts_drop_handle() + } + + /// Captures a snapshot of all entities that currently have alive handles. + /// + /// The returned [`LeakDetectorSnapshot`] can later be passed to + /// [`assert_no_new_leaks`](Self::assert_no_new_leaks) to verify that no + /// entities created after the snapshot are still alive. + #[cfg(any(test, feature = "leak-detection"))] + pub fn leak_detector_snapshot(&self) -> LeakDetectorSnapshot { + self.entities.leak_detector_snapshot() + } + + /// Asserts that no entities created after `snapshot` still have alive handles. + /// + /// Entities that were already tracked at the time of the snapshot are ignored, + /// even if they still have handles. Only *new* entities (those whose + /// `EntityId` was not present in the snapshot) are considered leaks. + /// + /// # Panics + /// + /// Panics if any new entity handles exist. The panic message lists every + /// leaked entity with its type name, and includes allocation-site backtraces + /// when `LEAK_BACKTRACE` is set. + #[cfg(any(test, feature = "leak-detection"))] + pub fn assert_no_new_leaks(&self, snapshot: &LeakDetectorSnapshot) { + self.entities.assert_no_new_leaks(snapshot) + } + /// Quit the application gracefully. Handlers registered with [`Context::on_app_quit`] /// will be given 100ms to complete before exiting. 
pub fn shutdown(&mut self) { diff --git a/crates/gpui/src/app/async_context.rs b/crates/gpui/src/app/async_context.rs index ccd39dda89003cf90d51fae43102a565b2136dc2..e2fd203c78364a4d096f9792dcea7e6f7b8113ea 100644 --- a/crates/gpui/src/app/async_context.rs +++ b/crates/gpui/src/app/async_context.rs @@ -4,7 +4,7 @@ use crate::{ PromptLevel, Render, Reservation, Result, Subscription, Task, VisualContext, Window, WindowHandle, }; -use anyhow::Context as _; +use anyhow::{Context as _, bail}; use derive_more::{Deref, DerefMut}; use futures::channel::oneshot; use futures::future::FutureExt; @@ -88,6 +88,9 @@ impl AppContext for AsyncApp { { let app = self.app.upgrade().context("app was released")?; let mut lock = app.try_borrow_mut()?; + if lock.quitting { + bail!("app is quitting"); + } lock.update_window(window, f) } @@ -101,6 +104,9 @@ impl AppContext for AsyncApp { { let app = self.app.upgrade().context("app was released")?; let lock = app.borrow(); + if lock.quitting { + bail!("app is quitting"); + } lock.read_window(window, read) } @@ -174,6 +180,9 @@ impl AsyncApp { { let app = self.app(); let mut lock = app.borrow_mut(); + if lock.quitting { + bail!("app is quitting"); + } lock.open_window(options, build_root_view) } @@ -211,6 +220,9 @@ impl AsyncApp { pub fn try_read_global(&self, read: impl FnOnce(&G, &App) -> R) -> Option { let app = self.app(); let app = app.borrow_mut(); + if app.quitting { + return None; + } Some(read(app.try_global()?, &app)) } diff --git a/crates/gpui/src/app/entity_map.rs b/crates/gpui/src/app/entity_map.rs index b8d9e82680eb6978d073e3e51c420cef9f1f61ec..c12f952cc82ae8c161c5263ea47533bdef55e5e5 100644 --- a/crates/gpui/src/app/entity_map.rs +++ b/crates/gpui/src/app/entity_map.rs @@ -83,6 +83,32 @@ impl EntityMap { } } + #[doc(hidden)] + pub fn ref_counts_drop_handle(&self) -> impl Sized + use<> { + self.ref_counts.clone() + } + + /// Captures a snapshot of all entities that currently have alive handles. 
+ /// + /// The returned [`LeakDetectorSnapshot`] can later be passed to + /// [`assert_no_new_leaks`](Self::assert_no_new_leaks) to verify that no + /// entities created after the snapshot are still alive. + #[cfg(any(test, feature = "leak-detection"))] + pub fn leak_detector_snapshot(&self) -> LeakDetectorSnapshot { + self.ref_counts.read().leak_detector.snapshot() + } + + /// Asserts that no entities created after `snapshot` still have alive handles. + /// + /// See [`LeakDetector::assert_no_new_leaks`] for details. + #[cfg(any(test, feature = "leak-detection"))] + pub fn assert_no_new_leaks(&self, snapshot: &LeakDetectorSnapshot) { + self.ref_counts + .read() + .leak_detector + .assert_no_new_leaks(snapshot) + } + /// Reserve a slot for an entity, which you can subsequently use with `insert`. pub fn reserve(&self) -> Slot { let id = self.ref_counts.write().counts.insert(1.into()); @@ -225,7 +251,12 @@ pub struct AnyEntity { } impl AnyEntity { - fn new(id: EntityId, entity_type: TypeId, entity_map: Weak>) -> Self { + fn new( + id: EntityId, + entity_type: TypeId, + entity_map: Weak>, + #[cfg(any(test, feature = "leak-detection"))] type_name: &'static str, + ) -> Self { Self { entity_id: id, entity_type, @@ -236,7 +267,7 @@ impl AnyEntity { .unwrap() .write() .leak_detector - .handle_created(id), + .handle_created(id, Some(type_name)), entity_map, } } @@ -299,7 +330,7 @@ impl Clone for AnyEntity { .unwrap() .write() .leak_detector - .handle_created(self.entity_id), + .handle_created(self.entity_id, None), } } } @@ -395,7 +426,13 @@ impl Entity { T: 'static, { Self { - any_entity: AnyEntity::new(id, TypeId::of::(), entity_map), + any_entity: AnyEntity::new( + id, + TypeId::of::(), + entity_map, + #[cfg(any(test, feature = "leak-detection"))] + std::any::type_name::(), + ), entity_type: PhantomData, } } @@ -574,7 +611,7 @@ impl AnyWeakEntity { .unwrap() .write() .leak_detector - .handle_created(self.entity_id), + .handle_created(self.entity_id, None), }) } @@ 
-892,7 +929,23 @@ pub(crate) struct HandleId { #[cfg(any(test, feature = "leak-detection"))] pub(crate) struct LeakDetector { next_handle_id: u64, - entity_handles: HashMap>>, + entity_handles: HashMap, +} + +/// A snapshot of the set of alive entities at a point in time. +/// +/// Created by [`LeakDetector::snapshot`]. Can later be passed to +/// [`LeakDetector::assert_no_new_leaks`] to verify that no new entity +/// handles remain between the snapshot and the current state. +#[cfg(any(test, feature = "leak-detection"))] +pub struct LeakDetectorSnapshot { + entity_ids: collections::HashSet, +} + +#[cfg(any(test, feature = "leak-detection"))] +struct EntityLeakData { + handles: HashMap>, + type_name: &'static str, } #[cfg(any(test, feature = "leak-detection"))] @@ -903,11 +956,21 @@ impl LeakDetector { /// the handle is dropped. If `LEAK_BACKTRACE` is set, captures a backtrace /// at the allocation site. #[track_caller] - pub fn handle_created(&mut self, entity_id: EntityId) -> HandleId { + pub fn handle_created( + &mut self, + entity_id: EntityId, + type_name: Option<&'static str>, + ) -> HandleId { let id = gpui_util::post_inc(&mut self.next_handle_id); let handle_id = HandleId { id }; - let handles = self.entity_handles.entry(entity_id).or_default(); - handles.insert( + let handles = self + .entity_handles + .entry(entity_id) + .or_insert_with(|| EntityLeakData { + handles: HashMap::default(), + type_name: type_name.unwrap_or(""), + }); + handles.handles.insert( handle_id, LEAK_BACKTRACE.then(backtrace::Backtrace::new_unresolved), ); @@ -919,8 +982,14 @@ impl LeakDetector { /// This removes the handle from tracking. The `handle_id` should be the same /// one returned by `handle_created` when the handle was allocated. 
pub fn handle_released(&mut self, entity_id: EntityId, handle_id: HandleId) { - let handles = self.entity_handles.entry(entity_id).or_default(); - handles.remove(&handle_id); + if let std::collections::hash_map::Entry::Occupied(mut data) = + self.entity_handles.entry(entity_id) + { + data.get_mut().handles.remove(&handle_id); + if data.get().handles.is_empty() { + data.remove(); + } + } } /// Asserts that all handles to the given entity have been released. @@ -932,11 +1001,10 @@ impl LeakDetector { /// otherwise it suggests setting the environment variable to get more info. pub fn assert_released(&mut self, entity_id: EntityId) { use std::fmt::Write as _; - let handles = self.entity_handles.entry(entity_id).or_default(); - if !handles.is_empty() { + if let Some(data) = self.entity_handles.remove(&entity_id) { let mut out = String::new(); - for backtrace in handles.values_mut() { - if let Some(mut backtrace) = backtrace.take() { + for (_, backtrace) in data.handles { + if let Some(mut backtrace) = backtrace { backtrace.resolve(); writeln!(out, "Leaked handle:\n{:?}", backtrace).unwrap(); } else { @@ -950,6 +1018,96 @@ impl LeakDetector { panic!("{out}"); } } + + /// Captures a snapshot of all entity IDs that currently have alive handles. + /// + /// The returned [`LeakDetectorSnapshot`] can later be passed to + /// [`assert_no_new_leaks`](Self::assert_no_new_leaks) to verify that no + /// entities created after the snapshot are still alive. + pub fn snapshot(&self) -> LeakDetectorSnapshot { + LeakDetectorSnapshot { + entity_ids: self.entity_handles.keys().copied().collect(), + } + } + + /// Asserts that no entities created after `snapshot` still have alive handles. + /// + /// Entities that were already tracked at the time of the snapshot are ignored, + /// even if they still have handles. Only *new* entities (those whose + /// `EntityId` was not present in the snapshot) are considered leaks. + /// + /// # Panics + /// + /// Panics if any new entity handles exist. 
The panic message lists every + /// leaked entity with its type name, and includes allocation-site backtraces + /// when `LEAK_BACKTRACE` is set. + pub fn assert_no_new_leaks(&self, snapshot: &LeakDetectorSnapshot) { + use std::fmt::Write as _; + + let mut out = String::new(); + for (entity_id, data) in &self.entity_handles { + if snapshot.entity_ids.contains(entity_id) { + continue; + } + for (_, backtrace) in &data.handles { + if let Some(backtrace) = backtrace { + let mut backtrace = backtrace.clone(); + backtrace.resolve(); + writeln!( + out, + "Leaked handle for entity {} ({entity_id:?}):\n{:?}", + data.type_name, backtrace + ) + .unwrap(); + } else { + writeln!( + out, + "Leaked handle for entity {} ({entity_id:?}): (export LEAK_BACKTRACE to find allocation site)", + data.type_name + ) + .unwrap(); + } + } + } + + if !out.is_empty() { + panic!("New entity leaks detected since snapshot:\n{out}"); + } + } +} + +#[cfg(any(test, feature = "leak-detection"))] +impl Drop for LeakDetector { + fn drop(&mut self) { + use std::fmt::Write; + + if self.entity_handles.is_empty() || std::thread::panicking() { + return; + } + + let mut out = String::new(); + for (entity_id, data) in self.entity_handles.drain() { + for (_handle, backtrace) in data.handles { + if let Some(mut backtrace) = backtrace { + backtrace.resolve(); + writeln!( + out, + "Leaked handle for entity {} ({entity_id:?}):\n{:?}", + data.type_name, backtrace + ) + .unwrap(); + } else { + writeln!( + out, + "Leaked handle for entity {} ({entity_id:?}): (export LEAK_BACKTRACE to find allocation site)", + data.type_name + ) + .unwrap(); + } + } + } + panic!("Exited with leaked handles:\n{out}"); + } } #[cfg(test)] @@ -1007,4 +1165,42 @@ mod test { vec![1], ); } + + #[test] + fn test_leak_detector_snapshot_no_leaks() { + let mut entity_map = EntityMap::new(); + + let slot = entity_map.reserve::(); + let pre_existing = entity_map.insert(slot, TestEntity { i: 1 }); + + let snapshot = 
entity_map.leak_detector_snapshot(); + + let slot = entity_map.reserve::(); + let temporary = entity_map.insert(slot, TestEntity { i: 2 }); + drop(temporary); + + entity_map.assert_no_new_leaks(&snapshot); + + drop(pre_existing); + } + + #[test] + #[should_panic(expected = "New entity leaks detected since snapshot")] + fn test_leak_detector_snapshot_detects_new_leak() { + let mut entity_map = EntityMap::new(); + + let slot = entity_map.reserve::(); + let pre_existing = entity_map.insert(slot, TestEntity { i: 1 }); + + let snapshot = entity_map.leak_detector_snapshot(); + + let slot = entity_map.reserve::(); + let leaked = entity_map.insert(slot, TestEntity { i: 2 }); + + // `leaked` is still alive, so this should panic. + entity_map.assert_no_new_leaks(&snapshot); + + drop(pre_existing); + drop(leaked); + } } diff --git a/crates/gpui/src/app/test_context.rs b/crates/gpui/src/app/test_context.rs index dd4f37ed2a561f4259b41241c7cf4c83790a2b2f..0f0f0e14fbd8565d8f948579ed1ab23381c80108 100644 --- a/crates/gpui/src/app/test_context.rs +++ b/crates/gpui/src/app/test_context.rs @@ -5,7 +5,7 @@ use crate::{ ModifiersChangedEvent, MouseButton, MouseDownEvent, MouseMoveEvent, MouseUpEvent, Pixels, Platform, Point, Render, Result, Size, Task, TestDispatcher, TestPlatform, TestScreenCaptureSource, TestWindow, TextSystem, VisualContext, Window, WindowBounds, - WindowHandle, WindowOptions, app::GpuiMode, + WindowHandle, WindowOptions, app::GpuiMode, window::ElementArenaScope, }; use anyhow::{anyhow, bail}; use futures::{Stream, StreamExt, channel::oneshot}; @@ -18,18 +18,17 @@ use std::{ /// an implementation of `Context` with additional methods that are useful in tests. 
#[derive(Clone)] pub struct TestAppContext { - #[doc(hidden)] - pub app: Rc, #[doc(hidden)] pub background_executor: BackgroundExecutor, #[doc(hidden)] pub foreground_executor: ForegroundExecutor, - #[doc(hidden)] - pub dispatcher: TestDispatcher, + dispatcher: TestDispatcher, test_platform: Rc, text_system: Arc, fn_name: Option<&'static str>, on_quit: Rc>>>, + #[doc(hidden)] + pub app: Rc, } impl AppContext for TestAppContext { @@ -402,8 +401,8 @@ impl TestAppContext { } /// Wait until there are no more pending tasks. - pub fn run_until_parked(&mut self) { - self.background_executor.run_until_parked() + pub fn run_until_parked(&self) { + self.dispatcher.run_until_parked(); } /// Simulate dispatching an action to the currently focused node in the window. @@ -819,6 +818,8 @@ impl VisualTestContext { E: Element, { self.update(|window, cx| { + let _arena_scope = ElementArenaScope::enter(&cx.element_arena); + window.invalidator.set_phase(DrawPhase::Prepaint); let mut element = Drawable::new(f(window, cx)); element.layout_as_root(space.into(), window, cx); @@ -830,6 +831,9 @@ impl VisualTestContext { window.invalidator.set_phase(DrawPhase::None); window.refresh(); + drop(element); + cx.element_arena.borrow_mut().clear(); + (request_layout_state, prepaint_state) }) } diff --git a/crates/gpui/src/gpui.rs b/crates/gpui/src/gpui.rs index af3fb51ce51f7df570a8e28faad23018ed7dc778..ff36dbce500b8e7472f3d7faa31d9e5cb17e087e 100644 --- a/crates/gpui/src/gpui.rs +++ b/crates/gpui/src/gpui.rs @@ -1,5 +1,5 @@ #![doc = include_str!("../README.md")] -#![deny(missing_docs)] +#![warn(missing_docs)] #![allow(clippy::type_complexity)] // Not useful, GPUI makes heavy use of callbacks #![allow(clippy::collapsible_else_if)] // False positives in platform specific code #![allow(unused_mut)] // False positives in platform specific code diff --git a/crates/gpui/src/platform/test/dispatcher.rs b/crates/gpui/src/platform/test/dispatcher.rs index 
081f5feab014b3712fa23290038f34d8ed4f5a92..c40ec8f669d1e2e58f8af3bcf0fbd64fbddbe4d8 100644 --- a/crates/gpui/src/platform/test/dispatcher.rs +++ b/crates/gpui/src/platform/test/dispatcher.rs @@ -48,6 +48,10 @@ impl TestDispatcher { self.session_id } + pub fn drain_tasks(&self) { + self.scheduler.drain_tasks(); + } + pub fn advance_clock(&self, by: Duration) { self.scheduler.advance_clock(by); } diff --git a/crates/gpui_macros/src/test.rs b/crates/gpui_macros/src/test.rs index 490ea07fee696908fad91410aa67ff124cdabe64..087e01740d2ba48392afee0ed7e31cf0779b180d 100644 --- a/crates/gpui_macros/src/test.rs +++ b/crates/gpui_macros/src/test.rs @@ -165,12 +165,13 @@ fn generate_test_function( dispatcher.clone(), Some(stringify!(#outer_fn_name)), ); + let _entity_refcounts = #cx_varname.app.borrow().ref_counts_drop_handle(); )); cx_teardowns.extend(quote!( - dispatcher.run_until_parked(); - #cx_varname.executor().forbid_parking(); - #cx_varname.quit(); - dispatcher.run_until_parked(); + #cx_varname.run_until_parked(); + #cx_varname.update(|cx| { cx.background_executor().forbid_parking(); cx.quit(); }); + #cx_varname.run_until_parked(); + drop(#cx_varname); )); inner_fn_args.extend(quote!(&mut #cx_varname,)); continue; @@ -191,10 +192,17 @@ fn generate_test_function( &[#seeds], #max_retries, &mut |dispatcher, _seed| { - let foreground_executor = gpui::ForegroundExecutor::new(std::sync::Arc::new(dispatcher.clone())); + let exec = std::sync::Arc::new(dispatcher.clone()); #cx_vars - foreground_executor.block_test(#inner_fn_name(#inner_fn_args)); + gpui::ForegroundExecutor::new(exec.clone()).block_test(#inner_fn_name(#inner_fn_args)); + drop(exec); #cx_teardowns + // Ideally we would only drop cancelled tasks, that way we could detect leaks due to task <-> entity + // cycles as cancelled tasks will be dropped properly once the runnable gets run again + // + // async-task does not give us the power to do this just yet though + dispatcher.drain_tasks(); + drop(dispatcher); }, 
#on_failure_fn_name ); @@ -229,13 +237,15 @@ fn generate_test_function( Some(stringify!(#outer_fn_name)) ); let mut #cx_varname_lock = #cx_varname.app.borrow_mut(); + let _entity_refcounts = #cx_varname_lock.ref_counts_drop_handle(); )); inner_fn_args.extend(quote!(&mut #cx_varname_lock,)); cx_teardowns.extend(quote!( drop(#cx_varname_lock); - dispatcher.run_until_parked(); + #cx_varname.run_until_parked(); #cx_varname.update(|cx| { cx.background_executor().forbid_parking(); cx.quit(); }); - dispatcher.run_until_parked(); + #cx_varname.run_until_parked(); + drop(#cx_varname); )); continue; } @@ -246,12 +256,13 @@ fn generate_test_function( dispatcher.clone(), Some(stringify!(#outer_fn_name)) ); + let _entity_refcounts = #cx_varname.app.borrow().ref_counts_drop_handle(); )); cx_teardowns.extend(quote!( - dispatcher.run_until_parked(); - #cx_varname.executor().forbid_parking(); - #cx_varname.quit(); - dispatcher.run_until_parked(); + #cx_varname.run_until_parked(); + #cx_varname.update(|cx| { cx.background_executor().forbid_parking(); cx.quit(); }); + #cx_varname.run_until_parked(); + drop(#cx_varname); )); inner_fn_args.extend(quote!(&mut #cx_varname,)); continue; @@ -277,6 +288,12 @@ fn generate_test_function( #cx_vars #inner_fn_name(#inner_fn_args); #cx_teardowns + // Ideally we would only drop cancelled tasks, that way we could detect leaks due to task <-> entity + // cycles as cancelled tasks will be dropped properly once the runnable gets run again + // + // async-task does not give us the power to do this just yet though + dispatcher.drain_tasks(); + drop(dispatcher); }, #on_failure_fn_name, ); diff --git a/crates/project/src/agent_server_store.rs b/crates/project/src/agent_server_store.rs index f12e4da5cd39847c94c32fd26c826dff886edbf7..b1dbefa15a3dcaf64c36d027d68060d18f533def 100644 --- a/crates/project/src/agent_server_store.rs +++ b/crates/project/src/agent_server_store.rs @@ -1,7 +1,6 @@ use remote::Interactive; use std::{ any::Any, - borrow::Borrow,
path::{Path, PathBuf}, sync::Arc, time::Duration, @@ -83,7 +82,7 @@ impl From for SharedString { } } -impl Borrow for ExternalAgentServerName { +impl std::borrow::Borrow for ExternalAgentServerName { fn borrow(&self) -> &str { &self.0 } diff --git a/crates/project/src/buffer_store.rs b/crates/project/src/buffer_store.rs index 9faf80b7ac00002c005df3a3b1e0674dcdd4cc81..b9d1105ad02415699fa6a9bd1be8ec1f9c71271a 100644 --- a/crates/project/src/buffer_store.rs +++ b/crates/project/src/buffer_store.rs @@ -869,7 +869,6 @@ impl BufferStore { entry .insert( - // todo(lw): hot foreground spawn cx.spawn(async move |this, cx| { let load_result = load_buffer.await; this.update(cx, |this, _cx| { diff --git a/crates/project/src/project_settings.rs b/crates/project/src/project_settings.rs index 75a3faf4f82d9e98e3c85a96222486cac217afd4..9258b16eef9f1c07cc44987f6608c2e0867c4154 100644 --- a/crates/project/src/project_settings.rs +++ b/crates/project/src/project_settings.rs @@ -1407,35 +1407,38 @@ impl SettingsObserver { let (mut user_tasks_file_rx, watcher_task) = watch_config_file(cx.background_executor(), fs, file_path.clone()); let user_tasks_content = cx.foreground_executor().block_on(user_tasks_file_rx.next()); - let weak_entry = cx.weak_entity(); cx.spawn(async move |settings_observer, cx| { let _watcher_task = watcher_task; let Ok(task_store) = settings_observer.read_with(cx, |settings_observer, _| { - settings_observer.task_store.clone() + settings_observer.task_store.downgrade() }) else { return; }; if let Some(user_tasks_content) = user_tasks_content { - task_store.update(cx, |task_store, cx| { - task_store - .update_user_tasks( - TaskSettingsLocation::Global(&file_path), - Some(&user_tasks_content), - cx, - ) - .log_err(); - }); + task_store + .update(cx, |task_store, cx| { + task_store + .update_user_tasks( + TaskSettingsLocation::Global(&file_path), + Some(&user_tasks_content), + cx, + ) + .log_err(); + }) + .ok(); } while let Some(user_tasks_content) = 
user_tasks_file_rx.next().await { - let result = task_store.update(cx, |task_store, cx| { + let Ok(result) = task_store.update(cx, |task_store, cx| { task_store.update_user_tasks( TaskSettingsLocation::Global(&file_path), Some(&user_tasks_content), cx, ) - }); + }) else { + continue; + }; - weak_entry + settings_observer .update(cx, |_, cx| match result { Ok(()) => cx.emit(SettingsObserverEvent::LocalTasksUpdated(Ok( file_path.clone() @@ -1459,35 +1462,38 @@ impl SettingsObserver { let (mut user_tasks_file_rx, watcher_task) = watch_config_file(cx.background_executor(), fs, file_path.clone()); let user_tasks_content = cx.foreground_executor().block_on(user_tasks_file_rx.next()); - let weak_entry = cx.weak_entity(); cx.spawn(async move |settings_observer, cx| { let _watcher_task = watcher_task; let Ok(task_store) = settings_observer.read_with(cx, |settings_observer, _| { - settings_observer.task_store.clone() + settings_observer.task_store.downgrade() }) else { return; }; if let Some(user_tasks_content) = user_tasks_content { - task_store.update(cx, |task_store, cx| { - task_store - .update_user_debug_scenarios( - TaskSettingsLocation::Global(&file_path), - Some(&user_tasks_content), - cx, - ) - .log_err(); - }); + task_store + .update(cx, |task_store, cx| { + task_store + .update_user_debug_scenarios( + TaskSettingsLocation::Global(&file_path), + Some(&user_tasks_content), + cx, + ) + .log_err(); + }) + .ok(); } while let Some(user_tasks_content) = user_tasks_file_rx.next().await { - let result = task_store.update(cx, |task_store, cx| { + let Ok(result) = task_store.update(cx, |task_store, cx| { task_store.update_user_debug_scenarios( TaskSettingsLocation::Global(&file_path), Some(&user_tasks_content), cx, ) - }); + }) else { + continue; + }; - weak_entry + settings_observer .update(cx, |_, cx| match result { Ok(()) => cx.emit(SettingsObserverEvent::LocalDebugScenariosUpdated(Ok( file_path.clone(), diff --git a/crates/recent_projects/src/remote_servers.rs 
b/crates/recent_projects/src/remote_servers.rs index 8bddcf37270e56932e75635fcd35616d12309b6e..6c0ce4b18854320fda8e72f291800049b07cec1a 100644 --- a/crates/recent_projects/src/remote_servers.rs +++ b/crates/recent_projects/src/remote_servers.rs @@ -1849,6 +1849,7 @@ impl RemoteServerProjects { ) { let replace_window = window.window_handle().downcast::(); + let app_state = Arc::downgrade(&app_state); cx.spawn_in(window, async move |entity, cx| { let (connection, starting_dir) = match start_dev_container_with_config(context, config).await { @@ -1882,6 +1883,9 @@ impl RemoteServerProjects { }) .log_err(); + let Some(app_state) = app_state.upgrade() else { + return; + }; let result = open_remote_project( connection.into(), vec![starting_dir].into_iter().map(PathBuf::from).collect(), diff --git a/crates/scheduler/src/executor.rs b/crates/scheduler/src/executor.rs index 05ea973c4ece53f996b732a7e8c3673487f3b8dc..76df2e69f66398e3709e1db58a847b1cd0079fc4 100644 --- a/crates/scheduler/src/executor.rs +++ b/crates/scheduler/src/executor.rs @@ -372,8 +372,9 @@ where impl Drop for Checked { fn drop(&mut self) { - assert!( - self.id == thread_id(), + assert_eq!( + self.id, + thread_id(), "local task dropped by a thread that didn't spawn it. Task spawned at {}", self.location ); diff --git a/crates/scheduler/src/test_scheduler.rs b/crates/scheduler/src/test_scheduler.rs index 03a8c0b90c77e4c17bd8a1130e5c82ccd935e80f..18ab4d09072fa25919e6e9e1858601d5173bd239 100644 --- a/crates/scheduler/src/test_scheduler.rs +++ b/crates/scheduler/src/test_scheduler.rs @@ -335,6 +335,28 @@ impl TestScheduler { false } + /// Drops all runnable tasks from the scheduler. + /// + /// This is used by the leak detector to ensure that all tasks have been dropped as tasks may keep entities alive otherwise. + /// Why do we even have tasks left when tests finish you may ask. The reason for that is simple, the scheduler itself is the executor and it retains the scheduled runnables. 
+ /// A lot of tasks, including every foreground task contain an executor handle that keeps the test scheduler alive, causing a reference cycle, thus the need for this function right now. + pub fn drain_tasks(&self) { + // dropping runnables may reschedule tasks + // due to drop impls with executors in them + // so drop until we reach a fixpoint + loop { + let mut state = self.state.lock(); + if state.runnables.is_empty() && state.timers.is_empty() { + break; + } + let runnables = std::mem::take(&mut state.runnables); + let timers = std::mem::take(&mut state.timers); + drop(state); + drop(timers); + drop(runnables); + } + } + pub fn advance_clock_to_next_timer(&self) -> bool { if let Some(timer) = self.state.lock().timers.first() { self.clock.advance(timer.expiration - self.clock.now());